This page provides an overview of managing Assistant models: listing, pulling, deleting, preloading, and unloading models on an Ollama host.
Endpoint, URL, and Supported Methods
Models are managed via the View Assistant server API at [http|https]://[hostname]:[port]/v1.0/tenants/[tenant-guid]/assistant/models
Supported methods include: POST
List local models
To list local models, call POST /v1.0/tenants/[tenant-guid]/assistant/models
curl --location 'http://view.homedns.org:8000/v1.0/tenants/00000000-0000-0000-0000-000000000000/assistant/models' \
--header 'Content-Type: application/json' \
--data '{
"OllamaHostname": "ollama",
"OllamaPort": 11434
}'
import { ViewAssistantSdk } from "view-sdk";
const assistant = new ViewAssistantSdk(
"00000000-0000-0000-0000-000000000000", //tenant Id
"default", //access token
"http://localhost:8000/" //endpoint
);
const retrieveLocalModels = async () => {
try {
const response = await assistant.retrieveLocalModels({
OllamaHostname: "ollama",
OllamaPort: 11434,
});
console.log(response);
} catch (err) {
console.log("Error retrieving local models:", err);
}
};
retrieveLocalModels();
import view_sdk
from view_sdk import assistant
sdk = view_sdk.configure( access_key="default",base_url="localhost", tenant_guid= "00000000-0000-0000-0000-000000000000")
def listModels():
result = assistant.Models.retrieve_all(
OllamaHostname= "ollama",
OllamaPort= 11434
)
print(result)
listModels()
Response
[
{
"ModelName": "llama3.1:latest",
"ModelFamily": "llama",
"ParameterSize": "8.0B"
}
]
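If you prefer to call the endpoint directly rather than through an SDK, the sketch below uses Python's requests library. The base URL, tenant GUID, and request body mirror the examples above, and the loop assumes the JSON array shown in the sample response; adjust the placeholder values for your deployment.
import requests

# Assumed values taken from the examples above; adjust to your deployment.
BASE_URL = "http://view.homedns.org:8000"
TENANT_GUID = "00000000-0000-0000-0000-000000000000"

def list_local_models():
    # POST the Ollama connection details to the list endpoint.
    resp = requests.post(
        f"{BASE_URL}/v1.0/tenants/{TENANT_GUID}/assistant/models",
        json={"OllamaHostname": "ollama", "OllamaPort": 11434},
        timeout=30,
    )
    resp.raise_for_status()
    # The sample response is a JSON array of model descriptors.
    for model in resp.json():
        print(model["ModelName"], model["ModelFamily"], model["ParameterSize"])

list_local_models()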
Retrieve model
To retrieve (pull) a model onto the Ollama host, call POST /v1.0/tenants/[tenant-guid]/assistant/models/pull
curl --location 'http://view.homedns.org:8000/v1.0/tenants/00000000-0000-0000-0000-000000000000/assistant/models/pull' \
--header 'Content-Type: application/json' \
--data '{
"ModelName": "llama3.1:latest",
"OllamaHostname": "ollama",
"OllamaPort": 11434
}'
import { ViewAssistantSdk } from "view-sdk";
const assistant = new ViewAssistantSdk(
"00000000-0000-0000-0000-000000000000", //tenant Id
"default", //access token
"http://localhost:8000/" //endpoint
);
const retrieveModels = async () => {
try {
const response = await assistant.retrieveModel(
{
ModelName: "llama3.1:latest",
OllamaHostname: "ollama",
OllamaPort: 11434,
},
(token) => {
console.log(token);
}
);
console.log(response);
} catch (err) {
console.log("Error retrieving models:", err);
}
};
retrieveModels();
import view_sdk
from view_sdk import assistant
sdk = view_sdk.configure( access_key="default",base_url="localhost", tenant_guid= "00000000-0000-0000-0000-000000000000")
def retrieveModel():
result = assistant.Models.retrieve(
ModelName= "llama3.1:latest",
OllamaHostname= "ollama",
OllamaPort= 11434
)
print(result)
retrieveModel()
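The Node.js example above passes a token callback, which suggests the pull endpoint reports progress while the download runs. If you are calling the endpoint directly, the following Python sketch reads the response incrementally; the streaming behaviour and line format are assumptions, so treat this as a starting point rather than a definitive client.
import requests

# Assumed values taken from the examples above; adjust to your deployment.
BASE_URL = "http://view.homedns.org:8000"
TENANT_GUID = "00000000-0000-0000-0000-000000000000"

def pull_model(model_name):
    # Pulling a model can take a while, so read the response as it arrives.
    with requests.post(
        f"{BASE_URL}/v1.0/tenants/{TENANT_GUID}/assistant/models/pull",
        json={
            "ModelName": model_name,
            "OllamaHostname": "ollama",
            "OllamaPort": 11434,
        },
        stream=True,
        timeout=600,
    ) as resp:
        resp.raise_for_status()
        # Print whatever the server emits as the download progresses.
        for line in resp.iter_lines():
            if line:
                print(line.decode("utf-8"))

pull_model("llama3.1:latest")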
Delete model
To delete a model, call POST /v1.0/tenants/[tenant-guid]/assistant/models/delete
curl --location 'http://view.homedns.org:8000/v1.0/tenants/00000000-0000-0000-0000-000000000000/assistant/models/delete' \
--header 'Content-Type: application/json' \
--data '{
"ModelName": "llama3.1:latestss",
"OllamaHostname": "localhost",
"OllamaPort": 11434
}'
import { ViewAssistantSdk } from "view-sdk";
const assistant = new ViewAssistantSdk(
"00000000-0000-0000-0000-000000000000", //tenant Id
"default", //access token
"http://localhost:8000/" //endpoint
);
const deleteModel = async () => {
try {
const response = await assistant.deleteModel({
ModelName: "llama3.1:latest",
OllamaHostname: "ollama",
OllamaPort: 11434,
});
console.log(response);
} catch (err) {
console.log("Error deleting model:", err);
}
};
deleteModel();
import view_sdk
from view_sdk import assistant
sdk = view_sdk.configure( access_key="default",base_url="localhost", tenant_guid= "00000000-0000-0000-0000-000000000000")
def deleteModel():
result = assistant.Models.delete(
ModelName= "llama3.1:latest",
OllamaHostname= "ollama",
OllamaPort= 11434
)
print(result)
deleteModel()
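A common follow-up is to confirm the model is gone by listing local models again. The sketch below chains the delete and list endpoints shown on this page; the base URL, tenant GUID, and Ollama connection details are placeholders taken from the examples above.
import requests

# Assumed values taken from the examples above; adjust to your deployment.
BASE_URL = "http://view.homedns.org:8000"
TENANT_GUID = "00000000-0000-0000-0000-000000000000"
OLLAMA = {"OllamaHostname": "ollama", "OllamaPort": 11434}

def delete_and_verify(model_name):
    # Ask the Assistant API to delete the model from the Ollama host.
    resp = requests.post(
        f"{BASE_URL}/v1.0/tenants/{TENANT_GUID}/assistant/models/delete",
        json={"ModelName": model_name, **OLLAMA},
        timeout=60,
    )
    resp.raise_for_status()

    # List local models again and confirm the deleted model is absent.
    listing = requests.post(
        f"{BASE_URL}/v1.0/tenants/{TENANT_GUID}/assistant/models",
        json=OLLAMA,
        timeout=30,
    )
    listing.raise_for_status()
    remaining = [m["ModelName"] for m in listing.json()]
    print(f"{model_name} removed:", model_name not in remaining)

delete_and_verify("llama3.1:latest")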
Preload model
To preload a model, call POST /v1.0/tenants/[tenant-guid]/assistant/models/load
curl --location 'http://view.homedns.org:8000/v1.0/tenants/00000000-0000-0000-0000-000000000000/assistant/models/load' \
--header 'Content-Type: application/json' \
--data '{
"ModelName": "llama3.1:latest",
"OllamaHostname": "ollama",
"OllamaPort": 11434
}'
import { ViewAssistantSdk } from "view-sdk";
const assistant = new ViewAssistantSdk(
"00000000-0000-0000-0000-000000000000", //tenant Id
"default", //access token
"http://localhost:8000/" //endpoint
);
const preloadModel = async () => {
try {
const response = await assistant.loadUnloadModel({
ModelName: "llama3.1:latest",
OllamaHostname: "ollama",
OllamaPort: 11434,
});
console.log(response);
} catch (err) {
console.log("Error preloading model:", err);
}
};
preloadModel();
import view_sdk
from view_sdk import assistant
sdk = view_sdk.configure( access_key="default",base_url="localhost", tenant_guid= "00000000-0000-0000-0000-000000000000")
def loadModel():
result = assistant.Models.load_unload(
ModelName= "llama3.1:latest",
OllamaHostname= "ollama",
OllamaPort= 11434
)
print(result)
loadModel()
Response
{
"message": "Model llama3.1:latest successfully loaded",
"details": {
"model": "llama3.1:latest",
"created_at": "2025-04-29T13:08:12.461027637Z",
"message": {
"role": "assistant",
"content": ""
},
"done_reason": "load",
"done": true
}
}
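Based on the sample response above, a caller can confirm the preload succeeded by checking the details.done_reason field. A minimal sketch using direct HTTP calls, assuming the request and response shapes shown on this page:
import requests

# Assumed values taken from the examples above; adjust to your deployment.
BASE_URL = "http://view.homedns.org:8000"
TENANT_GUID = "00000000-0000-0000-0000-000000000000"

def preload_model(model_name):
    resp = requests.post(
        f"{BASE_URL}/v1.0/tenants/{TENANT_GUID}/assistant/models/load",
        json={
            "ModelName": model_name,
            "OllamaHostname": "ollama",
            "OllamaPort": 11434,
        },
        timeout=300,
    )
    resp.raise_for_status()
    body = resp.json()
    # Per the sample response, done_reason is "load" once the model is resident.
    loaded = body.get("details", {}).get("done_reason") == "load"
    print(body.get("message"), "| loaded:", loaded)

preload_model("llama3.1:latest")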
Unload model
To unload a model, call POST /v1.0/tenants/[tenant-guid]/assistant/models/load with "Unload": true in the request body
curl --location 'http://view.homedns.org:8000/v1.0/tenants/00000000-0000-0000-0000-000000000000/assistant/models/load' \
--header 'Content-Type: application/json' \
--data '{
"ModelName": "qwen2.5:7b",
"OllamaHostname": "localhost",
"OllamaPort": 11434,
"Unload": true
}'
import { ViewAssistantSdk } from "view-sdk";
const assistant = new ViewAssistantSdk(
"00000000-0000-0000-0000-000000000000", //tenant Id
"default", //access token
"http://localhost:8000/" //endpoint
);
const unloadModel = async () => {
try {
const response = await assistant.loadUnloadModel({
ModelName: "qwen2.5:7b",
OllamaHostname: "localhost",
OllamaPort: 11434,
Unload: true,
});
console.log(response);
} catch (err) {
console.log("Error unloading model:", err);
}
};
unloadModel();
import view_sdk
from view_sdk import assistant
sdk = view_sdk.configure( access_key="default",base_url="localhost", tenant_guid= "00000000-0000-0000-0000-000000000000")
def unLoadModel():
result = assistant.Models.load_unload(
Unload= True,
ModelName= "llama3.1:latest",
OllamaHostname= "ollama",
OllamaPort= 11434
)
print(result)
unLoadModel()
Response
{
"message": "Model qwen2.5:7b successfully unloaded",
"details": {
"model": "qwen2.5:7b",
"created_at": "2025-04-30T10:30:58.73951003Z",
"message": {
"role": "assistant",
"content": ""
},
"done_reason": "unload",
"done": true
}
}
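Because load and unload share the same endpoint and differ only in the Unload flag, a direct caller can wrap both in one helper. The sketch below assumes the request bodies and response messages shown above; the hostname and model name are placeholders.
import requests

# Assumed values taken from the examples above; adjust to your deployment.
BASE_URL = "http://view.homedns.org:8000"
TENANT_GUID = "00000000-0000-0000-0000-000000000000"

def set_model_loaded(model_name, loaded=True):
    # The same /models/load endpoint loads or unloads depending on the Unload flag.
    payload = {
        "ModelName": model_name,
        "OllamaHostname": "localhost",
        "OllamaPort": 11434,
    }
    if not loaded:
        payload["Unload"] = True
    resp = requests.post(
        f"{BASE_URL}/v1.0/tenants/{TENANT_GUID}/assistant/models/load",
        json=payload,
        timeout=300,
    )
    resp.raise_for_status()
    print(resp.json().get("message"))

set_model_loaded("qwen2.5:7b", loaded=True)   # preload
set_model_loaded("qwen2.5:7b", loaded=False)  # unload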