added local ollama support

This commit is contained in: utils.py (10 additions, 10 deletions)
@@ -61,7 +61,7 @@ def load_srt(path: str | Path) -> str:
|
||||
return '\n\n'.join(transcript_lines)
|
||||
|
||||
|
||||
def connect_qumo_ollama(vm_name: str ='ollama-lite') -> Client:
|
||||
def connect_qumo_ollama(vm_name: str ='ollama-lite', port='11434') -> Client:
|
||||
"""Establish connection to Qumo Ollama instance
|
||||
|
||||
vm_name: str ('ollama-lite' or 'hiperf-gpu')
|
||||
@@ -70,14 +70,18 @@ def connect_qumo_ollama(vm_name: str ='ollama-lite') -> Client:
|
||||
Returns:
|
||||
tuple(Client): Ollama client connected to the specified VM
|
||||
"""
|
||||
QUMO_OLLAMA_URL = f'http://{vm_name}.tail44fa00.ts.net:11434'
|
||||
QUMO_OLLAMA_URL = f'http://{vm_name}.tail44fa00.ts.net:{port}'
|
||||
|
||||
if vm_name in ['localhost', '0.0.0.0']:
|
||||
QUMO_OLLAMA_URL = f"http://{vm_name}:{port}"
|
||||
|
||||
try:
|
||||
requests.get(QUMO_OLLAMA_URL, timeout=5)
|
||||
client = Client(
|
||||
host=QUMO_OLLAMA_URL
|
||||
)
|
||||
|
||||
print(f"Connection succesful. WebUI available at: http://{vm_name}.tail44fa00.ts.net:3000\nAvailable models:")
|
||||
print(f"Connection succesful. WebUI available at: {QUMO_OLLAMA_URL.replace(port, '3000')}\nAvailable models:")
|
||||
for m in client.list().models:
|
||||
print(f" - '{m.model}' ")
|
||||
return client
|
||||
|
||||
Reference in New Issue
Block a user