ollama init to common utils

This commit is contained in:
2025-11-26 23:24:01 +01:00
parent cfb0844511
commit b9a8041853
5 changed files with 51 additions and 47 deletions

2
.gitignore vendored
View File

@@ -9,3 +9,5 @@
.ipynb_checkpoints/
__marimo__
__pycache__/

View File

@@ -7,27 +7,13 @@ app = marimo.App(width="medium")
@app.cell
def _():
import marimo as mo
import requests
import ollama
from ollama import Client
from utils import connect_qumo_ollama
VM_NAME = 'hiperf-gpu'
#VM_NAME = 'ollama-vb'
# VM_NAME = 'hiperf-gpu'
VM_NAME = 'ollama-lite'
QUMO_OLLAMA_URL = f'http://{VM_NAME}.tail44fa00.ts.net:11434'
return Client, QUMO_OLLAMA_URL, mo, requests
@app.cell
def _(Client, QUMO_OLLAMA_URL, requests):
try:
requests.get(QUMO_OLLAMA_URL, timeout=5)
client = Client(
host=QUMO_OLLAMA_URL
)
except requests.ConnectionError:
print(f"Failed to reach {QUMO_OLLAMA_URL}. Check that the VM is running and Tailscale is up")
return
client = connect_qumo_ollama(VM_NAME)
return (mo,)
@app.cell(hide_code=True)

View File

@@ -7,33 +7,13 @@ app = marimo.App(width="medium")
@app.cell
def _():
import marimo as mo
import requests
import ollama
from ollama import Client
from utils import connect_qumo_ollama
VM_NAME = 'hiperf-gpu'
#VM_NAME = 'ollama-vb'
# VM_NAME = 'hiperf-gpu'
VM_NAME = 'ollama-lite'
QUMO_OLLAMA_URL = f'http://{VM_NAME}.tail44fa00.ts.net:11434'
return Client, QUMO_OLLAMA_URL, VM_NAME, mo, requests
@app.cell
def _(Client, QUMO_OLLAMA_URL, requests):
try:
requests.get(QUMO_OLLAMA_URL, timeout=5)
client = Client(
host=QUMO_OLLAMA_URL
)
except requests.ConnectionError:
print(f"Failed to reach {QUMO_OLLAMA_URL}. Check that the VM is running and Tailscale is up")
return (client,)
@app.cell
def _():
print("Hello")
return
client = connect_qumo_ollama(VM_NAME)
return VM_NAME, client, mo
@app.cell(hide_code=True)
@@ -82,7 +62,7 @@ def _(mo):
@app.cell
def _(client):
response_chat = client.chat(model='deepseek-r1:32b', messages=[
response_chat = client.chat(model='gemini-3-pro-preview:latest', messages=[
{
'role': 'user',
'content': 'Why is the sky blue?',
@@ -109,7 +89,7 @@ def _(mo):
@app.cell
def _(client):
response_generate = client.generate(model='deepseek-r1:7b', prompt='Why is the sky blue?')
response_generate = client.generate(model='deepseek-r1:32b', prompt='Why is the sky blue?')
return (response_generate,)

View File

@@ -0,0 +1,4 @@
{
"type": "slides",
"data": {}
}

32
utils.py Normal file
View File

@@ -0,0 +1,32 @@
"""
Standard utils for this repository
"""
import requests
import ollama
from ollama import Client
def connect_qumo_ollama(vm_name: str = 'ollama-lite') -> Client:
    """Establish a connection to a Qumo Ollama instance.

    Parameters
    ----------
    vm_name : str ('ollama-lite' or 'hiperf-gpu')
        Name of the VM running the Ollama instance.

    Returns
    -------
    Client
        Ollama client connected to the specified VM.

    Raises
    ------
    requests.ConnectionError
        If the Ollama URL cannot be reached (VM down or Tailscale not up).
    """
    qumo_ollama_url = f'http://{vm_name}.tail44fa00.ts.net:11434'
    try:
        # Cheap reachability probe before constructing the client; the
        # response body is irrelevant, only the connection outcome matters.
        requests.get(qumo_ollama_url, timeout=5)
    except requests.ConnectionError:
        print(f"Failed to reach {qumo_ollama_url}. Check that the VM is running and Tailscale is up")
        # Re-raise instead of falling through: the original code continued
        # to the success path with `client` unbound, raising a NameError.
        raise
    client = Client(
        host=qumo_ollama_url
    )
    print("Connection successful.\nAvailable models:")
    for m in client.list().models:
        print(f" - '{m.model}' ")
    return client