diff --git a/.gitignore b/.gitignore index 45d1bfc..e56a089 100644 --- a/.gitignore +++ b/.gitignore @@ -8,4 +8,6 @@ # Ignore Jupyter Notebook checkpoints .ipynb_checkpoints/ -__marimo__ \ No newline at end of file +__marimo__ +__pycache__/ + diff --git a/Sentiment_Analysis_Research.py b/Sentiment_Analysis_Research.py index 116635e..5e36c39 100644 --- a/Sentiment_Analysis_Research.py +++ b/Sentiment_Analysis_Research.py @@ -7,27 +7,13 @@ app = marimo.App(width="medium") @app.cell def _(): import marimo as mo - import requests - import ollama - from ollama import Client + from utils import connect_qumo_ollama - VM_NAME = 'hiperf-gpu' - #VM_NAME = 'ollama-vb' + # VM_NAME = 'hiperf-gpu' + VM_NAME = 'ollama-lite' - QUMO_OLLAMA_URL = f'http://{VM_NAME}.tail44fa00.ts.net:11434' - return Client, QUMO_OLLAMA_URL, mo, requests - - -@app.cell -def _(Client, QUMO_OLLAMA_URL, requests): - try: - requests.get(QUMO_OLLAMA_URL, timeout=5) - client = Client( - host=QUMO_OLLAMA_URL - ) - except requests.ConnectionError: - print(f"Failed to reach {QUMO_OLLAMA_URL}. 
Check that the VM is running and Tailscale is up") - return + client = connect_qumo_ollama(VM_NAME) + return (mo,) @app.cell(hide_code=True) diff --git a/VB_interviews_sandbox.py b/VB_interviews_sandbox.py index 11b6e66..363551b 100644 --- a/VB_interviews_sandbox.py +++ b/VB_interviews_sandbox.py @@ -7,33 +7,13 @@ app = marimo.App(width="medium") @app.cell def _(): import marimo as mo - import requests - import ollama - from ollama import Client + from utils import connect_qumo_ollama - VM_NAME = 'hiperf-gpu' - #VM_NAME = 'ollama-vb' + # VM_NAME = 'hiperf-gpu' + VM_NAME = 'ollama-lite' - QUMO_OLLAMA_URL = f'http://{VM_NAME}.tail44fa00.ts.net:11434' - return Client, QUMO_OLLAMA_URL, VM_NAME, mo, requests - - -@app.cell -def _(Client, QUMO_OLLAMA_URL, requests): - try: - requests.get(QUMO_OLLAMA_URL, timeout=5) - client = Client( - host=QUMO_OLLAMA_URL - ) - except requests.ConnectionError: - print(f"Failed to reach {QUMO_OLLAMA_URL}. Check that the VM is running and Tailscale is up") - return (client,) - - -@app.cell -def _(): - print("Hello") - return + client = connect_qumo_ollama(VM_NAME) + return VM_NAME, client, mo @app.cell(hide_code=True) @@ -82,7 +62,7 @@ def _(mo): @app.cell def _(client): - response_chat = client.chat(model='deepseek-r1:32b', messages=[ + response_chat = client.chat(model='gemini-3-pro-preview:latest', messages=[ { 'role': 'user', 'content': 'Why is the sky blue?', @@ -109,7 +89,7 @@ def _(mo): @app.cell def _(client): - response_generate = client.generate(model='deepseek-r1:7b', prompt='Why is the sky blue?') + response_generate = client.generate(model='deepseek-r1:32b', prompt='Why is the sky blue?') return (response_generate,) diff --git a/layouts/VB_interviews_sandbox.slides.json b/layouts/VB_interviews_sandbox.slides.json new file mode 100644 index 0000000..af4970a --- /dev/null +++ b/layouts/VB_interviews_sandbox.slides.json @@ -0,0 +1,4 @@ +{ + "type": "slides", + "data": {} +} \ No newline at end of file diff --git a/utils.py 
b/utils.py
new file mode 100644
index 0000000..4ab1c1d
--- /dev/null
+++ b/utils.py
@@ -0,0 +1,32 @@
+"""
+Standard utils for this repository
+"""
+
+import requests
+import ollama
+from ollama import Client
+
+
+def connect_qumo_ollama(vm_name: str = 'ollama-lite') -> Client:
+    """Establish connection to Qumo Ollama instance
+
+    vm_name: str ('ollama-lite' or 'hiperf-gpu')
+        Name of the VM running the Ollama instance
+
+    Returns:
+        Client: Ollama client connected to the specified VM
+    """
+    QUMO_OLLAMA_URL = f'http://{vm_name}.tail44fa00.ts.net:11434'
+    try:
+        requests.get(QUMO_OLLAMA_URL, timeout=5)
+        client = Client(
+            host=QUMO_OLLAMA_URL
+        )
+    except requests.ConnectionError:
+        raise ConnectionError(f"Failed to reach {QUMO_OLLAMA_URL}. Check that the VM is running and Tailscale is up")
+
+    print("Connection successful.\nAvailable models:")
+    for m in client.list().models:
+        print(f"  - '{m.model}' ")
+    return client
+