# Interview-Analysis/VB_interviews_sandbox.py
# marimo notebook (108 lines, 1.9 KiB, Python)
import marimo

# Version of marimo that generated/serialized this notebook file.
__generated_with = "0.18.0"

# Top-level marimo app; each cell below registers itself via @app.cell.
app = marimo.App(width="medium")
@app.cell
def _():
    # Expose marimo to downstream cells under the conventional alias `mo`.
    import marimo as mo

    return (mo,)
@app.cell
def _():
    # Build a client for the remote Ollama server.
    #
    # The host can be overridden via the OLLAMA_HOST environment variable;
    # when unset, it falls back to the original hard-coded tailnet address,
    # so existing behavior is unchanged.
    import os

    import ollama
    from ollama import Client

    client = Client(
        host=os.environ.get(
            "OLLAMA_HOST", 'http://ollama-vb.tail44fa00.ts.net:11434'
        )
    )
    return (client,)
@app.cell(hide_code=True)
def _(mo):
    # Reference notes rendered as markdown; code is hidden in the notebook UI.
    mo.md(r"""
# Ollama Reference
## Ollama Web-UI: http://ollama-vb.tail44fa00.ts.net:3000
Use the UI to modify system prompts, custom models, etc...
## Ollama Python
Docs: https://github.com/ollama/ollama-python
Use the code below to programmatically interact with the models. E.g: create a small pipeline that loads a transcript and inserts it into the prompt. Helpful if we need to analyze 26 interviews...
**Important Definitions:**
- **Generate**: post a single message and get a response.
- **Chat**: post a single message and the previous chat history, and get a response
""")
    return
@app.cell
def _(client):
    # Display the models currently available on the Ollama server.
    client.list().models
    return
@app.cell(hide_code=True)
def _(mo):
    # Section header for the sandbox comparison below.
    mo.md(r"""
# Sandbox Generate vs. Chat
""")
    return
@app.cell(hide_code=True)
def _(mo):
    # Sub-header for the chat-endpoint example.
    mo.md(r"""
## Chat
""")
    return
@app.cell
def _(client):
    # Chat endpoint: send a list of role/content messages to the model.
    # (Underscore-prefixed names stay local to this marimo cell.)
    _messages = [
        {
            'role': 'user',
            'content': 'Why is the sky blue?',
        },
    ]
    response_chat = client.chat(model='deepseek-r1:7b', messages=_messages)
    return (response_chat,)
@app.cell
def _(mo, response_chat):
    # Render the chat reply's text content as markdown.
    mo.md(rf"""
{response_chat.message.content}
""")
    return
@app.cell(hide_code=True)
def _(mo):
    # Sub-header for the generate-endpoint example.
    mo.md(r"""
## Generate
""")
    return
@app.cell
def _(client):
    # Generate endpoint: one-shot prompt, no chat history carried along.
    _prompt = 'Why is the sky blue?'
    response_generate = client.generate(model='deepseek-r1:7b', prompt=_prompt)
    return (response_generate,)
@app.cell
def _(mo, response_generate):
    # Render the generate reply's text as markdown.
    mo.md(rf"""
{response_generate.response}
""")
    return
# Allow the notebook to be executed directly as a script.
if __name__ == "__main__":
    app.run()