"""
Requirements:
Install Ollama ──────────────────────────────────────────
Download from https://ollama.com/download

# Linux (Install and run the service)
curl -fsSL https://ollama.com/install.sh | sh

# macOS
# Download the .dmg file and drag it in /Applications

# Windows
# Download the .exe file and install it

# Download (pull) an LLM
ollama pull qwen3.5:4b

# Verify that ollama server is up and running
ollama list

# Install the Ollama Python package
pip install ollama
─────────────────────────────────────────────────────────
"""
import ollama

# Demo: send a zero-shot chain-of-thought prompt to a locally running
# Ollama model and print the model's reply.

task = (
    "A train travels 120 km in 1.5 h.\n"
    "A car covers the same distance in 2 h.\n"
    "How much faster is the train (km/h) than the car?"
)

# Appending this instruction triggers zero-shot chain-of-thought reasoning.
cot_suffix = "\n\nThink step by step, then give the Answer."
prompt = task + cot_suffix
print(prompt)

model = "qwen3.5:4b"  # a larger tag (e.g. "qwen3.5:9b") works too, if pulled

chat_messages = [{"role": "user", "content": prompt}]
response = ollama.chat(model=model, messages=chat_messages)

print(response["message"]["content"])
