Download the model
ollama pull qwen:0.5b
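To confirm the pull succeeded without opening an interactive session, one option is to query Ollama's /api/tags endpoint, which lists the models installed locally. A minimal sketch, assuming the default local port and Python's requests library:

import requests

# Ask the local Ollama server which models it has pulled.
tags = requests.get("http://127.0.0.1:11434/api/tags", timeout=5).json()
print([m["name"] for m in tags["models"]])  # should include 'qwen:0.5b'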
Run it locally
ollama run qwen:0.5b
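ollama run opens an interactive chat in the terminal; the programmatic equivalent is a POST to the /api/generate endpoint. A minimal sketch, again assuming the default port; the prompt text here is only an example:

import requests

# Non-streaming generation request against the local server.
resp = requests.post(
    "http://127.0.0.1:11434/api/generate",
    json={"model": "qwen:0.5b", "prompt": "Hello", "stream": False},
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["response"])  # the model's completion text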
Connecting over the LAN
Change the listening IP
vim /etc/systemd/system/ollama.service
Add the following line to the [Service] section, so Ollama binds to all interfaces instead of only the loopback address:
Environment="OLLAMA_HOST=0.0.0.0:11434"
The resulting unit file:
cat /etc/systemd/system/ollama.service
[Unit]
Description=Ollama Service
After=network-online.target
[Service]
ExecStart=/usr/local/bin/ollama serve
User=ollama
Group=ollama
Restart=always
RestartSec=3
Environment="PATH=/usr/local/cuda-12.5/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin"
Environment="OLLAMA_HOST=0.0.0.0:11434"
[Install]
WantedBy=default.target
Reload systemd and restart the service for the change to take effect:
systemctl daemon-reload
systemctl restart ollama.service
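After restarting, you can verify from another machine that the server is reachable over the LAN. Ollama's root endpoint replies with the plain text "Ollama is running"; the address 172.10.10.2 matches the test script below and will differ on your network:

import requests

# Replace 172.10.10.2 with your server's LAN address.
print(requests.get("http://172.10.10.2:11434", timeout=5).text)  # expect: "Ollama is running"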
Test script (Python)
from langchain_community.chat_models import ChatOllama
from langchain_core.messages import HumanMessage

# Point the client at the Ollama server's LAN address and the pulled model.
ollama_llm = ChatOllama(model="qwen:0.5b", base_url="http://172.10.10.2:11434")

messages = [
    HumanMessage(content="Hello, please introduce yourself"),
]

chat_model_response = ollama_llm.invoke(messages)
print(chat_model_response)  # an AIMessage; use .content for just the reply text
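For longer replies you may prefer token-by-token output. ChatOllama inherits stream() from LangChain's chat-model interface, so a sketch like the following should work with the same ollama_llm instance and messages as above:

# Stream the reply chunk by chunk instead of waiting for the full message.
for chunk in ollama_llm.stream(messages):
    print(chunk.content, end="", flush=True)
print()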