diff --git a/Dockerfile b/Dockerfile
index c898767..cce06a0 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM python:3.11-alpine
+FROM python:3.11.6-alpine
RUN apk add --no-cache build-base libffi-dev
RUN pip install poetry
diff --git a/README.md b/README.md
index 00908a6..34e2950 100644
--- a/README.md
+++ b/README.md
@@ -4,62 +4,38 @@
## Dependencies
-- Redis
-
-```
-python3 -m pip install poetry
-poetry install
-```
+- Docker and Docker Compose
## Run `discollama.py`
```
-poetry run python discollama.py
+DISCORD_TOKEN=xxxxx docker compose up
```
-_Note: You must setup a [Discord Bot](https://discord.com/developers/applications) and set environment variable `DISCORD_TOKEN` before `discollama.py` can access Discord._
+> Note: You must set up a [Discord Bot](https://discord.com/developers/applications) and set the environment variable `DISCORD_TOKEN` before `discollama.py` can access Discord.
-`discollama.py` requires an [Ollama](https://github.com/jmorganca/ollama) server. Follow the steps in the [Ollama](https://github.com/jmorganca/ollama) repository to setup Ollama.
+`discollama.py` requires an [Ollama](https://github.com/jmorganca/ollama) server. Follow the steps in the [jmorganca/ollama](https://github.com/jmorganca/ollama) repository to set up Ollama.
-By default, it uses `127.0.0.1:11434` but this can be configured with command line parameters `--ollama-host` and `--ollama-port`.
+By default, it uses `127.0.0.1:11434`, which can be overridden with `OLLAMA_HOST`.
+
+> Note: Deploying this on Linux requires updating network configurations and `OLLAMA_HOST`.
## Customize `discollama.py`
-The default LLM is `llama2`. A custom personality can be added by changing the `SYSTEM` instruction in the Modelfile and running `ollama create`:
+The default LLM is `mike/discollama`. A custom personality can be added by changing the `SYSTEM` instruction in the Modelfile and running `ollama create`:
```
-ollama create discollama -f Modelfile
+ollama create mymodel -f Modelfile
```
-This is set in `discollama.py` through `--ollama-model`:
+This can be changed in `compose.yaml`:
```
-poetry run python discollama.py --ollama-model discollama
+environment:
+ - OLLAMA_MODEL=mymodel
```
-Additional LLM parameters can be set in the same Modelfile through `PARAMETER` instructions:
-
-```
-FROM llama2
-
-PARAMETER temperature 2
-PARAMETER stop [INST]
-PARAMETER stop [/INST]
-PARAMETER stop <>
-PARAMETER stop <>
-```
-
-If customizing the system prompt is not enough, you can configure the full prompt template:
-
-```
-FROM llama2
-
-TEMPLATE """[INST] {{ if .First }}<>{{ .System }}<>
-
-{{ end }} Tweet: 'I hate it when my phone battery dies.' [/INST] Sentiment: Negative [INST] Tweet: 'My day has been 👍' [/INST] Sentiment: Positive [INST] Tweet: 'This is the link to the article' [/INST] Sentiment: Neutral [INST] Tweet: '{{ .Prompt }}' [/INST] Sentiment: """
-```
-
-This model replies with the sentiment of the the input prompt: positive, negative, or neutral.
+See [jmorganca/ollama](https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md) for more details.
## Activating the Bot
diff --git a/compose.yaml b/compose.yaml
index c155060..1e185ba 100644
--- a/compose.yaml
+++ b/compose.yaml
@@ -8,9 +8,9 @@ services:
command: ['--redis-host', 'redis']
environment:
- DISCORD_TOKEN
- - OLLAMA_HOST=host.docker.internal
+ - OLLAMA_HOST
- OLLAMA_PORT=11434
- - OLLAMA_MODEL=discollama
+ - OLLAMA_MODEL=mike/discollama
redis:
image: redis/redis-stack-server:latest