diff --git a/.github/workflows/generate-release.yml b/.github/workflows/generate-release.yml
index ebcd0c988..82d08d0b4 100644
--- a/.github/workflows/generate-release.yml
+++ b/.github/workflows/generate-release.yml
@@ -17,7 +17,7 @@ jobs:
 
     strategy:
       matrix:
-        type: [ llamacpp, ollama ]
+        type: [ llamacpp-cpu, ollama ]
 
     permissions:
       contents: read
diff --git a/Dockerfile.llamacpp b/Dockerfile.llamacpp-cpu
similarity index 100%
rename from Dockerfile.llamacpp
rename to Dockerfile.llamacpp-cpu
diff --git a/docker-compose.yaml b/docker-compose.yaml
index 8c6943813..a5df4647e 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -7,7 +7,7 @@ services:
   # Private-GPT service for the Ollama CPU and GPU modes
   # This service builds from an external Dockerfile and runs the Ollama mode.
   private-gpt-ollama:
-    image: ${PGPT_IMAGE:-zylonai/private-gpt}${PGPT_TAG:-0.6.1}-external
+    image: ${PGPT_IMAGE:-zylonai/private-gpt}${PGPT_TAG:-0.6.1}-ollama
     build:
       context: .
       dockerfile: Dockerfile.ollama
@@ -30,11 +30,11 @@ services:
 
   # Private-GPT service for the local mode
   # This service builds from a local Dockerfile and runs the application in local mode.
-  private-gpt-local:
-    image: ${PGPT_IMAGE:-zylonai/private-gpt}${PGPT_TAG:-0.6.1}-local
+  private-gpt-llamacpp-cpu:
+    image: ${PGPT_IMAGE:-zylonai/private-gpt}${PGPT_TAG:-0.6.1}-llamacpp-cpu
     build:
       context: .
-      dockerfile: Dockerfile.llamacpp
+      dockerfile: Dockerfile.llamacpp-cpu
     volumes:
       - ./local_data/:/home/worker/app/local_data
       - ./models/:/home/worker/app/models
@@ -46,7 +46,7 @@ services:
       PGPT_PROFILES: local
       HF_TOKEN: ${HF_TOKEN}
     profiles:
-      - llamacpp
+      - llamacpp-cpu
 
 #-----------------------------------
 #---- Ollama services --------------
diff --git a/fern/docs/pages/quickstart/quickstart.mdx b/fern/docs/pages/quickstart/quickstart.mdx
index 70423e453..9bcb8804b 100644
--- a/fern/docs/pages/quickstart/quickstart.mdx
+++ b/fern/docs/pages/quickstart/quickstart.mdx
@@ -78,7 +78,7 @@ A **Hugging Face Token (HF_TOKEN)** is required for accessing Hugging Face model
 **Run:** Start the services with your Hugging Face token using pre-built images:
 
 ```sh
-HF_TOKEN=<your_token> docker-compose up --profile llamacpp
+HF_TOKEN=<your_token> docker-compose up --profile llamacpp-cpu
 ```
 
 Replace `<your_token>` with your actual Hugging Face token.