diff --git a/charts/ialacol/Chart.yaml b/charts/ialacol/Chart.yaml
index 8cf5f32400f76fb72661a39d31114ebcc7e2f4ec..c404da0c256faaa59daf2bc053307842e1efb083 100644
--- a/charts/ialacol/Chart.yaml
+++ b/charts/ialacol/Chart.yaml
@@ -1,6 +1,6 @@
 apiVersion: v2
-appVersion: 0.11.0
+appVersion: 0.11.1
 description: A Helm chart for ialacol
 name: ialacol
 type: application
-version: 0.11.0
+version: 0.11.1
diff --git a/charts/ialacol/templates/deployment.yaml b/charts/ialacol/templates/deployment.yaml
index f17c2cf967700d59d9eff6d3ac93ed425c3bcd8a..acd1b4f5c08767c8837107ded11e890f665aa6c4 100644
--- a/charts/ialacol/templates/deployment.yaml
+++ b/charts/ialacol/templates/deployment.yaml
@@ -57,6 +57,8 @@ spec:
             value: {{ (.Values.deployment.env).GPU_LAYERS | quote }}
           - name: MODE_TYPE
             value: {{ (.Values.deployment.env).MODE_TYPE | quote }}
+          - name: TRUNCATE_PROMPT_LENGTH
+            value: {{ (.Values.deployment.env).TRUNCATE_PROMPT_LENGTH | quote }}
       volumeMounts:
         - mountPath: /app/models
           name: model
diff --git a/charts/ialacol/values.yaml b/charts/ialacol/values.yaml
index 951bcd7614ff2c7cebbe1e30b88815f03225e9ca..08e45a848a92a1e1a260e4076361996ce656306d 100644
--- a/charts/ialacol/values.yaml
+++ b/charts/ialacol/values.yaml
@@ -1,7 +1,7 @@
 replicas: 1
 deployment:
-  image: quay.io/chenhunghan/ialacol:latest
+  image: ghcr.io/chenhunghan/ialacol:latest
   # or use CUDA image `ghcr.io/chenhunghan/ialacol-cuda12:latest`
   # env:
   #   DEFAULT_MODEL_HG_REPO_ID: TheBloke/Llama-2-7B-Chat-GGML
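Usage note: since the deployment template now reads TRUNCATE_PROMPT_LENGTH from deployment.env, it should be settable in values.yaml alongside the existing environment variables. A minimal sketch, assuming the same values layout shown above; the 2048 limit is only an illustrative value, not a chart default:

deployment:
  image: ghcr.io/chenhunghan/ialacol:latest
  env:
    DEFAULT_MODEL_HG_REPO_ID: TheBloke/Llama-2-7B-Chat-GGML
    TRUNCATE_PROMPT_LENGTH: 2048  # illustrative value; adjust to the model's context length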