diff --git a/ai-coder.sh b/ai-coder.sh
index a6ddf1d..7d2012d 100644
--- a/ai-coder.sh
+++ b/ai-coder.sh
@@ -1,8 +1,10 @@
# Load config
. ${HOME}/ayo.conf
+model=qwen3-coder:30b
+
if ! [ "$2" = "" ]; then
- ollama run qwen3-coder:30b "$sys_prompt...
-beginning prompt...
-$@"
+ ollama run $model "$coder_prompt...
+beginning prompt...
+$@" --hidethinking
else
- ollama run qwen3-coder:30b
+ ollama run $model --hidethinking
fi
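
For reference, this is roughly what ai-coder.sh reads like once the hunk is applied; the "..." placeholders stand for prompt text elided from the diff, and $coder_prompt is assumed to be defined in ayo.conf:

    # Load config
    . ${HOME}/ayo.conf
    model=qwen3-coder:30b

    if ! [ "$2" = "" ]; then
      # One-shot run: prepend the coder prompt to the arguments
      ollama run $model "$coder_prompt... beginning prompt... $@" --hidethinking
    else
      # No prompt given: drop into an interactive session
      ollama run $model --hidethinking
    fi
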
diff --git a/ai.sh b/ai.sh
index b70915e..282d1d2 100644
--- a/ai.sh
+++ b/ai.sh
@@ -1,6 +1,9 @@
# Load config
. ${HOME}/ayo.conf
+# model=deepseek-r1:8b
+model=qwen3-coder:30b
+
if ! [ "$2" = "" ]; then
if [ "$2" = "wake" ]; then
. $HOME/llm_env/bin/activate
@@ -8,8 +11,8 @@ if ! [ "$2" = "" ]; then
. $HOME/intel/oneapi/setvars.sh
$HOME/llama-cpp/ollama serve
else
- ollama run deepseek-r1:8b "$sys_prompt...
-beginning prompt...
-$@" --hidethinking
+ ollama run $model "$chat_prompt...
+beginning prompt...
+$@" --hidethinking
fi
else
- ollama run deepseek-r1:8b --hidethinking
+ ollama run $model --hidethinking
fi
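
Both scripts only work if ayo.conf actually defines the prompt variables they expand. A minimal sketch of that config, assuming only the variable names used above (the prompt wording is purely illustrative):

    # ${HOME}/ayo.conf -- sourced by ai.sh and ai-coder.sh
    # Only the variable names come from the scripts; the text below is illustrative.
    chat_prompt="You are a concise general-purpose assistant."
    coder_prompt="You are a coding assistant. Answer with working code and short explanations."

With this change the old $sys_prompt variable is no longer referenced by either script, so it can be dropped from the config or kept for other tools.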