feat(ai): add brainstorm model & script to unload model from memory
This commit is contained in:
parent
0047f58726
commit
d19dd085f5
5 changed files with 56 additions and 15 deletions
17
ai-brainstorm.sh
Normal file
17
ai-brainstorm.sh
Normal file
|
@ -0,0 +1,17 @@
|
|||
# ai-brainstorm.sh — run the brainstorm model via ollama, with a "sleep"
# subcommand to unload it from memory.
# Usage (sourced from ayo.sh, so $1 is the subcommand name):
#   $2 = "sleep"  -> stop the model (frees its memory)
#   $2 = <prompt> -> one-shot prompt with a wall-clock timing report
#   no $2         -> interactive chat session
model=brainstorm:8b

if [ -n "$2" ]; then
  if [ "$2" = "sleep" ]; then
    # Unload the model from memory.
    ollama stop "$model"
  else
    # Time the one-shot prompt; %s%N needs GNU date (nanoseconds).
    start_time=$(date +%s%N)
    ollama run "$model" "$@"
    end_time=$(date +%s%N)
    duration=$((end_time - start_time))
    # bc keeps fractional precision: ns -> ms -> s.
    duration_ms=$(echo "scale=3; $duration / 1000000" | bc)
    duration_s=$(echo "scale=3; $duration_ms / 1000" | bc)
    echo "Model $model took $duration_s s"
  fi
else
  # No prompt given: open an interactive session.
  ollama run "$model"
fi
|
11
ai-coder.sh
11
ai-coder.sh
|
@ -1,10 +1,11 @@
|
|||
# ai-coder.sh — run the coder model via ollama with thinking output hidden.
# Load config
. "${HOME}/ayo.conf"

model=coder:30b

# $2 = "sleep" unloads the model; any other non-empty $2 is treated as a
# prompt; with no $2 an interactive session is opened.
if [ -n "$2" ]; then
  if [ "$2" = "sleep" ]; then
    # Unload the model from memory.
    ollama stop "$model"
  else
    ollama run "$model" "$@" --hidethinking
  fi
else
  # Interactive session.
  ollama run "$model" --hidethinking
fi
|
||||
|
|
38
ai.sh
38
ai.sh
|
@ -1,17 +1,37 @@
|
|||
# ai.sh — helper-model front end plus service-management subcommands.
# Load config
. "${HOME}/ayo.conf"

# model=deepseek-r1:8b
model=helper:8b

# Subcommands ($2): open-webui | wake | sleep | <prompt>; none -> interactive.
if [ -n "$2" ]; then
  if [ "$2" = "open-webui" ]; then
    # Launch the Open WebUI frontend inside its own virtualenv.
    . "$HOME/open-webui/.venv/bin/activate"
    open-webui serve
    deactivate

  elif [ "$2" = "wake" ]; then
    # Bring up the ollama server with Intel GPU (SYCL/oneAPI) support.
    . "$HOME/llm_env/bin/activate"
    . "$HOME/llama-cpp/env.conf"
    # Source the oneAPI environment exactly once (it was sourced twice
    # before, which is redundant and slow).
    . "$HOME/intel/oneapi/setvars.sh"

    export OLLAMA_NUM_GPU=999
    export no_proxy=localhost,127.0.0.1
    export ZES_ENABLE_SYSMAN=1
    export SYCL_CACHE_PERSISTENT=1

    "$HOME/llama-cpp/ollama" serve
    deactivate

  elif [ "$2" = "sleep" ]; then
    # Unload the model from memory.
    ollama stop "$model"
  else
    # One-shot prompt with a wall-clock timing report (GNU date %N).
    start_time=$(date +%s%N)
    ollama run "$model" "$@" --hidethinking
    end_time=$(date +%s%N)
    duration=$((end_time - start_time))
    duration_ms=$(echo "scale=3; $duration / 1000000" | bc)
    duration_s=$(echo "scale=3; $duration_ms / 1000" | bc)
    echo "Model $model took $duration_s s"
  fi
else
  # Interactive session.
  ollama run "$model" --hidethinking
fi
|
||||
|
|
3
ayo.sh
3
ayo.sh
|
@ -70,6 +70,9 @@ case $1 in
|
|||
|
||||
## SCRIPTS
|
||||
|
||||
ai-brainstorm)
  # Delegate to the brainstorm helper script (sourced so it shares this
  # shell's environment; "$@" forwards all arguments intact).
  . "${scripts_dir}/ai-brainstorm.sh" "$@"
  ;;
ai-coder)
  # Delegate to the coder helper script (sourced, forwards "$@").
  . "${scripts_dir}/ai-coder.sh" "$@"
  ;;
|
||||
|
|
|
@ -42,7 +42,7 @@ function main() {
|
|||
}
|
||||
|
||||
# Time the whole invocation; %s%N needs GNU date (nanoseconds).
start_time=$(date +%s%N)
# Quote "$@" so arguments containing spaces reach main() as separate,
# intact words (unquoted $@ re-splits them).
main "$@"
end_time=$(date +%s%N)
duration=$((end_time - start_time))
duration_ms=$(echo "scale=3; $duration / 1000000" | bc)
|
||||
|
|
Loading…
Reference in a new issue