From 99e36a7a0def529c3924c54156704044da3095d7 Mon Sep 17 00:00:00 2001
From: Svjatoslav Agejenko
Date: Thu, 25 Dec 2025 23:25:41 +0200
Subject: [PATCH] Update documentation and examples to replace `llama-cli`
 with the `llama-completion` executable.

---
 doc/examples/alyverkko-cli.yaml |  2 +-
 doc/index.org                   | 27 ++++++++++++++-------------
 2 files changed, 15 insertions(+), 14 deletions(-)

diff --git a/doc/examples/alyverkko-cli.yaml b/doc/examples/alyverkko-cli.yaml
index b84f709..cdae0b2 100644
--- a/doc/examples/alyverkko-cli.yaml
+++ b/doc/examples/alyverkko-cli.yaml
@@ -1,7 +1,7 @@
 tasks_directory: "/home/john/AI/tasks"
 models_directory: "/home/john/AI/models"
 skills_directory: "/home/john/.config/alyverkko-cli/skills"
-llama_cli_path: "/home/john/AI/llama.cpp/build/bin/llama-cli"
+llama_cli_path: "/home/john/AI/llama.cpp/build/bin/llama-completion"
 
 default_temperature: 0.7
 
diff --git a/doc/index.org b/doc/index.org
index 30e46bf..71bce0c 100644
--- a/doc/index.org
+++ b/doc/index.org
@@ -414,7 +414,7 @@ on consumer hardware - a 4-bit quantized 70B model requires "only"
 
 /llama.cpp/ is the open-source inference engine that powers Älyverkko
 CLI's CPU-based AI processing. It's a critical dependency, in
-particular a standalone executable (=llama-cli=) that handles:
+particular a standalone executable (=llama-completion=) that handles:
 
 - Loading GGUF format models
 - Tokenization and detokenization
@@ -427,13 +427,14 @@ Key features enabling Älyverkko CLI's functionality:
 - Batched/unattended processing capabilities
 - Cross-platform compatibility
 
-Älyverkko CLI acts as a sophisticated wrapper around llama.cpp,
-managing the complex workflow of task processing while leveraging
-llama.cpp's efficient inference capabilities. The =llama_cli_path=
-configuration specifies where to find this executable, which must be
-built separately from source to optimize for your specific
-CPU. Without llama.cpp, Älyverkko CLI couldn't execute any AI tasks -
-it's the actual "brain" behind the system.
+Älyverkko CLI acts as a sophisticated wrapper around llama.cpp's
+*llama-completion* executable, managing the complex workflow of
+task processing while leveraging llama.cpp's efficient inference
+capabilities. The =llama_cli_path= configuration specifies where to
+find this executable, which must be built separately from source to
+optimize for your specific CPU. Without llama.cpp, Älyverkko CLI
+couldn't execute any AI tasks - it's the actual "brain" behind the
+system.
 
 ** Important files and directories
 *** Configuration File
@@ -884,7 +885,7 @@ Configuration file should be placed under current user home directory:
 
 - =skills_directory=: Contains YAML skill definition files.
 
-- =llama_cli_path=: Path to llama.cpp's executable.
+- =llama_cli_path=: Path to llama.cpp's *llama-completion* executable.
 
 **** Generation Parameters
 
@@ -951,10 +952,10 @@ The application is configured using a YAML-formatted configuration
 file. Below is an example of how the configuration file might look:
 
 #+begin_src yaml
-  tasks_directory: "/home/user/AI/tasks"
-  models_directory: "/home/user/AI/models"
-  skills_directory: "/home/user/AI/skills"
-  llama_cli_path: "/home/user/AI/llama.cpp/build/bin/llama-cli"
+  tasks_directory: "/home/john/AI/tasks"
+  models_directory: "/home/john/AI/models"
+  skills_directory: "/home/john/AI/skills"
+  llama_cli_path: "/home/john/AI/llama.cpp/build/bin/llama-completion"
 
   # Generation parameters
   default_temperature: 0.7
-- 
2.20.1
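
A note for anyone applying this patch: below is a minimal sketch of
producing the =llama-completion= binary that =llama_cli_path= now
points to, assuming a standard CMake build of llama.cpp (the
repository URL and the ~/AI/llama.cpp location are illustrative,
chosen to mirror the example configuration above):

    # Build llama.cpp from source; adjust paths to your setup
    git clone https://github.com/ggml-org/llama.cpp ~/AI/llama.cpp
    cd ~/AI/llama.cpp
    cmake -B build
    cmake --build build --config Release

    # The executable referenced by llama_cli_path should now exist
    ls ~/AI/llama.cpp/build/bin/llama-completion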