# Welcome to the sample config file
# Below you will find various config sections and options
# Default values are shown
# The string to prefix messages with to talk to the bot in group chats
command_prefix: "!c"
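# For example, with the default prefix a group-chat message such as
#   !c What is the Matrix protocol?
# would be treated as addressed to the bot.
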
# Options for connecting to the bot's Matrix account
matrix:
  # The Matrix User ID of the bot account
  user_id: "@bot:example.com"
  # Matrix account password (optional if access token used)
  user_password: ""
  # Matrix account access token (optional if password used)
  #user_token: ""
  # The URL of the homeserver to connect to
  homeserver_url: https://example.com
  # The device ID to log in with; use an ID that does **not** already exist
  # If this device ID already exists, messages will be silently dropped in encrypted rooms
  device_id: ABCDEFGHIJ
  # What to name the logged in device
  device_name: llm-to-matrix

storage:
  # The database connection string
  # For SQLite3, this would look like:
  #   database: "sqlite://bot.db"
  # For Postgres, this would look like:
  #   database: "postgres://username:password@localhost/dbname?sslmode=disable"
  database: "sqlite://bot.db"
  # The path to a directory for internal bot storage
  # containing encryption keys, sync tokens, etc.
  store_path: "./store"

# Logging setup
logging:
  # Logging level
  # Allowed levels are 'INFO', 'WARNING', 'ERROR', 'DEBUG' where DEBUG is most verbose
  level: INFO
  # Configure logging to a file
  file_logging:
    # Whether logging to a file is enabled
    enabled: false
    # The path to the file to log to. May be relative or absolute
    filepath: bot.log
  # Configure logging to the console output
  console_logging:
    # Whether logging to the console is enabled
    enabled: true

# Default llm values (based on ollama params)
llm:
  # Defines the name of the LLM instance
  llm_name: "Chatbot"
  # Specifies the base URL where the LLM service is hosted.
  llm_base_url: http://localhost:11434/ # http://host.docker.internal:11434/
  # The endpoint suffix used for generating responses. Combined with the base URL, it forms the complete URL of the generate API.
  llm_url_suffix: "/api/generate"
  # The endpoint suffix for listing the models (tags) available on the LLM service.
  llm_tags_suffix: "/api/tags"
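  # For illustration, combining the default base URL and suffixes above yields
  # roughly these endpoints (the bot is assumed to normalize the double slash):
  #   generate: http://localhost:11434/api/generate
  #   tags:     http://localhost:11434/api/tags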
  # Sets the temperature parameter for the LLM. Temperature controls randomness in response generation:
  # higher values produce more varied output, lower values more deterministic output.
  llm_param_temp: 0.6
  # Specifies the number of context tokens the model considers when generating a response.
  llm_param_num_ctx: 8192
  # Defines the maximum number of tokens the model should predict. -1 means no fixed limit, letting the model decide the response length.
  llm_param_num_predict: -1
  # Sets how far back (the last N tokens) the model looks to penalize or prevent repetition
  llm_param_repeat_last_n: 64
  # Adjusts the penalty for repeating content. A value above 1 (like 1.2) discourages repetition.
  llm_param_repeat_penalty: 1.2
  # Sets the random seed for response generation, making responses reproducible for the same input. Set -1 if reproducibility is not needed.
  llm_param_seed: 42
  # The top-K sampling parameter, which restricts the model's predictions to the K most likely next tokens.
  llm_param_top_k: 40
  # The top-P (nucleus) sampling parameter: the model samples from the smallest set of tokens whose cumulative probability reaches this value.
  llm_param_top_p: 0.95
  # Specifies the model being used
  llm_model: "mistral-7b-instruct:latest"
  # Defines a list of stop tokens or sequences. These are used as signals to end the response generation.
  llm_param_stop:
    - "<|im_start|>user"
    - "</s>"
    - "<|user|>"
  # This line defines a template for formatting messages sent to the LLM.
  llm_msg_template: "{message}"
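  # Illustrative example (not a default of this bot): the template could wrap the
  # incoming text to give the model a persona, e.g.
  #   llm_msg_template: "You are a concise, helpful assistant.\n\n{message}"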