mirror of https://github.com/splunk/DECEIVE.git, synced 2025-07-02 00:57:26 -04:00
Added 'temperature' parameter to control randomness in LLM responses.
Lower temperatures mean less randomness in the responses, which increases the chances of consistency between sessions. Not a guarantee, though.
@@ -53,6 +53,12 @@ model_name = gpt-4o
 #llm_provider = gemini
 #model_name = gemini-1.5-pro
 
+# Temperature controls randomness in LLM responses. Values usually range from 0.0 to 2.0.
+# Lower values (e.g., 0.2) make responses more focused and deterministic.
+# Higher values (e.g., 0.8) make responses more creative and variable.
+# Default is 0.2.
+temperature = 0.2
+
 # The maximum number of tokens to send to the LLM backend in a single
 # request. This includes the message history for the session, so should
 # be fairly high. Not all models support large token counts, so be sure
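
For context on how a setting like this is typically consumed, here is a minimal sketch, assuming the honeypot reads its INI-style config with Python's configparser and sends requests to an OpenAI-compatible chat endpoint. The config filename, the "llm" section name, and the client wiring are illustrative assumptions, not taken from the DECEIVE source.

    import configparser
    from openai import OpenAI  # assumes the OpenAI Python SDK; swap in the provider actually in use

    # Hypothetical config path and section name, for illustration only.
    config = configparser.ConfigParser()
    config.read("config.ini")
    llm_cfg = config["llm"]

    # getfloat() falls back to 0.2 if the key is absent, matching the
    # documented default in the config comments above.
    temperature = llm_cfg.getfloat("temperature", fallback=0.2)
    model_name = llm_cfg.get("model_name", fallback="gpt-4o")

    client = OpenAI()  # reads OPENAI_API_KEY from the environment
    response = client.chat.completions.create(
        model=model_name,
        temperature=temperature,  # lower = more deterministic session-to-session
        messages=[{"role": "user", "content": "uname -a"}],
    )
    print(response.choices[0].message.content)

Pinning the default low fits the commit's stated goal: a honeypot should look like the same machine each time an attacker returns, so more deterministic responses across sessions are preferable to creative ones.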