From 38297faed2999cae34aa88e349e8e5dfc9c8e8ee Mon Sep 17 00:00:00 2001
From: Mario Candela
Date: Sun, 16 Feb 2025 22:48:59 +0100
Subject: [PATCH] Feat: Refactoring LLM Plugin, update docs. (#165)

Refactoring LLM Plugin, update docs.
---
 README.md                             | 13 ++++---
 configurations/services/ssh-2222.yaml |  6 ++-
 parser/configurations_parser.go       |  1 +
 parser/configurations_parser_test.go  |  2 +
 plugins/llm-integration.go            | 40 ++++++++++----------
 plugins/llm-integration_test.go       | 54 ++++++++++++++++-----------
 protocols/strategies/http.go          |  7 ++--
 protocols/strategies/ssh.go           | 17 +++++----
 8 files changed, 82 insertions(+), 58 deletions(-)

diff --git a/README.md b/README.md
index 90c1f1c..4db2c2d 100644
--- a/README.md
+++ b/README.md
@@ -211,9 +211,9 @@ commands:
 
 #### Example SSH Honeypot
 
-###### Honeypot LLM Honeypots
+###### LLM Honeypots
 
-Example with OpenAI GPT-4:
+Below is an SSH LLM honeypot using OpenAI as the LLM provider:
 
 ```yaml
 apiVersion: "v1"
@@ -228,11 +228,12 @@ serverName: "ubuntu"
 passwordRegex: "^(root|qwerty|Smoker666|123456|jenkins|minecraft|sinus|alex|postgres|Ly123456)$"
 deadlineTimeoutSeconds: 60
 plugin:
-  llmModel: "gpt4-o"
+  llmProvider: "openai"
+  llmModel: "gpt-4o" #Models https://platform.openai.com/docs/models
   openAISecretKey: "sk-proj-123456"
 ```
 
-Example with Ollama Llama3:
+Example with a local Ollama instance using the codellama:7b model:
 
 ```yaml
 apiVersion: "v1"
@@ -247,7 +248,8 @@ serverName: "ubuntu"
 passwordRegex: "^(root|qwerty|Smoker666|123456|jenkins|minecraft|sinus|alex|postgres|Ly123456)$"
 deadlineTimeoutSeconds: 60
 plugin:
-  llmModel: "llama3"
+  llmProvider: "ollama"
+  llmModel: "codellama:7b" #Models https://ollama.com/search
   host: "http://example.com/api/chat" #default http://localhost:11434/api/chat
 ```
 Example with custom prompt:
@@ -265,6 +267,7 @@ serverName: "ubuntu"
 passwordRegex: "^(root|qwerty|Smoker666|123456|jenkins|minecraft|sinus|alex|postgres|Ly123456)$"
 deadlineTimeoutSeconds: 60
 plugin:
+  llmProvider: "openai"
   llmModel: "gpt-4o"
   openAISecretKey: "sk-proj-123456"
   prompt: "You will act as an Ubuntu Linux terminal. The user will type commands, and you are to reply with what the terminal should show. Your responses must be contained within a single code block."
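A minimal sketch of how the new `llmProvider` key from these README examples is resolved, using only the API this patch introduces (the `provider` variable and the literal "ollama" are illustrative):

```go
package main

import (
	"fmt"

	"github.com/mariocandela/beelzebub/v3/plugins"
)

func main() {
	// llmProvider selects the backend; llmModel is now passed through verbatim
	// to that backend (e.g. "gpt-4o" for OpenAI, "codellama:7b" for Ollama).
	provider, err := plugins.FromStringToLLMProvider("ollama")
	if err != nil {
		// Unknown providers are rejected with an explicit error.
		fmt.Println(err)
		return
	}
	fmt.Println(provider == plugins.Ollama) // true
}
```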
diff --git a/configurations/services/ssh-2222.yaml b/configurations/services/ssh-2222.yaml
index 437b35d..53c5447 100644
--- a/configurations/services/ssh-2222.yaml
+++ b/configurations/services/ssh-2222.yaml
@@ -7,7 +7,9 @@ commands:
     plugin: "LLMHoneypot"
 serverVersion: "OpenSSH"
 serverName: "ubuntu"
-passwordRegex: "^(root|qwerty|Smoker666|123456|jenkins|minecraft|sinus|alex|postgres|Ly123456)$"
+passwordRegex: "^(root|qwerty|Smoker666|123456|jenkins|minecraft|sinus|alex|postgres|Ly123456|1234)$"
 deadlineTimeoutSeconds: 6000
 plugin:
-  llmModel: "llama3"
\ No newline at end of file
+  llmProvider: "openai"
+  llmModel: "gpt-4o"
+  openAISecretKey: "sk-proj-12345"
\ No newline at end of file
diff --git a/parser/configurations_parser.go b/parser/configurations_parser.go
index 9d14d30..3b6db89 100644
--- a/parser/configurations_parser.go
+++ b/parser/configurations_parser.go
@@ -52,6 +52,7 @@ type Plugin struct {
 	OpenAISecretKey string `yaml:"openAISecretKey"`
 	Host            string `yaml:"host"`
 	LLMModel        string `yaml:"llmModel"`
+	LLMProvider     string `yaml:"llmProvider"`
 	Prompt          string `yaml:"prompt"`
 }
diff --git a/parser/configurations_parser_test.go b/parser/configurations_parser_test.go
index 91e6e1d..1ba472e 100644
--- a/parser/configurations_parser_test.go
+++ b/parser/configurations_parser_test.go
@@ -59,6 +59,7 @@ commands:
 plugin:
   openAISecretKey: "qwerty"
   llmModel: "llama3"
+  llmProvider: "ollama"
   host: "localhost:1563"
   prompt: "hello world"
 `)
@@ -135,6 +136,7 @@ func TestReadConfigurationsServicesValid(t *testing.T) {
 	assert.Equal(t, firstBeelzebubServiceConfiguration.Commands[0].Headers[0], "Content-Type: text/html")
 	assert.Equal(t, firstBeelzebubServiceConfiguration.Plugin.OpenAISecretKey, "qwerty")
 	assert.Equal(t, firstBeelzebubServiceConfiguration.Plugin.LLMModel, "llama3")
+	assert.Equal(t, firstBeelzebubServiceConfiguration.Plugin.LLMProvider, "ollama")
 	assert.Equal(t, firstBeelzebubServiceConfiguration.Plugin.Host, "localhost:1563")
 	assert.Equal(t, firstBeelzebubServiceConfiguration.Plugin.Prompt, "hello world")
 	assert.Equal(t, firstBeelzebubServiceConfiguration.TLSCertPath, "/tmp/cert.crt")
diff --git a/plugins/llm-integration.go b/plugins/llm-integration.go
index 712825c..b109c7c 100644
--- a/plugins/llm-integration.go
+++ b/plugins/llm-integration.go
@@ -8,13 +8,14 @@ import (
 	"github.com/mariocandela/beelzebub/v3/tracer"
 	log "github.com/sirupsen/logrus"
 	"regexp"
+	"strings"
 )
 
 const (
 	systemPromptVirtualizeLinuxTerminal = "You will act as an Ubuntu Linux terminal. The user will type commands, and you are to reply with what the terminal should show. Your responses must be contained within a single code block. Do not provide note. Do not provide explanations or type commands unless explicitly instructed by the user. Your entire response/output is going to consist of a simple text with \n for new line, and you will NOT wrap it within string md markers"
 	systemPromptVirtualizeHTTPServer    = "You will act as an unsecure HTTP Server with multiple vulnerability like aws and git credentials stored into root http directory. The user will send HTTP requests, and you are to reply with what the server should show. Do not provide explanations or type commands unless explicitly instructed by the user."
 	LLMPluginName                       = "LLMHoneypot"
-	openAIGPTEndpoint                   = "https://api.openai.com/v1/chat/completions"
+	openAIEndpoint                      = "https://api.openai.com/v1/chat/completions"
 	ollamaEndpoint                      = "http://localhost:11434/api/chat"
 )
 
@@ -23,7 +24,8 @@ type LLMHoneypot struct {
 	OpenAIKey    string
 	client       *resty.Client
 	Protocol     tracer.Protocol
-	Model        LLMModel
+	Provider     LLMProvider
+	Model        string
 	Host         string
 	CustomPrompt string
 }
@@ -71,21 +73,21 @@ func (role Role) String() string {
 	return [...]string{"system", "user", "assistant"}[role]
 }
 
-type LLMModel int
+type LLMProvider int
 
 const (
-	LLAMA3 LLMModel = iota
-	GPT4O
+	Ollama LLMProvider = iota
+	OpenAI
 )
 
-func FromStringToLLMModel(llmModel string) (LLMModel, error) {
-	switch llmModel {
-	case "llama3":
-		return LLAMA3, nil
-	case "gpt4-o":
-		return GPT4O, nil
+func FromStringToLLMProvider(llmProvider string) (LLMProvider, error) {
+	switch strings.ToLower(llmProvider) {
+	case "ollama":
+		return Ollama, nil
+	case "openai":
+		return OpenAI, nil
 	default:
-		return -1, fmt.Errorf("model %s not found", llmModel)
+		return -1, fmt.Errorf("provider %s not found, valid providers: ollama, openai", llmProvider)
 	}
 }
@@ -153,7 +155,7 @@ func (llmHoneypot *LLMHoneypot) openAICaller(messages []Message) (string, error)
 	var err error
 
 	requestJson, err := json.Marshal(Request{
-		Model:    "gpt-4o",
+		Model:    llmHoneypot.Model,
 		Messages: messages,
 		Stream:   false,
 	})
@@ -166,7 +168,7 @@ func (llmHoneypot *LLMHoneypot) openAICaller(messages []Message) (string, error)
 	}
 
 	if llmHoneypot.Host == "" {
-		llmHoneypot.Host = openAIGPTEndpoint
+		llmHoneypot.Host = openAIEndpoint
 	}
 
 	log.Debug(string(requestJson))
@@ -192,7 +194,7 @@ func (llmHoneypot *LLMHoneypot) ollamaCaller(messages []Message) (string, error)
 	var err error
 
 	requestJson, err := json.Marshal(Request{
-		Model:    "llama3",
+		Model:    llmHoneypot.Model,
 		Messages: messages,
 		Stream:   false,
 	})
@@ -229,13 +231,13 @@ func (llmHoneypot *LLMHoneypot) ExecuteModel(command string) (string, error) {
 		return "", err
 	}
 
-	switch llmHoneypot.Model {
-	case LLAMA3:
+	switch llmHoneypot.Provider {
+	case Ollama:
 		return llmHoneypot.ollamaCaller(prompt)
-	case GPT4O:
+	case OpenAI:
 		return llmHoneypot.openAICaller(prompt)
 	default:
-		return "", errors.New("no model selected")
+		return "", fmt.Errorf("provider %d not found, valid providers: ollama, openai", llmHoneypot.Provider)
 	}
 }
diff --git a/plugins/llm-integration_test.go b/plugins/llm-integration_test.go
index 332ee2f..7b3096b 100644
--- a/plugins/llm-integration_test.go
+++ b/plugins/llm-integration_test.go
@@ -85,7 +85,8 @@ func TestBuildExecuteModelFailValidation(t *testing.T) {
 		Histories: make([]Message, 0),
 		OpenAIKey: "",
 		Protocol:  tracer.SSH,
-		Model:     GPT4O,
+		Model:     "gpt4-o",
+		Provider:  OpenAI,
 	}
 
 	openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
@@ -101,7 +102,7 @@ func TestBuildExecuteModelWithCustomPrompt(t *testing.T) {
 	defer httpmock.DeactivateAndReset()
 
 	// Given
-	httpmock.RegisterMatcherResponder("POST", openAIGPTEndpoint,
+	httpmock.RegisterMatcherResponder("POST", openAIEndpoint,
 		httpmock.BodyContainsString("hello world"),
 		func(req *http.Request) (*http.Response, error) {
 			resp, err := httpmock.NewJsonResponse(200, &Response{
@@ -125,7 +126,8 @@ func TestBuildExecuteModelWithCustomPrompt(t *testing.T) {
 		Histories:    make([]Message, 0),
 		OpenAIKey:    "sdjdnklfjndslkjanfk",
 		Protocol:     tracer.HTTP,
-		Model:        GPT4O,
+		Model:        "gpt4-o",
+		Provider:     OpenAI,
 		CustomPrompt: "hello world",
 	}
 
@@ -146,7 +148,8 @@ func TestBuildExecuteModelFailValidationStrategyType(t *testing.T) {
 		Histories: make([]Message, 0),
 		OpenAIKey: "",
 		Protocol:  tracer.TCP,
-		Model:     GPT4O,
+		Model:     "gpt4-o",
+		Provider:  OpenAI,
 	}
 
 	openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
@@ -161,7 +164,8 @@ func TestBuildExecuteModelFailValidationModelType(t *testing.T) {
 	llmHoneypot := LLMHoneypot{
 		Histories: make([]Message, 0),
 		Protocol:  tracer.SSH,
-		Model:     5,
+		Model:     "llama3",
+		Provider:  5,
 	}
 
 	openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
@@ -179,7 +183,7 @@ func TestBuildExecuteModelSSHWithResultsOpenAI(t *testing.T) {
 	defer httpmock.DeactivateAndReset()
 
 	// Given
-	httpmock.RegisterResponder("POST", openAIGPTEndpoint,
+	httpmock.RegisterResponder("POST", openAIEndpoint,
 		func(req *http.Request) (*http.Response, error) {
 			resp, err := httpmock.NewJsonResponse(200, &Response{
 				Choices: []Choice{
@@ -202,7 +206,8 @@ func TestBuildExecuteModelSSHWithResultsOpenAI(t *testing.T) {
 		Histories: make([]Message, 0),
 		OpenAIKey: "sdjdnklfjndslkjanfk",
 		Protocol:  tracer.SSH,
-		Model:     GPT4O,
+		Model:     "gpt4-o",
+		Provider:  OpenAI,
 	}
 
 	openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
@@ -240,7 +245,8 @@ func TestBuildExecuteModelSSHWithResultsLLama(t *testing.T) {
 	llmHoneypot := LLMHoneypot{
 		Histories: make([]Message, 0),
 		Protocol:  tracer.SSH,
-		Model:     LLAMA3,
+		Model:     "llama3",
+		Provider:  Ollama,
 	}
 
 	openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
@@ -260,7 +266,7 @@ func TestBuildExecuteModelSSHWithoutResults(t *testing.T) {
 	defer httpmock.DeactivateAndReset()
 
 	// Given
-	httpmock.RegisterResponder("POST", openAIGPTEndpoint,
+	httpmock.RegisterResponder("POST", openAIEndpoint,
 		func(req *http.Request) (*http.Response, error) {
 			resp, err := httpmock.NewJsonResponse(200, &Response{
 				Choices: []Choice{},
@@ -276,7 +282,8 @@ func TestBuildExecuteModelSSHWithoutResults(t *testing.T) {
 		Histories: make([]Message, 0),
 		OpenAIKey: "sdjdnklfjndslkjanfk",
 		Protocol:  tracer.SSH,
-		Model:     GPT4O,
+		Model:     "gpt4-o",
+		Provider:  OpenAI,
 	}
 
 	openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
@@ -295,7 +302,7 @@ func TestBuildExecuteModelHTTPWithResults(t *testing.T) {
 	defer httpmock.DeactivateAndReset()
 
 	// Given
-	httpmock.RegisterResponder("POST", openAIGPTEndpoint,
+	httpmock.RegisterResponder("POST", openAIEndpoint,
 		func(req *http.Request) (*http.Response, error) {
 			resp, err := httpmock.NewJsonResponse(200, &Response{
 				Choices: []Choice{
@@ -318,7 +325,8 @@ func TestBuildExecuteModelHTTPWithResults(t *testing.T) {
 		Histories: make([]Message, 0),
 		OpenAIKey: "sdjdnklfjndslkjanfk",
 		Protocol:  tracer.HTTP,
-		Model:     GPT4O,
+		Model:     "gpt4-o",
+		Provider:  OpenAI,
 	}
 
 	openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
@@ -338,7 +346,7 @@ func TestBuildExecuteModelHTTPWithoutResults(t *testing.T) {
 	defer httpmock.DeactivateAndReset()
 
 	// Given
-	httpmock.RegisterResponder("POST", openAIGPTEndpoint,
+	httpmock.RegisterResponder("POST", openAIEndpoint,
 		func(req *http.Request) (*http.Response, error) {
 			resp, err := httpmock.NewJsonResponse(200, &Response{
 				Choices: []Choice{},
@@ -354,7 +362,8 @@ func TestBuildExecuteModelHTTPWithoutResults(t *testing.T) {
 		Histories: make([]Message, 0),
 		OpenAIKey: "sdjdnklfjndslkjanfk",
 		Protocol:  tracer.HTTP,
-		Model:     GPT4O,
+		Model:     "gpt4-o",
+		Provider:  OpenAI,
 	}
 
 	openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
@@ -368,16 +377,16 @@ func TestBuildExecuteModelHTTPWithoutResults(t *testing.T) {
 }
 
 func TestFromString(t *testing.T) {
-	model, err := FromStringToLLMModel("llama3")
+	model, err := FromStringToLLMProvider("openai")
 	assert.Nil(t, err)
-	assert.Equal(t, LLAMA3, model)
+	assert.Equal(t, OpenAI, model)
 
-	model, err = FromStringToLLMModel("gpt4-o")
+	model, err = FromStringToLLMProvider("ollama")
 	assert.Nil(t, err)
-	assert.Equal(t, GPT4O, model)
+	assert.Equal(t, Ollama, model)
 
-	model, err = FromStringToLLMModel("beelzebub-model")
-	assert.Errorf(t, err, "model beelzebub-model not found")
+	model, err = FromStringToLLMProvider("beelzebub-model")
+	assert.Errorf(t, err, "provider beelzebub-model not found")
 }
 
 func TestBuildExecuteModelSSHWithoutPlaintextSection(t *testing.T) {
@@ -404,7 +413,7 @@ func TestBuildExecuteModelSSHWithoutPlaintextSection(t *testing.T) {
 	llmHoneypot := LLMHoneypot{
 		Histories: make([]Message, 0),
 		Protocol:  tracer.SSH,
-		Model:     LLAMA3,
+		Model:     "llama3",
 	}
 
 	openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
@@ -442,7 +451,8 @@ func TestBuildExecuteModelSSHWithoutQuotesSection(t *testing.T) {
 	llmHoneypot := LLMHoneypot{
 		Histories: make([]Message, 0),
 		Protocol:  tracer.SSH,
-		Model:     LLAMA3,
+		Model:     "llama3",
+		Provider:  Ollama,
 	}
 
 	openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
diff --git a/protocols/strategies/http.go b/protocols/strategies/http.go
index 7647741..fdb80e7 100644
--- a/protocols/strategies/http.go
+++ b/protocols/strategies/http.go
@@ -37,10 +37,10 @@ func (httpStrategy HTTPStrategy) Init(beelzebubServiceConfiguration parser.Beelz
 
 			if command.Plugin == plugins.LLMPluginName {
 
-				llmModel, err := plugins.FromStringToLLMModel(beelzebubServiceConfiguration.Plugin.LLMModel)
+				llmProvider, err := plugins.FromStringToLLMProvider(beelzebubServiceConfiguration.Plugin.LLMProvider)
 
 				if err != nil {
-					log.Errorf("Error fromString: %s", err.Error())
+					log.Errorf("Error: %s", err.Error())
 					responseHTTPBody = "404 Not Found!"
 				}
 
@@ -49,7 +49,8 @@ func (httpStrategy HTTPStrategy) Init(beelzebubServiceConfiguration parser.Beelz
 					OpenAIKey:    beelzebubServiceConfiguration.Plugin.OpenAISecretKey,
 					Protocol:     tracer.HTTP,
 					Host:         beelzebubServiceConfiguration.Plugin.Host,
-					Model:        llmModel,
+					Model:        beelzebubServiceConfiguration.Plugin.LLMModel,
+					Provider:     llmProvider,
 					CustomPrompt: beelzebubServiceConfiguration.Plugin.Prompt,
 				}
 
diff --git a/protocols/strategies/ssh.go b/protocols/strategies/ssh.go
index 22eb471..63a916a 100644
--- a/protocols/strategies/ssh.go
+++ b/protocols/strategies/ssh.go
@@ -44,11 +44,12 @@ func (sshStrategy *SSHStrategy) Init(beelzebubServiceConfiguration parser.Beelze
 
 			if command.Plugin == plugins.LLMPluginName {
 
-				llmModel, err := plugins.FromStringToLLMModel(beelzebubServiceConfiguration.Plugin.LLMModel)
+				llmProvider, err := plugins.FromStringToLLMProvider(beelzebubServiceConfiguration.Plugin.LLMProvider)
 
 				if err != nil {
-					log.Errorf("Error fromString: %s", err.Error())
+					log.Errorf("Error: %s", err.Error())
 					commandOutput = "command not found"
+					llmProvider = plugins.OpenAI
 				}
 
 				llmHoneypot := plugins.LLMHoneypot{
@@ -56,7 +57,8 @@ func (sshStrategy *SSHStrategy) Init(beelzebubServiceConfiguration parser.Beelze
 					OpenAIKey:    beelzebubServiceConfiguration.Plugin.OpenAISecretKey,
 					Protocol:     tracer.SSH,
 					Host:         beelzebubServiceConfiguration.Plugin.Host,
-					Model:        llmModel,
+					Model:        beelzebubServiceConfiguration.Plugin.LLMModel,
+					Provider:     llmProvider,
 					CustomPrompt: beelzebubServiceConfiguration.Plugin.Prompt,
 				}
 
@@ -130,11 +132,11 @@ func (sshStrategy *SSHStrategy) Init(beelzebubServiceConfiguration parser.Beelze
 
 				if command.Plugin == plugins.LLMPluginName {
 
-					llmModel, err := plugins.FromStringToLLMModel(beelzebubServiceConfiguration.Plugin.LLMModel)
+					llmProvider, err := plugins.FromStringToLLMProvider(beelzebubServiceConfiguration.Plugin.LLMProvider)
 
 					if err != nil {
-						log.Errorf("Error fromString: %s", err.Error())
-						commandOutput = "command not found"
+						log.Errorf("Error: %s, fallback OpenAI", err.Error())
+						llmProvider = plugins.OpenAI
 					}
 
 					llmHoneypot := plugins.LLMHoneypot{
@@ -142,7 +144,8 @@ func (sshStrategy *SSHStrategy) Init(beelzebubServiceConfiguration parser.Beelze
 						OpenAIKey:    beelzebubServiceConfiguration.Plugin.OpenAISecretKey,
 						Protocol:     tracer.SSH,
 						Host:         beelzebubServiceConfiguration.Plugin.Host,
-						Model:        llmModel,
+						Model:        beelzebubServiceConfiguration.Plugin.LLMModel,
+						Provider:     llmProvider,
 						CustomPrompt: beelzebubServiceConfiguration.Plugin.Prompt,
 					}
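Taken together, the refactor splits the old `llmModel` enum into a free-form model string plus a provider switch. A minimal end-to-end sketch of the resulting wiring, mirroring the strategies above (the `run` helper and the literal config values are illustrative, not part of the patch):

```go
package main

import (
	"fmt"

	"github.com/mariocandela/beelzebub/v3/parser"
	"github.com/mariocandela/beelzebub/v3/plugins"
	"github.com/mariocandela/beelzebub/v3/tracer"
)

// run wires the refactored plugin the same way the SSH strategy does.
func run(cfg parser.BeelzebubServiceConfiguration) (string, error) {
	llmProvider, err := plugins.FromStringToLLMProvider(cfg.Plugin.LLMProvider)
	if err != nil {
		// Same fallback the SSH strategy applies for unknown providers.
		llmProvider = plugins.OpenAI
	}

	honeypot := plugins.InitLLMHoneypot(plugins.LLMHoneypot{
		Histories:    make([]plugins.Message, 0),
		OpenAIKey:    cfg.Plugin.OpenAISecretKey,
		Protocol:     tracer.SSH,
		Host:         cfg.Plugin.Host,     // empty string falls back to the provider's default endpoint
		Model:        cfg.Plugin.LLMModel, // passed through verbatim, e.g. "gpt-4o" or "codellama:7b"
		Provider:     llmProvider,
		CustomPrompt: cfg.Plugin.Prompt,
	})

	return honeypot.ExecuteModel("whoami")
}

func main() {
	out, err := run(parser.BeelzebubServiceConfiguration{
		Plugin: parser.Plugin{LLMProvider: "ollama", LLMModel: "codellama:7b"},
	})
	fmt.Println(out, err)
}
```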