Compare commits


7 Commits

Author SHA1 Message Date
ef07ca1203 Feat: continuous delivery pipeline add latest tag (#174)

Signed-off-by: James Hodgkinson <james@terminaloutcomes.com>
2025-03-02 05:30:36 +01:00
1f59685530 Feat: Improve HTTP Headers serializer json log #172 (#173)
* Changed Event struct field Headers from string to map[string][]string

* Add integration test for HTTP headers
2025-03-01 12:31:34 +01:00
f658a26b32 Feat: Update docker-image.yml to add multi-platform support (#171)
* Update docker-image.yml

Adds multi-arch support

Signed-off-by: James Hodgkinson <james@terminaloutcomes.com>
Co-authored-by: Mario Candela <mario.candela.personal@gmail.com>
2025-02-28 11:36:15 +01:00
3fb8a667b3 Update codeql.yml
Upgrade CodeQL from v2 to v3

Signed-off-by: Mario Candela <mario.candela.personal@gmail.com>
2025-02-24 08:16:34 +01:00
8963bbc86d Fix: mapping LLMModel for SSH inline, removed old comments on docker-compose.yml (#168)
2025-02-20 22:41:28 +01:00
44ec44ea5c Fix LLM model name typo
2025-02-20 18:20:17 +01:00
38297faed2 Feat: Refactoring LLM Plugin, update docs. (#165)
2025-02-16 22:48:59 +01:00
14 changed files with 105 additions and 90 deletions

View File

@@ -27,7 +27,7 @@ jobs:
uses: actions/checkout@v3
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
@@ -35,6 +35,6 @@ jobs:
run: go build ./...
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
uses: github/codeql-action/analyze@v3
with:
category: "/language:${{matrix.language}}"

View File

@@ -1,31 +1,32 @@
---
name: Docker Hub Image
on:
push:
tags:
- 'v*.*.*'
jobs:
CD:
runs-on: ubuntu-latest
steps:
-
name: Checkout
- name: Checkout
uses: actions/checkout@v3
-
name: Login to Docker Hub
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
-
name: Set up Docker Buildx
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
-
name: Build and push
- name: Build and push
uses: docker/build-push-action@v4
with:
context: .
file: ./Dockerfile
push: true
tags: m4r10/beelzebub:${{ github.ref_name }}
tags: |
m4r10/beelzebub:${{ github.ref_name }}
m4r10/beelzebub:latest
platforms: linux/amd64,linux/arm64

View File

@@ -211,9 +211,9 @@ commands:
#### Example SSH Honeypot
###### Honeypot LLM Honeypots
###### LLM Honeypots
Example with OpenAI GPT-4:
Example of an SSH LLM honeypot using OpenAI as the LLM provider:
```yaml
apiVersion: "v1"
@@ -228,11 +228,12 @@ serverName: "ubuntu"
passwordRegex: "^(root|qwerty|Smoker666|123456|jenkins|minecraft|sinus|alex|postgres|Ly123456)$"
deadlineTimeoutSeconds: 60
plugin:
llmModel: "gpt4-o"
llmProvider: "openai"
llmModel: "gpt-4o" #Models https://platform.openai.com/docs/models
openAISecretKey: "sk-proj-123456"
```
Example with Ollama Llama3:
Example with a local Ollama instance using the codellama:7b model:
```yaml
apiVersion: "v1"
@@ -247,7 +248,8 @@ serverName: "ubuntu"
passwordRegex: "^(root|qwerty|Smoker666|123456|jenkins|minecraft|sinus|alex|postgres|Ly123456)$"
deadlineTimeoutSeconds: 60
plugin:
llmModel: "llama3"
llmProvider: "ollama"
llmModel: "codellama:7b" #Models https://ollama.com/search
host: "http://example.com/api/chat" #default http://localhost:11434/api/chat
```
Example with custom prompt:
@@ -265,7 +267,8 @@ serverName: "ubuntu"
passwordRegex: "^(root|qwerty|Smoker666|123456|jenkins|minecraft|sinus|alex|postgres|Ly123456)$"
deadlineTimeoutSeconds: 60
plugin:
llmModel: "gpt4-o"
llmProvider: "openai"
llmModel: "gpt-4o"
openAISecretKey: "sk-proj-123456"
prompt: "You will act as an Ubuntu Linux terminal. The user will type commands, and you are to reply with what the terminal should show. Your responses must be contained within a single code block."
```

View File

@@ -7,7 +7,9 @@ commands:
plugin: "LLMHoneypot"
serverVersion: "OpenSSH"
serverName: "ubuntu"
passwordRegex: "^(root|qwerty|Smoker666|123456|jenkins|minecraft|sinus|alex|postgres|Ly123456)$"
passwordRegex: "^(root|qwerty|Smoker666|123456|jenkins|minecraft|sinus|alex|postgres|Ly123456|1234)$"
deadlineTimeoutSeconds: 6000
plugin:
llmModel: "llama3"
llmProvider: "openai"
llmModel: "gpt-4o"
openAISecretKey: "sk-proj-12345"

View File

@@ -3,17 +3,16 @@ version: "3.9"
services:
beelzebub:
build: .
#network_mode: host # Not work on Mac OS
container_name: beelzebub
restart: always
ports: # Remove me, if you use configuration network_mode: host
ports:
- "22:22"
- "2222:2222"
- "8080:8080"
- "8081:8081"
- "80:80"
- "3306:3306"
- "2112:2112" # Prometheus openmetrics
- "2112:2112" #Prometheus Open Metrics
environment:
RABBITMQ_URI: ${RABBITMQ_URI}
volumes:

View File

@@ -67,8 +67,11 @@ func (suite *IntegrationTestSuite) TestInvokeHTTPHoneypot() {
response, err := resty.New().R().
Get(suite.httpHoneypotHost + "/index.php")
response.Header().Del("Date")
suite.Require().NoError(err)
suite.Equal(http.StatusOK, response.StatusCode())
suite.Equal(http.Header{"Content-Length": []string{"15"}, "Content-Type": []string{"text/html"}, "Server": []string{"Apache/2.4.53 (Debian)"}, "X-Powered-By": []string{"PHP/7.4.29"}}, response.Header())
suite.Equal("mocked response", string(response.Body()))
response, err = resty.New().R().

View File

@@ -52,6 +52,7 @@ type Plugin struct {
OpenAISecretKey string `yaml:"openAISecretKey"`
Host string `yaml:"host"`
LLMModel string `yaml:"llmModel"`
LLMProvider string `yaml:"llmProvider"`
Prompt string `yaml:"prompt"`
}
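The new llmProvider key binds alongside the existing fields. As a quick sanity check, here is a minimal self-contained Go sketch (illustrative only: local stand-in types, and it assumes gopkg.in/yaml.v3 is available; Beelzebub's real parsing lives in its parser package) showing how the tags map:

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v3" // assumption: any YAML library with struct tags would do
)

// plugin is a local stand-in mirroring the Plugin struct above.
type plugin struct {
	OpenAISecretKey string `yaml:"openAISecretKey"`
	Host            string `yaml:"host"`
	LLMModel        string `yaml:"llmModel"`
	LLMProvider     string `yaml:"llmProvider"`
	Prompt          string `yaml:"prompt"`
}

func main() {
	raw := []byte(`
openAISecretKey: "qwerty"
llmProvider: "ollama"
llmModel: "codellama:7b"
host: "http://localhost:11434/api/chat"
`)
	var p plugin
	if err := yaml.Unmarshal(raw, &p); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s / %s\n", p.LLMProvider, p.LLMModel) // ollama / codellama:7b
}
```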

View File

@@ -59,6 +59,7 @@ commands:
plugin:
openAISecretKey: "qwerty"
llmModel: "llama3"
llmProvider: "ollama"
host: "localhost:1563"
prompt: "hello world"
`)
@@ -135,6 +136,7 @@ func TestReadConfigurationsServicesValid(t *testing.T) {
assert.Equal(t, firstBeelzebubServiceConfiguration.Commands[0].Headers[0], "Content-Type: text/html")
assert.Equal(t, firstBeelzebubServiceConfiguration.Plugin.OpenAISecretKey, "qwerty")
assert.Equal(t, firstBeelzebubServiceConfiguration.Plugin.LLMModel, "llama3")
assert.Equal(t, firstBeelzebubServiceConfiguration.Plugin.LLMProvider, "ollama")
assert.Equal(t, firstBeelzebubServiceConfiguration.Plugin.Host, "localhost:1563")
assert.Equal(t, firstBeelzebubServiceConfiguration.Plugin.Prompt, "hello world")
assert.Equal(t, firstBeelzebubServiceConfiguration.TLSCertPath, "/tmp/cert.crt")

View File

@@ -85,7 +85,7 @@ func TestGetHoneypotsConfigurationsWithResults(t *testing.T) {
resp, err := httpmock.NewJsonResponse(200, &[]HoneypotConfigResponseDTO{
{
ID: "123456",
Config: "apiVersion: \"v1\"\nprotocol: \"ssh\"\naddress: \":2222\"\ndescription: \"SSH interactive ChatGPT\"\ncommands:\n - regex: \"^(.+)$\"\n plugin: \"LLMHoneypot\"\nserverVersion: \"OpenSSH\"\nserverName: \"ubuntu\"\npasswordRegex: \"^(root|qwerty|Smoker666|123456|jenkins|minecraft|sinus|alex|postgres|Ly123456)$\"\ndeadlineTimeoutSeconds: 60\nplugin:\n llmModel: \"gpt4-o\"\n openAISecretKey: \"1234\"\n",
Config: "apiVersion: \"v1\"\nprotocol: \"ssh\"\naddress: \":2222\"\ndescription: \"SSH interactive ChatGPT\"\ncommands:\n - regex: \"^(.+)$\"\n plugin: \"LLMHoneypot\"\nserverVersion: \"OpenSSH\"\nserverName: \"ubuntu\"\npasswordRegex: \"^(root|qwerty|Smoker666|123456|jenkins|minecraft|sinus|alex|postgres|Ly123456)$\"\ndeadlineTimeoutSeconds: 60\nplugin:\n llmModel: \"gpt-4o\"\n openAISecretKey: \"1234\"\n",
TokenID: "1234567",
},
})
@@ -120,7 +120,7 @@ func TestGetHoneypotsConfigurationsWithResults(t *testing.T) {
PasswordRegex: "^(root|qwerty|Smoker666|123456|jenkins|minecraft|sinus|alex|postgres|Ly123456)$",
DeadlineTimeoutSeconds: 60,
Plugin: parser.Plugin{
LLMModel: "gpt4-o",
LLMModel: "gpt-4o",
OpenAISecretKey: "1234",
},
},

View File

@@ -8,13 +8,14 @@ import (
"github.com/mariocandela/beelzebub/v3/tracer"
log "github.com/sirupsen/logrus"
"regexp"
"strings"
)
const (
systemPromptVirtualizeLinuxTerminal = "You will act as an Ubuntu Linux terminal. The user will type commands, and you are to reply with what the terminal should show. Your responses must be contained within a single code block. Do not provide note. Do not provide explanations or type commands unless explicitly instructed by the user. Your entire response/output is going to consist of a simple text with \n for new line, and you will NOT wrap it within string md markers"
systemPromptVirtualizeHTTPServer = "You will act as an unsecure HTTP Server with multiple vulnerability like aws and git credentials stored into root http directory. The user will send HTTP requests, and you are to reply with what the server should show. Do not provide explanations or type commands unless explicitly instructed by the user."
LLMPluginName = "LLMHoneypot"
openAIGPTEndpoint = "https://api.openai.com/v1/chat/completions"
openAIEndpoint = "https://api.openai.com/v1/chat/completions"
ollamaEndpoint = "http://localhost:11434/api/chat"
)
@@ -23,7 +24,8 @@ type LLMHoneypot struct {
OpenAIKey string
client *resty.Client
Protocol tracer.Protocol
Model LLMModel
Provider LLMProvider
Model string
Host string
CustomPrompt string
}
@@ -71,21 +73,21 @@ func (role Role) String() string {
return [...]string{"system", "user", "assistant"}[role]
}
type LLMModel int
type LLMProvider int
const (
LLAMA3 LLMModel = iota
GPT4O
Ollama LLMProvider = iota
OpenAI
)
func FromStringToLLMModel(llmModel string) (LLMModel, error) {
switch llmModel {
case "llama3":
return LLAMA3, nil
case "gpt4-o":
return GPT4O, nil
func FromStringToLLMProvider(llmProvider string) (LLMProvider, error) {
switch strings.ToLower(llmProvider) {
case "ollama":
return Ollama, nil
case "openai":
return OpenAI, nil
default:
return -1, fmt.Errorf("model %s not found", llmModel)
return -1, fmt.Errorf("provider %s not found, valid providers: ollama, openai", llmProvider)
}
}
@@ -153,7 +155,7 @@ func (llmHoneypot *LLMHoneypot) openAICaller(messages []Message) (string, error)
var err error
requestJson, err := json.Marshal(Request{
Model: "gpt-4o",
Model: llmHoneypot.Model,
Messages: messages,
Stream: false,
})
@@ -166,7 +168,7 @@ func (llmHoneypot *LLMHoneypot) openAICaller(messages []Message) (string, error)
}
if llmHoneypot.Host == "" {
llmHoneypot.Host = openAIGPTEndpoint
llmHoneypot.Host = openAIEndpoint
}
log.Debug(string(requestJson))
@@ -192,7 +194,7 @@ func (llmHoneypot *LLMHoneypot) ollamaCaller(messages []Message) (string, error)
var err error
requestJson, err := json.Marshal(Request{
Model: "llama3",
Model: llmHoneypot.Model,
Messages: messages,
Stream: false,
})
@@ -229,13 +231,13 @@ func (llmHoneypot *LLMHoneypot) ExecuteModel(command string) (string, error) {
return "", err
}
switch llmHoneypot.Model {
case LLAMA3:
switch llmHoneypot.Provider {
case Ollama:
return llmHoneypot.ollamaCaller(prompt)
case GPT4O:
case OpenAI:
return llmHoneypot.openAICaller(prompt)
default:
return "", errors.New("no model selected")
return "", fmt.Errorf("provider %d not found, valid providers: ollama, openai", llmHoneypot.Provider)
}
}
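With the provider/model split, a caller now picks a provider constant and passes any model name as a plain string. A hedged usage sketch assembled from the types visible in this diff; it assumes the v3 module path for the plugins package and that InitLLMHoneypot returns a ready-to-use instance, as the tests below suggest:

```go
package main

import (
	"fmt"
	"log"

	"github.com/mariocandela/beelzebub/v3/plugins"
	"github.com/mariocandela/beelzebub/v3/tracer"
)

func main() {
	// The model string is now free-form, so any Ollama-served model works;
	// "codellama:7b" matches the README example above.
	honeypot := plugins.LLMHoneypot{
		Histories: make([]plugins.Message, 0),
		Protocol:  tracer.SSH,
		Provider:  plugins.Ollama,
		Model:     "codellama:7b",
		// Host is optional: Ollama defaults to http://localhost:11434/api/chat.
	}

	terminal := plugins.InitLLMHoneypot(honeypot)

	output, err := terminal.ExecuteModel("ls -la")
	if err != nil {
		log.Fatal(err) // e.g. "provider ... not found" for an unknown provider
	}
	fmt.Println(output)
}
```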

View File

@@ -85,7 +85,8 @@ func TestBuildExecuteModelFailValidation(t *testing.T) {
Histories: make([]Message, 0),
OpenAIKey: "",
Protocol: tracer.SSH,
Model: GPT4O,
Model: "gpt4-o",
Provider: OpenAI,
}
openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
@@ -101,7 +102,7 @@ func TestBuildExecuteModelWithCustomPrompt(t *testing.T) {
defer httpmock.DeactivateAndReset()
// Given
httpmock.RegisterMatcherResponder("POST", openAIGPTEndpoint,
httpmock.RegisterMatcherResponder("POST", openAIEndpoint,
httpmock.BodyContainsString("hello world"),
func(req *http.Request) (*http.Response, error) {
resp, err := httpmock.NewJsonResponse(200, &Response{
@@ -125,7 +126,8 @@ func TestBuildExecuteModelWithCustomPrompt(t *testing.T) {
Histories: make([]Message, 0),
OpenAIKey: "sdjdnklfjndslkjanfk",
Protocol: tracer.HTTP,
Model: GPT4O,
Model: "gpt4-o",
Provider: OpenAI,
CustomPrompt: "hello world",
}
@@ -146,7 +148,8 @@ func TestBuildExecuteModelFailValidationStrategyType(t *testing.T) {
Histories: make([]Message, 0),
OpenAIKey: "",
Protocol: tracer.TCP,
Model: GPT4O,
Model: "gpt4-o",
Provider: OpenAI,
}
openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
@@ -161,7 +164,8 @@ func TestBuildExecuteModelFailValidationModelType(t *testing.T) {
llmHoneypot := LLMHoneypot{
Histories: make([]Message, 0),
Protocol: tracer.SSH,
Model: 5,
Model: "llama3",
Provider: 5,
}
openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
@@ -179,7 +183,7 @@ func TestBuildExecuteModelSSHWithResultsOpenAI(t *testing.T) {
defer httpmock.DeactivateAndReset()
// Given
httpmock.RegisterResponder("POST", openAIGPTEndpoint,
httpmock.RegisterResponder("POST", openAIEndpoint,
func(req *http.Request) (*http.Response, error) {
resp, err := httpmock.NewJsonResponse(200, &Response{
Choices: []Choice{
@@ -202,7 +206,8 @@ func TestBuildExecuteModelSSHWithResultsOpenAI(t *testing.T) {
Histories: make([]Message, 0),
OpenAIKey: "sdjdnklfjndslkjanfk",
Protocol: tracer.SSH,
Model: GPT4O,
Model: "gpt4-o",
Provider: OpenAI,
}
openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
@@ -240,7 +245,8 @@ func TestBuildExecuteModelSSHWithResultsLLama(t *testing.T) {
llmHoneypot := LLMHoneypot{
Histories: make([]Message, 0),
Protocol: tracer.SSH,
Model: LLAMA3,
Model: "llama3",
Provider: Ollama,
}
openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
@@ -260,7 +266,7 @@ func TestBuildExecuteModelSSHWithoutResults(t *testing.T) {
defer httpmock.DeactivateAndReset()
// Given
httpmock.RegisterResponder("POST", openAIGPTEndpoint,
httpmock.RegisterResponder("POST", openAIEndpoint,
func(req *http.Request) (*http.Response, error) {
resp, err := httpmock.NewJsonResponse(200, &Response{
Choices: []Choice{},
@@ -276,7 +282,8 @@ func TestBuildExecuteModelSSHWithoutResults(t *testing.T) {
Histories: make([]Message, 0),
OpenAIKey: "sdjdnklfjndslkjanfk",
Protocol: tracer.SSH,
Model: GPT4O,
Model: "gpt4-o",
Provider: OpenAI,
}
openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
@@ -295,7 +302,7 @@ func TestBuildExecuteModelHTTPWithResults(t *testing.T) {
defer httpmock.DeactivateAndReset()
// Given
httpmock.RegisterResponder("POST", openAIGPTEndpoint,
httpmock.RegisterResponder("POST", openAIEndpoint,
func(req *http.Request) (*http.Response, error) {
resp, err := httpmock.NewJsonResponse(200, &Response{
Choices: []Choice{
@@ -318,7 +325,8 @@ func TestBuildExecuteModelHTTPWithResults(t *testing.T) {
Histories: make([]Message, 0),
OpenAIKey: "sdjdnklfjndslkjanfk",
Protocol: tracer.HTTP,
Model: GPT4O,
Model: "gpt4-o",
Provider: OpenAI,
}
openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
@@ -338,7 +346,7 @@ func TestBuildExecuteModelHTTPWithoutResults(t *testing.T) {
defer httpmock.DeactivateAndReset()
// Given
httpmock.RegisterResponder("POST", openAIGPTEndpoint,
httpmock.RegisterResponder("POST", openAIEndpoint,
func(req *http.Request) (*http.Response, error) {
resp, err := httpmock.NewJsonResponse(200, &Response{
Choices: []Choice{},
@@ -354,7 +362,8 @@ func TestBuildExecuteModelHTTPWithoutResults(t *testing.T) {
Histories: make([]Message, 0),
OpenAIKey: "sdjdnklfjndslkjanfk",
Protocol: tracer.HTTP,
Model: GPT4O,
Model: "gpt4-o",
Provider: OpenAI,
}
openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
@@ -368,16 +377,16 @@ func TestBuildExecuteModelHTTPWithoutResults(t *testing.T) {
}
func TestFromString(t *testing.T) {
model, err := FromStringToLLMModel("llama3")
model, err := FromStringToLLMProvider("openai")
assert.Nil(t, err)
assert.Equal(t, LLAMA3, model)
assert.Equal(t, OpenAI, model)
model, err = FromStringToLLMModel("gpt4-o")
model, err = FromStringToLLMProvider("ollama")
assert.Nil(t, err)
assert.Equal(t, GPT4O, model)
assert.Equal(t, Ollama, model)
model, err = FromStringToLLMModel("beelzebub-model")
assert.Errorf(t, err, "model beelzebub-model not found")
model, err = FromStringToLLMProvider("beelzebub-model")
assert.Errorf(t, err, "provider beelzebub-model not found")
}
func TestBuildExecuteModelSSHWithoutPlaintextSection(t *testing.T) {
@@ -404,7 +413,7 @@ func TestBuildExecuteModelSSHWithoutPlaintextSection(t *testing.T) {
llmHoneypot := LLMHoneypot{
Histories: make([]Message, 0),
Protocol: tracer.SSH,
Model: LLAMA3,
Model: "llama3",
}
openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)
@@ -442,7 +451,8 @@ func TestBuildExecuteModelSSHWithoutQuotesSection(t *testing.T) {
llmHoneypot := LLMHoneypot{
Histories: make([]Message, 0),
Protocol: tracer.SSH,
Model: LLAMA3,
Model: "llama3",
Provider: Ollama,
}
openAIGPTVirtualTerminal := InitLLMHoneypot(llmHoneypot)

View File

@@ -37,10 +37,10 @@ func (httpStrategy HTTPStrategy) Init(beelzebubServiceConfiguration parser.Beelz
if command.Plugin == plugins.LLMPluginName {
llmModel, err := plugins.FromStringToLLMModel(beelzebubServiceConfiguration.Plugin.LLMModel)
llmProvider, err := plugins.FromStringToLLMProvider(beelzebubServiceConfiguration.Plugin.LLMProvider)
if err != nil {
log.Errorf("Error fromString: %s", err.Error())
log.Errorf("Error: %s", err.Error())
responseHTTPBody = "404 Not Found!"
}
@@ -49,7 +49,8 @@ func (httpStrategy HTTPStrategy) Init(beelzebubServiceConfiguration parser.Beelz
OpenAIKey: beelzebubServiceConfiguration.Plugin.OpenAISecretKey,
Protocol: tracer.HTTP,
Host: beelzebubServiceConfiguration.Plugin.Host,
Model: llmModel,
Model: beelzebubServiceConfiguration.Plugin.LLMModel,
Provider: llmProvider,
CustomPrompt: beelzebubServiceConfiguration.Plugin.Prompt,
}
@@ -116,7 +117,7 @@ func traceRequest(request *http.Request, tr tracer.Tracer, HoneypotDescription s
HostHTTPRequest: request.Host,
UserAgent: request.UserAgent(),
Cookies: mapCookiesToString(request.Cookies()),
Headers: mapHeaderToString(request.Header),
Headers: request.Header,
Status: tracer.Stateless.String(),
RemoteAddr: request.RemoteAddr,
SourceIp: host,
@@ -132,18 +133,6 @@ func traceRequest(request *http.Request, tr tracer.Tracer, HoneypotDescription s
tr.TraceEvent(event)
}
func mapHeaderToString(headers http.Header) string {
headersString := ""
for key := range headers {
for _, values := range headers[key] {
headersString += fmt.Sprintf("[Key: %s, values: %s],", key, values)
}
}
return headersString
}
func mapCookiesToString(cookies []*http.Cookie) string {
cookiesString := ""

View File

@@ -44,11 +44,12 @@ func (sshStrategy *SSHStrategy) Init(beelzebubServiceConfiguration parser.Beelze
if command.Plugin == plugins.LLMPluginName {
llmModel, err := plugins.FromStringToLLMModel(beelzebubServiceConfiguration.Plugin.LLMModel)
llmProvider, err := plugins.FromStringToLLMProvider(beelzebubServiceConfiguration.Plugin.LLMProvider)
if err != nil {
log.Errorf("Error fromString: %s", err.Error())
log.Errorf("Error: %s", err.Error())
commandOutput = "command not found"
llmProvider = plugins.OpenAI
}
llmHoneypot := plugins.LLMHoneypot{
@@ -56,7 +57,8 @@ func (sshStrategy *SSHStrategy) Init(beelzebubServiceConfiguration parser.Beelze
OpenAIKey: beelzebubServiceConfiguration.Plugin.OpenAISecretKey,
Protocol: tracer.SSH,
Host: beelzebubServiceConfiguration.Plugin.Host,
Model: llmModel,
Model: beelzebubServiceConfiguration.Plugin.LLMModel,
Provider: llmProvider,
CustomPrompt: beelzebubServiceConfiguration.Plugin.Prompt,
}
@@ -130,11 +132,11 @@ func (sshStrategy *SSHStrategy) Init(beelzebubServiceConfiguration parser.Beelze
if command.Plugin == plugins.LLMPluginName {
llmModel, err := plugins.FromStringToLLMModel(beelzebubServiceConfiguration.Plugin.LLMModel)
llmProvider, err := plugins.FromStringToLLMProvider(beelzebubServiceConfiguration.Plugin.LLMProvider)
if err != nil {
log.Errorf("Error fromString: %s", err.Error())
commandOutput = "command not found"
log.Errorf("Error: %s, fallback OpenAI", err.Error())
llmProvider = plugins.OpenAI
}
llmHoneypot := plugins.LLMHoneypot{
@@ -142,7 +144,8 @@ func (sshStrategy *SSHStrategy) Init(beelzebubServiceConfiguration parser.Beelze
OpenAIKey: beelzebubServiceConfiguration.Plugin.OpenAISecretKey,
Protocol: tracer.SSH,
Host: beelzebubServiceConfiguration.Plugin.Host,
Model: llmModel,
Model: beelzebubServiceConfiguration.Plugin.LLMModel,
Provider: llmProvider,
CustomPrompt: beelzebubServiceConfiguration.Plugin.Prompt,
}

View File

@@ -27,7 +27,7 @@ type Event struct {
User string
Password string
Client string
Headers string
Headers map[string][]string
Cookies string
UserAgent string
HostHTTPRequest string
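Since http.Header is itself a map[string][]string, traceRequest can now pass request.Header through unchanged, and the JSON log gains real structure. A minimal sketch with a trimmed stand-in for the Event struct (the real struct carries many more fields):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// event is a trimmed stand-in for tracer.Event, keeping only the changed field.
type event struct {
	Headers map[string][]string
}

func main() {
	e := event{Headers: map[string][]string{
		"Content-Type": {"application/json"},
		"User-Agent":   {"curl/8.0.1"},
	}}
	out, _ := json.Marshal(e)
	fmt.Println(string(out))
	// {"Headers":{"Content-Type":["application/json"],"User-Agent":["curl/8.0.1"]}}
	// Previously the same data was flattened into one string, e.g.
	// "[Key: Content-Type, values: application/json],..."
}
```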