-
Notifications
You must be signed in to change notification settings - Fork 160
/
threat_model.py
260 lines (208 loc) · 9.55 KB
/
threat_model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
import json
import requests
from anthropic import Anthropic
from mistralai import Mistral, UserMessage
from openai import OpenAI, AzureOpenAI
import streamlit as st
import google.generativeai as genai
# Function to convert JSON to Markdown for display.
def json_to_markdown(threat_model, improvement_suggestions):
    """Render threat-model JSON data as a Markdown report.

    Args:
        threat_model: List of dicts, each with "Threat Type", "Scenario",
            and "Potential Impact" keys.
        improvement_suggestions: List of suggestion strings.

    Returns:
        A Markdown string containing a threat table followed by a bulleted
        list of improvement suggestions.
    """
    def _cell(text):
        # Escape literal pipes and flatten newlines so model-generated text
        # cannot break the Markdown table layout.
        return str(text).replace("|", "\\|").replace("\n", " ")

    markdown_output = "## Threat Model\n\n"
    # Start the markdown table with headers
    markdown_output += "| Threat Type | Scenario | Potential Impact |\n"
    markdown_output += "|-------------|----------|------------------|\n"
    # Fill the table rows with the threat model data
    for threat in threat_model:
        markdown_output += (
            f"| {_cell(threat['Threat Type'])} "
            f"| {_cell(threat['Scenario'])} "
            f"| {_cell(threat['Potential Impact'])} |\n"
        )
    markdown_output += "\n\n## Improvement Suggestions\n\n"
    for suggestion in improvement_suggestions:
        markdown_output += f"- {suggestion}\n"
    return markdown_output
# Function to create a prompt for generating a threat model
def create_threat_model_prompt(app_type, authentication, internet_facing, sensitive_data, app_input):
    """Build the STRIDE threat-modelling prompt for the configured LLM.

    Args:
        app_type: The kind of application being modelled.
        authentication: Authentication methods the application supports.
        internet_facing: Whether the application is exposed to the internet.
        sensitive_data: Description of sensitive data the application handles.
        app_input: Code summary, README content, and application description.

    Returns:
        The fully interpolated prompt string instructing the model to respond
        with JSON keyed by "threat_model" and "improvement_suggestions".
    """
    return f"""
Act as a cyber security expert with more than 20 years experience of using the STRIDE threat modelling methodology to produce comprehensive threat models for a wide range of applications. Your task is to analyze the provided code summary, README content, and application description to produce a list of specific threats for the application.

Pay special attention to the README content as it often provides valuable context about the project's purpose, architecture, and potential security considerations.

For each of the STRIDE categories (Spoofing, Tampering, Repudiation, Information Disclosure, Denial of Service, and Elevation of Privilege), list multiple (3 or 4) credible threats if applicable. Each threat scenario should provide a credible scenario in which the threat could occur in the context of the application. It is very important that your responses are tailored to reflect the details you are given.

When providing the threat model, use a JSON formatted response with the keys "threat_model" and "improvement_suggestions". Under "threat_model", include an array of objects with the keys "Threat Type", "Scenario", and "Potential Impact". 

Under "improvement_suggestions", include an array of strings with suggestions on how the developers can improve their code or application description to enhance security.

APPLICATION TYPE: {app_type}
AUTHENTICATION METHODS: {authentication}
INTERNET FACING: {internet_facing}
SENSITIVE DATA: {sensitive_data}
CODE SUMMARY, README CONTENT, AND APPLICATION DESCRIPTION:
{app_input}

Example of expected JSON response format:
  
    {{
      "threat_model": [
        {{
          "Threat Type": "Spoofing",
          "Scenario": "Example Scenario 1",
          "Potential Impact": "Example Potential Impact 1"
        }},
        {{
          "Threat Type": "Spoofing",
          "Scenario": "Example Scenario 2",
          "Potential Impact": "Example Potential Impact 2"
        }},
        // ... more threats
      ],
      "improvement_suggestions": [
        "Example improvement suggestion 1.",
        "Example improvement suggestion 2.",
        // ... more suggestions
      ]
    }}
"""
def create_image_analysis_prompt():
    """Return the fixed prompt used when analysing an architecture diagram.

    The prompt asks the model to act as a Senior Solution Architect and
    describe the diagram for a Security Architect, without preamble and
    without speculating beyond what the diagram shows.
    """
    return """
    You are a Senior Solution Architect tasked with explaining the following architecture diagram to 
    a Security Architect to support the threat modelling of the system.

    In order to complete this task you must:

      1. Analyse the diagram
      2. Explain the system architecture to the Security Architect. Your explanation should cover the key 
         components, their interactions, and any technologies used.
    
    Provide a direct explanation of the diagram in a clear, structured format, suitable for a professional 
    discussion.
    
    IMPORTANT INSTRUCTIONS:
     - Do not include any words before or after the explanation itself. For example, do not start your
    explanation with "The image shows..." or "The diagram shows..." just start explaining the key components
    and other relevant details.
     - Do not infer or speculate about information that is not visible in the diagram. Only provide information that can be
    directly determined from the diagram itself.
    """
# Function to get analyse uploaded architecture diagrams.
def get_image_analysis(api_key, model_name, prompt, base64_image, timeout=120):
    """Send an architecture diagram to the OpenAI vision API for analysis.

    Args:
        api_key: OpenAI API key used as a Bearer token.
        model_name: Vision-capable chat model to use.
        prompt: Text instructions accompanying the image.
        base64_image: Base64-encoded JPEG image data.
        timeout: Seconds to wait for the HTTP response (new, defaults to 120
            so a stalled request can no longer hang the app indefinitely).

    Returns:
        The parsed JSON response dict on success, or None on any HTTP or
        parsing error (errors are printed for debugging, matching the
        app's existing best-effort style).
    """
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }

    # Multimodal message: the prompt text plus the image as a data URL.
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": prompt
                },
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}
                }
            ]
        }
    ]

    payload = {
        "model": model_name,
        "messages": messages,
        "max_tokens": 4000
    }

    response = requests.post(
        "https://api.openai.com/v1/chat/completions",
        headers=headers,
        json=payload,
        timeout=timeout,  # prevents an indefinite hang on a stalled connection
    )

    # Log the response for debugging
    try:
        response.raise_for_status()  # Raise an HTTPError for bad responses
        return response.json()
    except requests.exceptions.HTTPError as http_err:
        print(f"HTTP error occurred: {http_err}")  # HTTP error
    except Exception as err:
        print(f"Other error occurred: {err}")  # Other errors

    # Reached only on error: log the raw body for further inspection.
    print(f"Response content: {response.content}")
    return None
# Function to get threat model from the GPT response.
def get_threat_model(api_key, model_name, prompt):
    """Generate a threat model via the OpenAI chat completions API.

    Args:
        api_key: OpenAI API key.
        model_name: Chat model to use.
        prompt: The threat-model prompt text.

    Returns:
        dict parsed from the model's JSON-mode response.
    """
    client = OpenAI(api_key=api_key)

    completion = client.chat.completions.create(
        model=model_name,
        # JSON mode guarantees the reply is a single valid JSON object.
        response_format={"type": "json_object"},
        messages=[
            {"role": "system", "content": "You are a helpful assistant designed to output JSON."},
            {"role": "user", "content": prompt},
        ],
        max_tokens=4000,
    )

    # The 'content' field holds a JSON string; parse it into a dict.
    return json.loads(completion.choices[0].message.content)
# Function to get threat model from the Azure OpenAI response.
def get_threat_model_azure(azure_api_endpoint, azure_api_key, azure_api_version, azure_deployment_name, prompt):
    """Generate a threat model via an Azure OpenAI deployment.

    Args:
        azure_api_endpoint: Azure OpenAI resource endpoint URL.
        azure_api_key: Azure OpenAI API key.
        azure_api_version: API version string for the deployment.
        azure_deployment_name: Name of the deployed model.
        prompt: The threat-model prompt text.

    Returns:
        dict parsed from the model's JSON-mode response.
    """
    client = AzureOpenAI(
        azure_endpoint=azure_api_endpoint,
        api_key=azure_api_key,
        api_version=azure_api_version,
    )

    completion = client.chat.completions.create(
        # Azure routes by deployment name rather than raw model name.
        model=azure_deployment_name,
        response_format={"type": "json_object"},
        messages=[
            {"role": "system", "content": "You are a helpful assistant designed to output JSON."},
            {"role": "user", "content": prompt},
        ],
    )

    # The 'content' field holds a JSON string; parse it into a dict.
    return json.loads(completion.choices[0].message.content)
# Function to get threat model from the Google response.
def get_threat_model_google(google_api_key, google_model, prompt):
    """Generate a threat model via a Google Gemini model.

    Args:
        google_api_key: Google AI API key.
        google_model: Gemini model name.
        prompt: The threat-model prompt text.

    Returns:
        dict parsed from the model's JSON response, or None if the
        model output is not valid JSON (the raw text is printed for
        debugging in that case).
    """
    genai.configure(api_key=google_api_key)

    model = genai.GenerativeModel(
        google_model,
        generation_config={"response_mime_type": "application/json"},
    )

    response = model.generate_content(
        prompt,
        safety_settings={
            # Set safety filter to allow generation of threat models
            'DANGEROUS': 'block_only_high'
        },
    )

    # The JSON text lives in the 'parts' attribute of the candidate content.
    raw_text = response.candidates[0].content.parts[0].text
    try:
        return json.loads(raw_text)
    except json.JSONDecodeError as e:
        print(f"Error decoding JSON: {str(e)}")
        print("Raw JSON string:")
        print(raw_text)
        return None
# Function to get threat model from the Mistral response.
def get_threat_model_mistral(mistral_api_key, mistral_model, prompt):
    """Generate a threat model via the Mistral chat API.

    Args:
        mistral_api_key: Mistral API key.
        mistral_model: Mistral model name.
        prompt: The threat-model prompt text.

    Returns:
        dict parsed from the model's JSON-mode response.
    """
    client = Mistral(api_key=mistral_api_key)

    chat_response = client.chat.complete(
        model=mistral_model,
        # JSON mode constrains the reply to a single valid JSON object.
        response_format={"type": "json_object"},
        messages=[UserMessage(content=prompt)],
    )

    # The 'content' field holds a JSON string; parse it into a dict.
    return json.loads(chat_response.choices[0].message.content)
# Function to get threat model from Ollama hosted LLM.
def get_threat_model_ollama(ollama_model, prompt, base_url="http://localhost:11434", timeout=600):
    """Generate a threat model via a locally hosted Ollama model.

    Args:
        ollama_model: Name of the Ollama model to run.
        prompt: The threat-model prompt text.
        base_url: Base URL of the Ollama server (new, defaults to the
            standard local install address previously hard-coded).
        timeout: Seconds to wait for generation (new; local models can be
            slow, but a stalled server no longer hangs the app forever).

    Returns:
        dict parsed from the model's JSON response.

    Raises:
        requests.HTTPError: If the Ollama server returns an error status
            (previously this surfaced as a confusing KeyError).
        json.JSONDecodeError: If the model output is not valid JSON.
    """
    url = f"{base_url}/api/generate"

    data = {
        "model": ollama_model,
        "prompt": prompt,
        "format": "json",   # ask Ollama to constrain the output to JSON
        "stream": False,    # return one complete response, not a stream
    }

    response = requests.post(url, json=data, timeout=timeout)
    # Fail loudly on HTTP errors instead of a KeyError on the missing field.
    response.raise_for_status()

    outer_json = response.json()
    # Ollama wraps the model's text in the 'response' field; that text is
    # itself a JSON document which we parse into a dict.
    return json.loads(outer_json['response'])
# Function to get threat model from the Claude response.
def get_threat_model_anthropic(anthropic_api_key, anthropic_model, prompt):
    """Generate a threat model via the Anthropic messages API.

    Args:
        anthropic_api_key: Anthropic API key.
        anthropic_model: Claude model name.
        prompt: The threat-model prompt text.

    Returns:
        dict parsed from the model's JSON response.
    """
    client = Anthropic(api_key=anthropic_api_key)

    message = client.messages.create(
        model=anthropic_model,
        max_tokens=1024,
        system="You are a helpful assistant designed to output JSON.",
        messages=[
            {"role": "user", "content": prompt}
        ]
    )

    # Claude may split its answer across several text blocks; stitch them
    # back together before parsing the combined JSON string.
    combined_text = ''.join(block.text for block in message.content)
    return json.loads(combined_text)