mirror of
https://github.com/MISP/misp-galaxy.git
synced 2024-11-22 14:57:18 +00:00
chg: [mitre-atlas] updated
This commit is contained in:
parent
cce72b69e6
commit
ffffe50748
3 changed files with 511 additions and 28 deletions
|
@ -250,7 +250,7 @@
|
||||||
"value": "Train Proxy via Gathered ML Artifacts"
|
"value": "Train Proxy via Gathered ML Artifacts"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"description": "Adversaries may replicate a private model.\nBy repeatedly querying the victim's [ML Model Inference API Access](/techniques/AML.T0040), the adversary can collect the target model's inferences into a dataset.\nThe inferences are used as labels for training a separate model offline that will mimic the behavior and performance of the target model.\n\nA replicated model that closely mimic's the target model is a valuable resource in staging the attack.\nThe adversary can use the replicated model to [Craft Adversarial Data](/techniques/AML.T0043) for various purposes (e.g. [Evade ML Model](/techniques/AML.T0015), [Spamming ML System with Chaff Data](/techniques/AML.T0046)).\n",
|
"description": "Adversaries may replicate a private model.\nBy repeatedly querying the victim's [AI Model Inference API Access](/techniques/AML.T0040), the adversary can collect the target model's inferences into a dataset.\nThe inferences are used as labels for training a separate model offline that will mimic the behavior and performance of the target model.\n\nA replicated model that closely mimic's the target model is a valuable resource in staging the attack.\nThe adversary can use the replicated model to [Craft Adversarial Data](/techniques/AML.T0043) for various purposes (e.g. [Evade ML Model](/techniques/AML.T0015), [Spamming ML System with Chaff Data](/techniques/AML.T0046)).\n",
|
||||||
"meta": {
|
"meta": {
|
||||||
"external_id": "AML.T0005.001",
|
"external_id": "AML.T0005.001",
|
||||||
"kill_chain": [
|
"kill_chain": [
|
||||||
|
@ -310,7 +310,7 @@
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"uuid": "79460396-01b4-4e91-8695-7d26df1abb95",
|
"uuid": "79460396-01b4-4e91-8695-7d26df1abb95",
|
||||||
"value": "Active Scanning (ATLAS)"
|
"value": "Active Scanning"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"description": "Adversaries may search private sources to identify machine learning artifacts that exist on the system and gather information about them.\nThese artifacts can include the software stack used to train and deploy models, training and testing data management systems, container registries, software repositories, and model zoos.\n\nThis information can be used to identify targets for further collection, exfiltration, or disruption, and to tailor and improve attacks.\n",
|
"description": "Adversaries may search private sources to identify machine learning artifacts that exist on the system and gather information about them.\nThese artifacts can include the software stack used to train and deploy models, training and testing data management systems, container registries, software repositories, and model zoos.\n\nThis information can be used to identify targets for further collection, exfiltration, or disruption, and to tailor and improve attacks.\n",
|
||||||
|
@ -330,7 +330,7 @@
|
||||||
"value": "Discover ML Artifacts"
|
"value": "Discover ML Artifacts"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"description": "Adversaries may buy, lease, or rent infrastructure for use throughout their operation.\nA wide variety of infrastructure exists for hosting and orchestrating adversary operations.\nInfrastructure solutions include physical or cloud servers, domains, mobile devices, and third-party web services.\nFree resources may also be used, but they are typically limited.\n\nUse of these infrastructure solutions allows an adversary to stage, launch, and execute an operation.\nSolutions may help adversary operations blend in with traffic that is seen as normal, such as contact to third-party web services.\nDepending on the implementation, adversaries may use infrastructure that makes it difficult to physically tie back to them as well as utilize infrastructure that can be rapidly provisioned, modified, and shut down.\n",
|
"description": "Adversaries may buy, lease, or rent infrastructure for use throughout their operation.\nA wide variety of infrastructure exists for hosting and orchestrating adversary operations.\nInfrastructure solutions include physical or cloud servers, domains, mobile devices, and third-party web services.\nFree resources may also be used, but they are typically limited.\nInfrastructure can also include physical components such as countermeasures that degrade or disrupt AI components or sensors, including printed materials, wearables, or disguises.\n\nUse of these infrastructure solutions allows an adversary to stage, launch, and execute an operation.\nSolutions may help adversary operations blend in with traffic that is seen as normal, such as contact to third-party web services.\nDepending on the implementation, adversaries may use infrastructure that makes it difficult to physically tie back to them as well as utilize infrastructure that can be rapidly provisioned, modified, and shut down.",
|
||||||
"meta": {
|
"meta": {
|
||||||
"external_id": "AML.T0008",
|
"external_id": "AML.T0008",
|
||||||
"kill_chain": [
|
"kill_chain": [
|
||||||
|
@ -393,7 +393,53 @@
|
||||||
"value": "Consumer Hardware"
|
"value": "Consumer Hardware"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"description": "Adversaries may gain initial access to a system by compromising the unique portions of the ML supply chain.\nThis could include [GPU Hardware](/techniques/AML.T0010.000), [Data](/techniques/AML.T0010.002) and its annotations, parts of the ML [ML Software](/techniques/AML.T0010.001) stack, or the [Model](/techniques/AML.T0010.003) itself.\nIn some instances the attacker will need secondary access to fully carry out an attack using compromised components of the supply chain.\n",
|
"description": "Adversaries may acquire domains that can be used during targeting. Domain names are the human readable names used to represent one or more IP addresses. They can be purchased or, in some cases, acquired for free.\n\nAdversaries may use acquired domains for a variety of purposes (see [ATT&CK](https://attack.mitre.org/techniques/T1583/001/)). Large AI datasets are often distributed as a list of URLs to individual datapoints. Adversaries may acquire expired domains that are included in these datasets and replace individual datapoints with poisoned examples ([Publish Poisoned Datasets](/techniques/AML.T0019)).",
|
||||||
|
"meta": {
|
||||||
|
"external_id": "AML.T0008.002",
|
||||||
|
"kill_chain": [
|
||||||
|
"mitre-atlas:discovery"
|
||||||
|
],
|
||||||
|
"mitre_platforms": [
|
||||||
|
"ATLAS"
|
||||||
|
],
|
||||||
|
"refs": [
|
||||||
|
"https://atlas.mitre.org/techniques/AML.T0008.002"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"related": [
|
||||||
|
{
|
||||||
|
"dest-uuid": "782c346d-9af5-4145-b6c6-b9cccdc2c950",
|
||||||
|
"type": "subtechnique-of"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"uuid": "dd88c52a-ec0f-42fa-8622-f992d6bcf2d5",
|
||||||
|
"value": "Domains"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Adversaries may acquire or manufacture physical countermeasures to aid or support their attack.\n\nThese components may be used to disrupt or degrade the model, such as adversarial patterns printed on stickers or T-shirts, disguises, or decoys. They may also be used to disrupt or degrade the sensors used in capturing data, such as laser pointers, light bulbs, or other tools.",
|
||||||
|
"meta": {
|
||||||
|
"external_id": "AML.T0008.003",
|
||||||
|
"kill_chain": [
|
||||||
|
"mitre-atlas:discovery"
|
||||||
|
],
|
||||||
|
"mitre_platforms": [
|
||||||
|
"ATLAS"
|
||||||
|
],
|
||||||
|
"refs": [
|
||||||
|
"https://atlas.mitre.org/techniques/AML.T0008.003"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"related": [
|
||||||
|
{
|
||||||
|
"dest-uuid": "782c346d-9af5-4145-b6c6-b9cccdc2c950",
|
||||||
|
"type": "subtechnique-of"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"uuid": "c2ca6b46-bcdf-45a8-b33d-3272c7a65cde",
|
||||||
|
"value": "Physical Countermeasures"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Adversaries may gain initial access to a system by compromising the unique portions of the ML supply chain.\nThis could include [Hardware](/techniques/AML.T0010.000), [Data](/techniques/AML.T0010.002) and its annotations, parts of the ML [ML Software](/techniques/AML.T0010.001) stack, or the [Model](/techniques/AML.T0010.003) itself.\nIn some instances the attacker will need secondary access to fully carry out an attack using compromised components of the supply chain.\n",
|
||||||
"meta": {
|
"meta": {
|
||||||
"external_id": "AML.T0010",
|
"external_id": "AML.T0010",
|
||||||
"kill_chain": [
|
"kill_chain": [
|
||||||
|
@ -410,7 +456,7 @@
|
||||||
"value": "ML Supply Chain Compromise"
|
"value": "ML Supply Chain Compromise"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"description": "Most machine learning systems require access to certain specialized hardware, typically GPUs.\nAdversaries can target machine learning systems by specifically targeting the GPU supply chain.\n",
|
"description": "Adversaries may target AI systems by disrupting or manipulating the hardware supply chain. AI models often run on specialized hardware such as GPUs, TPUs, or embedded devices, but may also be optimized to operate on CPUs.",
|
||||||
"meta": {
|
"meta": {
|
||||||
"external_id": "AML.T0010.000",
|
"external_id": "AML.T0010.000",
|
||||||
"kill_chain": [
|
"kill_chain": [
|
||||||
|
@ -430,7 +476,7 @@
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"uuid": "8dfc1d73-0de8-4daa-a8cf-83e019347395",
|
"uuid": "8dfc1d73-0de8-4daa-a8cf-83e019347395",
|
||||||
"value": "GPU Hardware"
|
"value": "Hardware"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"description": "Most machine learning systems rely on a limited set of machine learning frameworks.\nAn adversary could get access to a large number of machine learning systems through a comprise of one of their supply chains.\nMany machine learning projects also rely on other open source implementations of various algorithms.\nThese can also be compromised in a targeted way to get access to specific systems.\n",
|
"description": "Most machine learning systems rely on a limited set of machine learning frameworks.\nAn adversary could get access to a large number of machine learning systems through a comprise of one of their supply chains.\nMany machine learning projects also rely on other open source implementations of various algorithms.\nThese can also be compromised in a targeted way to get access to specific systems.\n",
|
||||||
|
@ -516,7 +562,7 @@
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"uuid": "8c849dd4-5d15-45aa-b5b2-59c96a3ab939",
|
"uuid": "8c849dd4-5d15-45aa-b5b2-59c96a3ab939",
|
||||||
"value": "User Execution (ATLAS)"
|
"value": "User Execution"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"description": "Adversaries may develop unsafe ML artifacts that when executed have a deleterious effect.\nThe adversary can use this technique to establish persistent access to systems.\nThese models may be introduced via a [ML Supply Chain Compromise](/techniques/AML.T0010).\n\nSerialization of models is a popular technique for model storage, transfer, and loading.\nHowever, this format without proper checking presents an opportunity for code execution.\n",
|
"description": "Adversaries may develop unsafe ML artifacts that when executed have a deleterious effect.\nThe adversary can use this technique to establish persistent access to systems.\nThese models may be introduced via a [ML Supply Chain Compromise](/techniques/AML.T0010).\n\nSerialization of models is a popular technique for model storage, transfer, and loading.\nHowever, this format without proper checking presents an opportunity for code execution.\n",
|
||||||
|
@ -541,6 +587,29 @@
|
||||||
"uuid": "be6ef5c5-1ecb-486d-9743-42085bd2c256",
|
"uuid": "be6ef5c5-1ecb-486d-9743-42085bd2c256",
|
||||||
"value": "Unsafe ML Artifacts"
|
"value": "Unsafe ML Artifacts"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"description": "Adversaries may develop malicious software packages that when imported by a user have a deleterious effect.\nMalicious packages may behave as expected to the user. They may be introduced via [ML Supply Chain Compromise](/techniques/AML.T0010). They may not present as obviously malicious to the user and may appear to be useful for an AI-related task.",
|
||||||
|
"meta": {
|
||||||
|
"external_id": "AML.T0011.001",
|
||||||
|
"kill_chain": [
|
||||||
|
"mitre-atlas:impact"
|
||||||
|
],
|
||||||
|
"mitre_platforms": [
|
||||||
|
"ATLAS"
|
||||||
|
],
|
||||||
|
"refs": [
|
||||||
|
"https://atlas.mitre.org/techniques/AML.T0011.001"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"related": [
|
||||||
|
{
|
||||||
|
"dest-uuid": "89731d07-679e-4da3-8f70-aba314068a89",
|
||||||
|
"type": "subtechnique-of"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"uuid": "7d76070d-2124-4ee6-913d-6015a697eaf6",
|
||||||
|
"value": "Malicious Package"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"description": "Adversaries may obtain and abuse credentials of existing accounts as a means of gaining Initial Access.\nCredentials may take the form of usernames and passwords of individual user accounts or API keys that provide access to various ML resources and services.\n\nCompromised credentials may provide access to additional ML artifacts and allow the adversary to perform [Discover ML Artifacts](/techniques/AML.T0007).\nCompromised credentials may also grant an adversary increased privileges such as write access to ML artifacts used during development or production.\n",
|
"description": "Adversaries may obtain and abuse credentials of existing accounts as a means of gaining Initial Access.\nCredentials may take the form of usernames and passwords of individual user accounts or API keys that provide access to various ML resources and services.\n\nCompromised credentials may provide access to additional ML artifacts and allow the adversary to perform [Discover ML Artifacts](/techniques/AML.T0007).\nCompromised credentials may also grant an adversary increased privileges such as write access to ML artifacts used during development or production.\n",
|
||||||
"meta": {
|
"meta": {
|
||||||
|
@ -556,7 +625,7 @@
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"uuid": "1b047901-cd87-4d1d-aa88-d7335855b65f",
|
"uuid": "1b047901-cd87-4d1d-aa88-d7335855b65f",
|
||||||
"value": "Valid Accounts (ATLAS)"
|
"value": "Valid Accounts"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"description": "Adversaries may discover the ontology of a machine learning model's output space, for example, the types of objects a model can detect.\nThe adversary may discovery the ontology by repeated queries to the model, forcing it to enumerate its output space.\nOr the ontology may be discovered in a configuration file or in documentation about the model.\n\nThe model ontology helps the adversary understand how the model is being used by the victim.\nIt is useful to the adversary in creating targeted attacks.\n",
|
"description": "Adversaries may discover the ontology of a machine learning model's output space, for example, the types of objects a model can detect.\nThe adversary may discovery the ontology by repeated queries to the model, forcing it to enumerate its output space.\nOr the ontology may be discovered in a configuration file or in documentation about the model.\n\nThe model ontology helps the adversary understand how the model is being used by the victim.\nIt is useful to the adversary in creating targeted attacks.\n",
|
||||||
|
@ -626,7 +695,7 @@
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"uuid": "db2b3112-a99b-45a0-be10-c69157b616f0",
|
"uuid": "db2b3112-a99b-45a0-be10-c69157b616f0",
|
||||||
"value": "Obtain Capabilities (ATLAS)"
|
"value": "Obtain Capabilities"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"description": "Adversaries may search for existing open source implementations of machine learning attacks. The research community often publishes their code for reproducibility and to further future research. Libraries intended for research purposes, such as CleverHans, the Adversarial Robustness Toolbox, and FoolBox, can be weaponized by an adversary. Adversaries may also obtain and use tools that were not originally designed for adversarial ML attacks as part of their attack.",
|
"description": "Adversaries may search for existing open source implementations of machine learning attacks. The research community often publishes their code for reproducibility and to further future research. Libraries intended for research purposes, such as CleverHans, the Adversarial Robustness Toolbox, and FoolBox, can be weaponized by an adversary. Adversaries may also obtain and use tools that were not originally designed for adversarial ML attacks as part of their attack.",
|
||||||
|
@ -672,7 +741,7 @@
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"uuid": "d18afb87-0de2-43dc-ab6a-eb914a7dbae7",
|
"uuid": "d18afb87-0de2-43dc-ab6a-eb914a7dbae7",
|
||||||
"value": "Software Tools (ATLAS)"
|
"value": "Software Tools"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"description": "Adversaries may develop their own capabilities to support operations. This process encompasses identifying requirements, building solutions, and deploying capabilities. Capabilities used to support attacks on ML systems are not necessarily ML-based themselves. Examples include setting up websites with adversarial information or creating Jupyter notebooks with obfuscated exfiltration code.",
|
"description": "Adversaries may develop their own capabilities to support operations. This process encompasses identifying requirements, building solutions, and deploying capabilities. Capabilities used to support attacks on ML systems are not necessarily ML-based themselves. Examples include setting up websites with adversarial information or creating Jupyter notebooks with obfuscated exfiltration code.",
|
||||||
|
@ -689,7 +758,7 @@
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"uuid": "c9153697-7d92-43aa-a16e-38436beff79d",
|
"uuid": "c9153697-7d92-43aa-a16e-38436beff79d",
|
||||||
"value": "Develop Capabilities (ATLAS)"
|
"value": "Develop Capabilities"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"description": "Adversaries may develop their own adversarial attacks.\nThey may leverage existing libraries as a starting point ([Adversarial ML Attack Implementations](/techniques/AML.T0016.000)).\nThey may implement ideas described in public research papers or develop custom made attacks for the victim model.\n",
|
"description": "Adversaries may develop their own adversarial attacks.\nThey may leverage existing libraries as a starting point ([Adversarial ML Attack Implementations](/techniques/AML.T0016.000)).\nThey may implement ideas described in public research papers or develop custom made attacks for the victim model.\n",
|
||||||
|
@ -830,10 +899,10 @@
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"uuid": "aaa79096-814f-4fb0-a553-1701b2765317",
|
"uuid": "aaa79096-814f-4fb0-a553-1701b2765317",
|
||||||
"value": "Establish Accounts (ATLAS)"
|
"value": "Establish Accounts"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"description": "Adversaries may exfiltrate private information via [ML Model Inference API Access](/techniques/AML.T0040).\nML Models have been shown leak private information about their training data (e.g. [Infer Training Data Membership](/techniques/AML.T0024.000), [Invert ML Model](/techniques/AML.T0024.001)).\nThe model itself may also be extracted ([Extract ML Model](/techniques/AML.T0024.002)) for the purposes of [ML Intellectual Property Theft](/techniques/AML.T0048.004).\n\nExfiltration of information relating to private training data raises privacy concerns.\nPrivate training data may include personally identifiable information, or other protected data.\n",
|
"description": "Adversaries may exfiltrate private information via [AI Model Inference API Access](/techniques/AML.T0040).\nML Models have been shown leak private information about their training data (e.g. [Infer Training Data Membership](/techniques/AML.T0024.000), [Invert ML Model](/techniques/AML.T0024.001)).\nThe model itself may also be extracted ([Extract ML Model](/techniques/AML.T0024.002)) for the purposes of [ML Intellectual Property Theft](/techniques/AML.T0048.004).\n\nExfiltration of information relating to private training data raises privacy concerns.\nPrivate training data may include personally identifiable information, or other protected data.\n",
|
||||||
"meta": {
|
"meta": {
|
||||||
"external_id": "AML.T0024",
|
"external_id": "AML.T0024",
|
||||||
"kill_chain": [
|
"kill_chain": [
|
||||||
|
@ -896,7 +965,7 @@
|
||||||
"value": "Invert ML Model"
|
"value": "Invert ML Model"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"description": "Adversaries may extract a functional copy of a private model.\nBy repeatedly querying the victim's [ML Model Inference API Access](/techniques/AML.T0040), the adversary can collect the target model's inferences into a dataset.\nThe inferences are used as labels for training a separate model offline that will mimic the behavior and performance of the target model.\n\nAdversaries may extract the model to avoid paying per query in a machine learning as a service setting.\nModel extraction is used for [ML Intellectual Property Theft](/techniques/AML.T0048.004).\n",
|
"description": "Adversaries may extract a functional copy of a private model.\nBy repeatedly querying the victim's [AI Model Inference API Access](/techniques/AML.T0040), the adversary can collect the target model's inferences into a dataset.\nThe inferences are used as labels for training a separate model offline that will mimic the behavior and performance of the target model.\n\nAdversaries may extract the model to avoid paying per query in a machine learning as a service setting.\nModel extraction is used for [ML Intellectual Property Theft](/techniques/AML.T0048.004).\n",
|
||||||
"meta": {
|
"meta": {
|
||||||
"external_id": "AML.T0024.002",
|
"external_id": "AML.T0024.002",
|
||||||
"kill_chain": [
|
"kill_chain": [
|
||||||
|
@ -1018,7 +1087,7 @@
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"uuid": "9f998b9a-d20e-48e7-bee5-034ed5a696dd",
|
"uuid": "9f998b9a-d20e-48e7-bee5-034ed5a696dd",
|
||||||
"value": "Data from Information Repositories (ATLAS)"
|
"value": "Data from Information Repositories"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"description": "Adversaries may search local system sources, such as file systems and configuration files or local databases, to find files of interest and sensitive data prior to Exfiltration.\n\nThis can include basic fingerprinting information and sensitive data such as ssh keys.\n",
|
"description": "Adversaries may search local system sources, such as file systems and configuration files or local databases, to find files of interest and sensitive data prior to Exfiltration.\n\nThis can include basic fingerprinting information and sensitive data such as ssh keys.\n",
|
||||||
|
@ -1035,10 +1104,10 @@
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"uuid": "a7f17bbd-e2fd-4413-89e1-a5e5226cc23c",
|
"uuid": "a7f17bbd-e2fd-4413-89e1-a5e5226cc23c",
|
||||||
"value": "Data from Local System (ATLAS)"
|
"value": "Data from Local System"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"description": "Adversaries may gain access to a model via legitimate access to the inference API.\nInference API access can be a source of information to the adversary ([Discover ML Model Ontology](/techniques/AML.T0013), [Discover ML Model Family](/techniques/AML.T0014)), a means of staging the attack ([Verify Attack](/techniques/AML.T0042), [Craft Adversarial Data](/techniques/AML.T0043)), or for introducing data to the target system for Impact ([Evade ML Model](/techniques/AML.T0015), [Erode ML Model Integrity](/techniques/AML.T0031)).\n",
|
"description": "Adversaries may gain access to a model via legitimate access to the inference API.\nInference API access can be a source of information to the adversary ([Discover ML Model Ontology](/techniques/AML.T0013), [Discover ML Model Family](/techniques/AML.T0014)), a means of staging the attack ([Verify Attack](/techniques/AML.T0042), [Craft Adversarial Data](/techniques/AML.T0043)), or for introducing data to the target system for Impact ([Evade ML Model](/techniques/AML.T0015), [Erode ML Model Integrity](/techniques/AML.T0031)).\n\nMany systems rely on the same models provided via an inference API, which means they share the same vulnerabilities. This is especially true of foundation models which are prohibitively resource intensive to train. Adversaries may use their access to model APIs to identify vulnerabilities such as jailbreaks or hallucinations and then target applications that use the same models.",
|
||||||
"meta": {
|
"meta": {
|
||||||
"external_id": "AML.T0040",
|
"external_id": "AML.T0040",
|
||||||
"kill_chain": [
|
"kill_chain": [
|
||||||
|
@ -1052,7 +1121,7 @@
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"uuid": "90a420d4-3f03-4800-86c0-223c4376804a",
|
"uuid": "90a420d4-3f03-4800-86c0-223c4376804a",
|
||||||
"value": "ML Model Inference API Access"
|
"value": "AI Model Inference API Access"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"description": "In addition to the attacks that take place purely in the digital domain, adversaries may also exploit the physical environment for their attacks.\nIf the model is interacting with data collected from the real world in some way, the adversary can influence the model through access to wherever the data is being collected.\nBy modifying the data in the collection process, the adversary can perform modified versions of attacks designed for digital access.\n",
|
"description": "In addition to the attacks that take place purely in the digital domain, adversaries may also exploit the physical environment for their attacks.\nIf the model is interacting with data collected from the real world in some way, the adversary can influence the model through access to wherever the data is being collected.\nBy modifying the data in the collection process, the adversary can perform modified versions of attacks designed for digital access.\n",
|
||||||
|
@ -1129,7 +1198,7 @@
|
||||||
"value": "White-Box Optimization"
|
"value": "White-Box Optimization"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"description": "In Black-Box attacks, the adversary has black-box (i.e. [ML Model Inference API Access](/techniques/AML.T0040) via API access) access to the target model.\nWith black-box attacks, the adversary may be using an API that the victim is monitoring.\nThese attacks are generally less effective and require more inferences than [White-Box Optimization](/techniques/AML.T0043.000) attacks, but they require much less access.\n",
|
"description": "In Black-Box attacks, the adversary has black-box (i.e. [AI Model Inference API Access](/techniques/AML.T0040) via API access) access to the target model.\nWith black-box attacks, the adversary may be using an API that the victim is monitoring.\nThese attacks are generally less effective and require more inferences than [White-Box Optimization](/techniques/AML.T0043.000) attacks, but they require much less access.\n",
|
||||||
"meta": {
|
"meta": {
|
||||||
"external_id": "AML.T0043.001",
|
"external_id": "AML.T0043.001",
|
||||||
"kill_chain": [
|
"kill_chain": [
|
||||||
|
@ -1152,7 +1221,7 @@
|
||||||
"value": "Black-Box Optimization"
|
"value": "Black-Box Optimization"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"description": "In Black-Box Transfer attacks, the adversary uses one or more proxy models (trained via [Create Proxy ML Model](/techniques/AML.T0005) or [Train Proxy via Replication](/techniques/AML.T0005.001)) they have full access to and are representative of the target model.\nThe adversary uses [White-Box Optimization](/techniques/AML.T0043.000) on the proxy models to generate adversarial examples.\nIf the set of proxy models are close enough to the target model, the adversarial example should generalize from one to another.\nThis means that an attack that works for the proxy models will likely then work for the target model.\nIf the adversary has [ML Model Inference API Access](/techniques/AML.T0040), they may use [Verify Attack](/techniques/AML.T0042) to confirm the attack is working and incorporate that information into their training process.\n",
|
"description": "In Black-Box Transfer attacks, the adversary uses one or more proxy models (trained via [Create Proxy ML Model](/techniques/AML.T0005) or [Train Proxy via Replication](/techniques/AML.T0005.001)) they have full access to and are representative of the target model.\nThe adversary uses [White-Box Optimization](/techniques/AML.T0043.000) on the proxy models to generate adversarial examples.\nIf the set of proxy models are close enough to the target model, the adversarial example should generalize from one to another.\nThis means that an attack that works for the proxy models will likely then work for the target model.\nIf the adversary has [AI Model Inference API Access](/techniques/AML.T0040), they may use [Verify Attack](/techniques/AML.T0042) to confirm the attack is working and incorporate that information into their training process.\n",
|
||||||
"meta": {
|
"meta": {
|
||||||
"external_id": "AML.T0043.002",
|
"external_id": "AML.T0043.002",
|
||||||
"kill_chain": [
|
"kill_chain": [
|
||||||
|
@ -1418,7 +1487,7 @@
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"uuid": "47d73872-5336-44f7-81e3-d30bc7e039dd",
|
"uuid": "47d73872-5336-44f7-81e3-d30bc7e039dd",
|
||||||
"value": "Exploit Public-Facing Application (ATLAS)"
|
"value": "Exploit Public-Facing Application"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"description": "Adversaries may abuse command and script interpreters to execute commands, scripts, or binaries. These interfaces and languages provide ways of interacting with computer systems and are a common feature across many different platforms. Most systems come with some built-in command-line interface and scripting capabilities, for example, macOS and Linux distributions include some flavor of Unix Shell while Windows installations include the Windows Command Shell and PowerShell.\n\nThere are also cross-platform interpreters such as Python, as well as those commonly associated with client applications such as JavaScript and Visual Basic.\n\nAdversaries may abuse these technologies in various ways as a means of executing arbitrary commands. Commands and scripts can be embedded in Initial Access payloads delivered to victims as lure documents or as secondary payloads downloaded from an existing C2. Adversaries may also execute commands through interactive terminals/shells, as well as utilize various Remote Services in order to achieve remote Execution.\n",
|
"description": "Adversaries may abuse command and script interpreters to execute commands, scripts, or binaries. These interfaces and languages provide ways of interacting with computer systems and are a common feature across many different platforms. Most systems come with some built-in command-line interface and scripting capabilities, for example, macOS and Linux distributions include some flavor of Unix Shell while Windows installations include the Windows Command Shell and PowerShell.\n\nThere are also cross-platform interpreters such as Python, as well as those commonly associated with client applications such as JavaScript and Visual Basic.\n\nAdversaries may abuse these technologies in various ways as a means of executing arbitrary commands. Commands and scripts can be embedded in Initial Access payloads delivered to victims as lure documents or as secondary payloads downloaded from an existing C2. Adversaries may also execute commands through interactive terminals/shells, as well as utilize various Remote Services in order to achieve remote Execution.\n",
|
||||||
|
@ -1435,7 +1504,7 @@
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"uuid": "716d3a6b-2f8c-4a1f-85f7-d884bb7b2800",
|
"uuid": "716d3a6b-2f8c-4a1f-85f7-d884bb7b2800",
|
||||||
"value": "Command and Scripting Interpreter (ATLAS)"
|
"value": "Command and Scripting Interpreter"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"description": "An adversary may craft malicious prompts as inputs to an LLM that cause the LLM to act in unintended ways.\nThese \"prompt injections\" are often designed to cause the model to ignore aspects of its original instructions and follow the adversary's instructions instead.\n\nPrompt Injections can be an initial access vector to the LLM that provides the adversary with a foothold to carry out other steps in their operation.\nThey may be designed to bypass defenses in the LLM, or allow the adversary to issue privileged commands.\nThe effects of a prompt injection can persist throughout an interactive session with an LLM.\n\nMalicious prompts may be injected directly by the adversary ([Direct](/techniques/AML.T0051.000)) either to leverage the LLM to generate harmful content or to gain a foothold on the system and lead to further effects.\nPrompts may also be injected indirectly when as part of its normal operation the LLM ingests the malicious prompt from another data source ([Indirect](/techniques/AML.T0051.001)). This type of injection can be used by the adversary to a foothold on the system or to target the user of the LLM.\n",
|
"description": "An adversary may craft malicious prompts as inputs to an LLM that cause the LLM to act in unintended ways.\nThese \"prompt injections\" are often designed to cause the model to ignore aspects of its original instructions and follow the adversary's instructions instead.\n\nPrompt Injections can be an initial access vector to the LLM that provides the adversary with a foothold to carry out other steps in their operation.\nThey may be designed to bypass defenses in the LLM, or allow the adversary to issue privileged commands.\nThe effects of a prompt injection can persist throughout an interactive session with an LLM.\n\nMalicious prompts may be injected directly by the adversary ([Direct](/techniques/AML.T0051.000)) either to leverage the LLM to generate harmful content or to gain a foothold on the system and lead to further effects.\nPrompts may also be injected indirectly when as part of its normal operation the LLM ingests the malicious prompt from another data source ([Indirect](/techniques/AML.T0051.001)). This type of injection can be used by the adversary to a foothold on the system or to target the user of the LLM.\n",
|
||||||
|
@ -1524,7 +1593,7 @@
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"uuid": "1f1f14ef-7d04-42b2-9f05-b740113b30f5",
|
"uuid": "1f1f14ef-7d04-42b2-9f05-b740113b30f5",
|
||||||
"value": "Phishing (ATLAS)"
|
"value": "Phishing"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"description": "Adversaries may turn LLMs into targeted social engineers.\nLLMs are capable of interacting with users via text conversations.\nThey can be instructed by an adversary to seek sensitive information from a user and act as effective social engineers.\nThey can be targeted towards particular personas defined by the adversary.\nThis allows adversaries to scale spearphishing efforts and target individuals to reveal private information such as credentials to privileged systems.\n",
|
"description": "Adversaries may turn LLMs into targeted social engineers.\nLLMs are capable of interacting with users via text conversations.\nThey can be instructed by an adversary to seek sensitive information from a user and act as effective social engineers.\nThey can be targeted towards particular personas defined by the adversary.\nThis allows adversaries to scale spearphishing efforts and target individuals to reveal private information such as credentials to privileged systems.\n",
|
||||||
|
@ -1600,7 +1669,7 @@
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"uuid": "04d61746-9df1-468e-99d3-0a4685856deb",
|
"uuid": "04d61746-9df1-468e-99d3-0a4685856deb",
|
||||||
"value": "Unsecured Credentials (ATLAS)"
|
"value": "Unsecured Credentials"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"description": "An adversary may induce an LLM to reveal its initial instructions, or \"meta prompt.\"\nDiscovering the meta prompt can inform the adversary about the internal workings of the system.\nPrompt engineering is an emerging field that requires expertise and exfiltrating the meta prompt can prompt in order to steal valuable intellectual property.\n",
|
"description": "An adversary may induce an LLM to reveal its initial instructions, or \"meta prompt.\"\nDiscovering the meta prompt can inform the adversary about the internal workings of the system.\nPrompt engineering is an emerging field that requires expertise and exfiltrating the meta prompt can prompt in order to steal valuable intellectual property.\n",
|
||||||
|
@ -1636,7 +1705,109 @@
|
||||||
},
|
},
|
||||||
"uuid": "45d378aa-20ae-401d-bf61-7f00104eeaca",
|
"uuid": "45d378aa-20ae-401d-bf61-7f00104eeaca",
|
||||||
"value": "LLM Data Leakage"
|
"value": "LLM Data Leakage"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Adversaries may publish a poisoned model to a public location such as a model registry or code repository. The poisoned model may be a novel model or a poisoned variant of an existing open-source model. This model may be introduced to a victim system via [ML Supply Chain Compromise](/techniques/AML.T0010).",
|
||||||
|
"meta": {
|
||||||
|
"external_id": "AML.T0058",
|
||||||
|
"kill_chain": [
|
||||||
|
"mitre-atlas:resource-development"
|
||||||
|
],
|
||||||
|
"mitre_platforms": [
|
||||||
|
"ATLAS"
|
||||||
|
],
|
||||||
|
"refs": [
|
||||||
|
"https://atlas.mitre.org/techniques/AML.T0058"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"uuid": "e3b9d41a-d2f9-4825-942f-1c4a30b4d2f9",
|
||||||
|
"value": "Publish Poisoned Models"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Adversaries may poison or manipulate portions of a dataset to reduce its usefulness, reduce trust, and cause users to waste resources correcting errors.",
|
||||||
|
"meta": {
|
||||||
|
"external_id": "AML.T0059",
|
||||||
|
"kill_chain": [
|
||||||
|
"mitre-atlas:impact"
|
||||||
|
],
|
||||||
|
"mitre_platforms": [
|
||||||
|
"ATLAS"
|
||||||
|
],
|
||||||
|
"refs": [
|
||||||
|
"https://atlas.mitre.org/techniques/AML.T0059"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"uuid": "89731d07-679e-4da3-8f70-aba314068a89",
|
||||||
|
"value": "Erode Dataset Integrity"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Adversaries may create an entity they control, such as a software package, website, or email address to a source hallucinated by an LLM. The hallucinations may take the form of package names commands, URLs, company names, or email addresses that point the victim to the entity controlled by the adversary. When the victim interacts with the adversary-controlled entity, the attack can proceed.",
|
||||||
|
"meta": {
|
||||||
|
"external_id": "AML.T0060",
|
||||||
|
"kill_chain": [
|
||||||
|
"mitre-atlas:resource-development"
|
||||||
|
],
|
||||||
|
"mitre_platforms": [
|
||||||
|
"ATLAS"
|
||||||
|
],
|
||||||
|
"refs": [
|
||||||
|
"https://atlas.mitre.org/techniques/AML.T0060"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"uuid": "53c52153-8a3f-4952-8e20-e9ab7ca899a7",
|
||||||
|
"value": "Publish Hallucinated Entities"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "An adversary may use a carefully crafted [LLM Prompt Injection](/techniques/AML.T0051) designed to cause the LLM to replicate the prompt as part of its output. This allows the prompt to propagate to other LLMs and persist on the system. The self-replicating prompt is typically paired with other malicious instructions (ex: [LLM Jailbreak](/techniques/AML.T0054), [LLM Data Leakage](/techniques/AML.T0057)).",
|
||||||
|
"meta": {
|
||||||
|
"external_id": "AML.T0061",
|
||||||
|
"kill_chain": [
|
||||||
|
"mitre-atlas:persistence"
|
||||||
|
],
|
||||||
|
"mitre_platforms": [
|
||||||
|
"ATLAS"
|
||||||
|
],
|
||||||
|
"refs": [
|
||||||
|
"https://atlas.mitre.org/techniques/AML.T0061"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"uuid": "f16a79dd-95ff-4eb7-a986-18a727c4fc9d",
|
||||||
|
"value": "LLM Prompt Self-Replication"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Adversaries may prompt large language models and identify hallucinated entities.\nThey may request software packages, commands, URLs, organization names, or e-mail addresses, and identify hallucinations with no connected real-world source. Discovered hallucinations provide the adversary with potential targets to [Publish Hallucinated Entities](/techniques/AML.T0060). Different LLMs have been shown to produce the same hallucinations, so the hallucinations exploited by an adversary may affect users of other LLMs.",
|
||||||
|
"meta": {
|
||||||
|
"external_id": "AML.T0062",
|
||||||
|
"kill_chain": [
|
||||||
|
"mitre-atlas:discovery"
|
||||||
|
],
|
||||||
|
"mitre_platforms": [
|
||||||
|
"ATLAS"
|
||||||
|
],
|
||||||
|
"refs": [
|
||||||
|
"https://atlas.mitre.org/techniques/AML.T0062"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"uuid": "782c346d-9af5-4145-b6c6-b9cccdc2c950",
|
||||||
|
"value": "Discover LLM Hallucinations"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Adversaries may discover model outputs, such as class scores, whose presence is not required for the system to function and are not intended for use by the end user. Model outputs may be found in logs or may be included in API responses.\nModel outputs may enable the adversary to identify weaknesses in the model and develop attacks.",
|
||||||
|
"meta": {
|
||||||
|
"external_id": "AML.T0063",
|
||||||
|
"kill_chain": [
|
||||||
|
"mitre-atlas:discovery"
|
||||||
|
],
|
||||||
|
"mitre_platforms": [
|
||||||
|
"ATLAS"
|
||||||
|
],
|
||||||
|
"refs": [
|
||||||
|
"https://atlas.mitre.org/techniques/AML.T0063"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"uuid": "0926acf1-f3e0-40ef-af0d-89515371bf89",
|
||||||
|
"value": "Discover AI Model Outputs"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"version": 13
|
"version": 14
|
||||||
}
|
}
|
||||||
|
|
|
@ -10,7 +10,7 @@
|
||||||
"uuid": "951d5a45-43c2-422b-90af-059014f15714",
|
"uuid": "951d5a45-43c2-422b-90af-059014f15714",
|
||||||
"values": [
|
"values": [
|
||||||
{
|
{
|
||||||
"description": "Limit the public release of technical information about the machine learning stack used in an organization's products or services. Technical knowledge of how machine learning is used can be leveraged by adversaries to perform targeting and tailor attacks to the target system. Additionally, consider limiting the release of organizational information - including physical locations, researcher names, and department structures - from which technical details such as machine learning techniques, model architectures, or datasets may be inferred.\n",
|
"description": "Limit the public release of technical information about the machine learning stack used in an organization's products or services. Technical knowledge of how machine learning is used can be leveraged by adversaries to perform targeting and tailor attacks to the target system. Additionally, consider limiting the release of organizational information - including physical locations, researcher names, and department structures - from which technical details such as machine learning techniques, model architectures, or datasets may be inferred.",
|
||||||
"meta": {
|
"meta": {
|
||||||
"external_id": "AML.M0000",
|
"external_id": "AML.M0000",
|
||||||
"refs": [
|
"refs": [
|
||||||
|
@ -48,7 +48,7 @@
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"uuid": "40076545-e797-4508-a294-943096a12111",
|
"uuid": "40076545-e797-4508-a294-943096a12111",
|
||||||
"value": "Limit Release of Public Information"
|
"value": "Limit Public Release of Information"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"description": "Limit public release of technical project details including data, algorithms, model architectures, and model checkpoints that are used in production, or that are representative of those used in production.\n",
|
"description": "Limit public release of technical project details including data, algorithms, model architectures, and model checkpoints that are used in production, or that are representative of those used in production.\n",
|
||||||
|
@ -765,7 +765,316 @@
|
||||||
],
|
],
|
||||||
"uuid": "7b00dd51-f719-433d-afd6-3d386f64386d",
|
"uuid": "7b00dd51-f719-433d-afd6-3d386f64386d",
|
||||||
"value": "Control Access to ML Models and Data in Production"
|
"value": "Control Access to ML Models and Data in Production"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Guardrails are safety controls that are placed between a generative AI model and the output shared with the user to prevent undesired inputs and outputs.\nGuardrails can take the form of validators such as filters, rule-based logic, or regular expressions, as well as AI-based approaches, such as classifiers and utilizing LLMs, or named entity recognition (NER) to evaluate the safety of the prompt or response. Domain specific methods can be employed to reduce risks in a variety of areas such as etiquette, brand damage, jailbreaking, false information, code exploits, SQL injections, and data leakage.",
|
||||||
|
"meta": {
|
||||||
|
"external_id": "AML.M0020",
|
||||||
|
"refs": [
|
||||||
|
"https://atlas.mitre.org/mitigations/AML.M0020"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"related": [
|
||||||
|
{
|
||||||
|
"dest-uuid": "172427e3-9ecc-49a3-b628-96b824cc4131",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "19cd2d12-66ff-487c-a05c-e058b027efc9",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "45d378aa-20ae-401d-bf61-7f00104eeaca",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "adbb0dd5-ff66-4b2f-869f-bfb3fdb45fc8",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "d2cf31e0-a550-4fe0-8fdb-8941b3ac00d9",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "e98acce8-ed69-4ebe-845b-1bcb662836ba",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"uuid": "b8511570-3320-4733-a0e1-134e376e7530",
|
||||||
|
"value": "Generative AI Guardrails"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Guidelines are safety controls that are placed between user-provided input and a generative AI model to help direct the model to produce desired outputs and prevent undesired outputs.\n\nGuidelines can be implemented as instructions appended to all user prompts or as part of the instructions in the system prompt. They can define the goal(s), role, and voice of the system, as well as outline safety and security parameters.",
|
||||||
|
"meta": {
|
||||||
|
"external_id": "AML.M0021",
|
||||||
|
"refs": [
|
||||||
|
"https://atlas.mitre.org/mitigations/AML.M0021"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"related": [
|
||||||
|
{
|
||||||
|
"dest-uuid": "172427e3-9ecc-49a3-b628-96b824cc4131",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "19cd2d12-66ff-487c-a05c-e058b027efc9",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "45d378aa-20ae-401d-bf61-7f00104eeaca",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "adbb0dd5-ff66-4b2f-869f-bfb3fdb45fc8",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "e98acce8-ed69-4ebe-845b-1bcb662836ba",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"uuid": "d55bd0c8-2db0-400b-9097-c7cec00e2b91",
|
||||||
|
"value": "Generative AI Guidelines"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "When training or fine-tuning a generative AI model it is important to utilize techniques that improve model alignment with safety, security, and content policies.\n\nThe fine-tuning process can potentially remove built-in safety mechanisms in a generative AI model, but utilizing techniques such as Supervised Fine-Tuning, Reinforcement Learning from Human Feedback or AI Feedback, and Targeted Safety Context Distillation can improve the safety and alignment of the model.",
|
||||||
|
"meta": {
|
||||||
|
"external_id": "AML.M0022",
|
||||||
|
"refs": [
|
||||||
|
"https://atlas.mitre.org/mitigations/AML.M0022"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"related": [
|
||||||
|
{
|
||||||
|
"dest-uuid": "172427e3-9ecc-49a3-b628-96b824cc4131",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "19cd2d12-66ff-487c-a05c-e058b027efc9",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "45d378aa-20ae-401d-bf61-7f00104eeaca",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "adbb0dd5-ff66-4b2f-869f-bfb3fdb45fc8",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "e98acce8-ed69-4ebe-845b-1bcb662836ba",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"uuid": "1fca595d-b140-4ce0-8fd8-c4c6bee87540",
|
||||||
|
"value": "Generative AI Model Alignment"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "An AI Bill of Materials (AI BOM) contains a full listing of artifacts and resources that were used in building the AI. The AI BOM can help mitigate supply chain risks and enable rapid response to reported vulnerabilities.\n\nThis can include maintaining dataset provenance, i.e. a detailed history of datasets used for AI applications. The history can include information about the dataset source as well as well as a complete record of any modifications.",
|
||||||
|
"meta": {
|
||||||
|
"external_id": "AML.M0023",
|
||||||
|
"refs": [
|
||||||
|
"https://atlas.mitre.org/mitigations/AML.M0023"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"related": [
|
||||||
|
{
|
||||||
|
"dest-uuid": "0ec538ca-589b-4e42-bcaa-06097a0d679f",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "be6ef5c5-1ecb-486d-9743-42085bd2c256",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "e3b9d41a-d2f9-4825-942f-1c4a30b4d2f9",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "f4fc2abd-71a4-401a-a742-18fc5aeb4bc3",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"uuid": "1f63b56d-034f-477d-ab49-399c1aa1a22a",
|
||||||
|
"value": "AI Bill of Materials"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Implement logging of inputs and outputs of deployed AI models. Monitoring logs can help to detect security threats and mitigate impacts.",
|
||||||
|
"meta": {
|
||||||
|
"external_id": "AML.M0024",
|
||||||
|
"refs": [
|
||||||
|
"https://atlas.mitre.org/mitigations/AML.M0024"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"related": [
|
||||||
|
{
|
||||||
|
"dest-uuid": "19cd2d12-66ff-487c-a05c-e058b027efc9",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "86b5f486-afb8-4aa9-991f-0e24d5737f0c",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "90a420d4-3f03-4800-86c0-223c4376804a",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "a3660a2d-f6e5-4f1b-9618-332cceb389c8",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "a4a55526-2f1f-403b-9691-609e46381e17",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "b07d147f-51c8-4eb6-9a05-09c86762a9c1",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "b5626410-b33d-4487-9c0f-2b7d844b8e95",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "d911e8cb-0601-42f1-90de-7ce0b21cd578",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "e19c6f8a-f1e2-46cc-9387-03a3092f01ed",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "f78e0ac3-6d72-42ed-b20a-e10d8c752cf6",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"uuid": "fa5108b2-3dc8-42dc-93a3-902f1bf74521",
|
||||||
|
"value": "AI Telemetry Logging"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"description": "Maintain a detailed history of datasets used for AI applications. The history should include information about the dataset's source as well as a complete record of any modifications.",
|
||||||
|
"meta": {
|
||||||
|
"external_id": "AML.M0025",
|
||||||
|
"refs": [
|
||||||
|
"https://atlas.mitre.org/mitigations/AML.M0025"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"related": [
|
||||||
|
{
|
||||||
|
"dest-uuid": "0ec538ca-589b-4e42-bcaa-06097a0d679f",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "8d644240-ad99-4410-a7f8-3ef8f53a463e",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dest-uuid": "e0eb2b64-aebd-4412-80f3-b71d7805a65f",
|
||||||
|
"tags": [
|
||||||
|
"estimative-language:likelihood-probability=\"almost-certain\""
|
||||||
|
],
|
||||||
|
"type": "mitigates"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"uuid": "005a5427-4b1e-41c2-a7aa-eda9ae9a9815",
|
||||||
|
"value": "Maintain AI Dataset Provenance"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"version": 12
|
"version": 13
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,4 +1,7 @@
|
||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
|
#
|
||||||
|
# STIX files for ATLAS are available in the navigator
|
||||||
|
# https://github.com/mitre-atlas/atlas-navigator-data
|
||||||
import json
|
import json
|
||||||
import re
|
import re
|
||||||
import os
|
import os
|
||||||
|
|
Loading…
Reference in a new issue