{
  "authors": [
    "MITRE"
  ],
  "category": "course-of-action",
  "description": "MITRE ATLAS Mitigation - Adversarial Threat Landscape for Artificial-Intelligence Systems",
  "name": "MITRE ATLAS Course of Action",
  "source": "https://github.com/mitre-atlas/atlas-navigator-data",
  "type": "mitre-atlas-course-of-action",
  "uuid": "951d5a45-43c2-422b-90af-059014f15714",
  "values": [
    {
      "description": "Limit the public release of technical information about the machine learning stack used in an organization's products or services. Technical knowledge of how machine learning is used can be leveraged by adversaries to perform targeting and tailor attacks to the target system. Additionally, consider limiting the release of organizational information - including physical locations, researcher names, and department structures - from which technical details such as machine learning techniques, model architectures, or datasets may be inferred.",
      "meta": {
        "external_id": "AML.M0000",
        "refs": [
          "https://atlas.mitre.org/mitigations/AML.M0000"
        ]
      },
      "related": [
        {
          "dest-uuid": "65d21e6b-7abe-4623-8f5c-88011cb362cb",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "8c26f51a-c403-4c4d-852a-a1c56fe9e7cd",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "aa17fe8d-62f8-4c4c-b7a2-6858c82dd84b",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "b23cda85-3457-406d-b043-24d2cf9e6fcf",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        }
      ],
      "uuid": "40076545-e797-4508-a294-943096a12111",
      "value": "Limit Public Release of Information"
    },
    {
      "description": "Limit public release of technical project details including data, algorithms, model architectures, and model checkpoints that are used in production, or that are representative of those used in production.\n",
      "meta": {
        "external_id": "AML.M0001",
        "refs": [
          "https://atlas.mitre.org/mitigations/AML.M0001"
        ]
      },
      "related": [
        {
          "dest-uuid": "0ec538ca-589b-4e42-bcaa-06097a0d679f",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "a3baff3d-7228-4ab7-ae00-ffe150e7ef8a",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "c086784e-1494-4f75-a4a0-d3ad054b9428",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        }
      ],
      "uuid": "79c75215-ada9-4c22-bfed-7d13fb6e966e",
      "value": "Limit Model Artifact Release"
    },
    {
      "description": "Decreasing the fidelity of model outputs provided to the end user can reduce an adversary's ability to extract information about the model and optimize attacks for the model.\n",
      "meta": {
        "external_id": "AML.M0002",
        "refs": [
          "https://atlas.mitre.org/mitigations/AML.M0002"
        ]
      },
      "related": [
        {
          "dest-uuid": "86b5f486-afb8-4aa9-991f-0e24d5737f0c",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "943303ef-846b-49d6-b53f-b0b9341ac1ca",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "c4e52005-7416-45c4-9feb-8cd5fd34f70a",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "c552f0b5-2e2c-4f8f-badc-0876ecca7255",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "e19c6f8a-f1e2-46cc-9387-03a3092f01ed",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "f78e0ac3-6d72-42ed-b20a-e10d8c752cf6",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        }
      ],
      "uuid": "9f92e876-e2c0-4def-afee-626a4a79c524",
      "value": "Passive ML Output Obfuscation"
    },
    {
      "description": "Use techniques to make machine learning models robust to adversarial inputs such as adversarial training or network distillation.\n",
      "meta": {
        "external_id": "AML.M0003",
        "refs": [
          "https://atlas.mitre.org/mitigations/AML.M0003"
        ]
      },
      "related": [
        {
          "dest-uuid": "071df654-813a-4708-85dc-f715f785d37f",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "8735735d-c09d-4298-8e64-9a2b6168a74c",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        }
      ],
      "uuid": "216f862c-7f34-4676-a913-c4ec6cc4c2cd",
      "value": "Model Hardening"
    },
    {
      "description": "Limit the total number and rate of queries a user can perform.\n",
      "meta": {
        "external_id": "AML.M0004",
        "refs": [
          "https://atlas.mitre.org/mitigations/AML.M0004"
        ]
      },
      "related": [
        {
          "dest-uuid": "6c1fca80-3ba9-41c9-8f7b-9824310a94f1",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "86b5f486-afb8-4aa9-991f-0e24d5737f0c",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "8f644f37-e2e6-468e-b720-f395b8c27fbc",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "943303ef-846b-49d6-b53f-b0b9341ac1ca",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "ae71ca3a-8ca4-40d2-bdba-4276b29ac8f9",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "b07d147f-51c8-4eb6-9a05-09c86762a9c1",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "c4e52005-7416-45c4-9feb-8cd5fd34f70a",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "c552f0b5-2e2c-4f8f-badc-0876ecca7255",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "e19c6f8a-f1e2-46cc-9387-03a3092f01ed",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "f78e0ac3-6d72-42ed-b20a-e10d8c752cf6",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        }
      ],
      "uuid": "46b3e92d-600b-47c9-80f5-ed62a5db0377",
      "value": "Restrict Number of ML Model Queries"
    },
    {
      "description": "Establish access controls on internal model registries and limit internal access to production models. Limit access to training data only to approved users.\n",
      "meta": {
        "external_id": "AML.M0005",
        "refs": [
          "https://atlas.mitre.org/mitigations/AML.M0005"
        ]
      },
      "related": [
        {
          "dest-uuid": "0ec538ca-589b-4e42-bcaa-06097a0d679f",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "2680aa95-5620-4677-9c62-b0c3d15d9450",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "452b8fdf-8679-4013-bb38-4d16f65430bc",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "8d644240-ad99-4410-a7f8-3ef8f53a463e",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "a50f02df-1130-4945-94bb-7857952da585",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "d1f013a8-11f3-4560-831c-8ed5e39247c9",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "e0eb2b64-aebd-4412-80f3-b71d7805a65f",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        }
      ],
      "uuid": "0025dadf-7900-497f-aa03-39f0e319f20e",
      "value": "Control Access to ML Models and Data at Rest"
    },
    {
      "description": "Use an ensemble of models for inference to increase robustness to adversarial inputs. Some attacks may effectively evade one model or model family but be ineffective against others.\n",
      "meta": {
        "external_id": "AML.M0006",
        "refs": [
          "https://atlas.mitre.org/mitigations/AML.M0006"
        ]
      },
      "related": [
        {
          "dest-uuid": "071df654-813a-4708-85dc-f715f785d37f",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "452b8fdf-8679-4013-bb38-4d16f65430bc",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "8735735d-c09d-4298-8e64-9a2b6168a74c",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "c552f0b5-2e2c-4f8f-badc-0876ecca7255",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "d8292a1c-21e7-4b45-b110-0e05feb30a9a",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        }
      ],
      "uuid": "dcb586a2-1135-4e2a-97bd-d4adbc79758b",
      "value": "Use Ensemble Methods"
    },
    {
      "description": "Detect and remove or remediate poisoned training data. Training data should be sanitized prior to model training and recurrently for an active learning model.\n\nImplement a filter to limit ingested training data. Establish a content policy that would remove unwanted content such as certain explicit or offensive language from being used.\n",
      "meta": {
        "external_id": "AML.M0007",
        "refs": [
          "https://atlas.mitre.org/mitigations/AML.M0007"
        ]
      },
      "related": [
        {
          "dest-uuid": "0ec538ca-589b-4e42-bcaa-06097a0d679f",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "8d644240-ad99-4410-a7f8-3ef8f53a463e",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "e0eb2b64-aebd-4412-80f3-b71d7805a65f",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        }
      ],
      "uuid": "9395d240-cc32-452a-911b-04feea01bcfb",
      "value": "Sanitize Training Data"
    },
    {
      "description": "Validate that machine learning models perform as intended by testing for backdoor triggers or adversarial bias.\nMonitor model for concept drift and training data drift, which may indicate data tampering and poisoning.\n",
      "meta": {
        "external_id": "AML.M0008",
        "refs": [
          "https://atlas.mitre.org/mitigations/AML.M0008"
        ]
      },
      "related": [
        {
          "dest-uuid": "452b8fdf-8679-4013-bb38-4d16f65430bc",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "a50f02df-1130-4945-94bb-7857952da585",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "e0eb2b64-aebd-4412-80f3-b71d7805a65f",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        }
      ],
      "uuid": "01c2ec0a-e257-4a75-9e59-f71aa6362b6e",
      "value": "Validate ML Model"
    },
    {
      "description": "Incorporate multiple sensors to integrate varying perspectives and modalities to avoid a single point of failure susceptible to physical attacks.\n",
      "meta": {
        "external_id": "AML.M0009",
        "refs": [
          "https://atlas.mitre.org/mitigations/AML.M0009"
        ]
      },
      "related": [
        {
          "dest-uuid": "071df654-813a-4708-85dc-f715f785d37f",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "4d5c6974-0307-4535-bf37-7bb4c6a2ef47",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        }
      ],
      "uuid": "1bb9d9a7-c05a-470f-a709-64bd240e2eb0",
      "value": "Use Multi-Modal Sensors"
    },
    {
      "description": "Preprocess all inference data to nullify or reverse potential adversarial perturbations.\n",
      "meta": {
        "external_id": "AML.M0010",
        "refs": [
          "https://atlas.mitre.org/mitigations/AML.M0010"
        ]
      },
      "related": [
        {
          "dest-uuid": "071df654-813a-4708-85dc-f715f785d37f",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "8735735d-c09d-4298-8e64-9a2b6168a74c",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "c4e52005-7416-45c4-9feb-8cd5fd34f70a",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        }
      ],
      "uuid": "73a34f24-1ad1-4421-b9c8-c2cbd13e6f47",
      "value": "Input Restoration"
    },
    {
      "description": "Prevent abuse of library loading mechanisms in the operating system and software to load untrusted code by configuring appropriate library loading mechanisms and investigating potentially vulnerable software.\n\nFile formats such as pickle files that are commonly used to store machine learning models can contain exploits that allow for loading of malicious libraries.\n",
      "meta": {
        "external_id": "AML.M0011",
        "refs": [
          "https://atlas.mitre.org/mitigations/AML.M0011"
        ]
      },
      "related": [
        {
          "dest-uuid": "be6ef5c5-1ecb-486d-9743-42085bd2c256",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        }
      ],
      "uuid": "179e00cb-0948-4282-9132-f8a1f0ff6bd7",
      "value": "Restrict Library Loading"
    },
    {
      "description": "Encrypt sensitive data such as ML models to protect against adversaries attempting to access sensitive data.\n",
      "meta": {
        "external_id": "AML.M0012",
        "refs": [
          "https://atlas.mitre.org/mitigations/AML.M0012"
        ]
      },
      "related": [
        {
          "dest-uuid": "6a88dccb-fb37-4f11-a5ad-42908aaee1d0",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "d1f013a8-11f3-4560-831c-8ed5e39247c9",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "e2ebc190-9ff6-496e-afeb-ac868df2361e",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        }
      ],
      "uuid": "aad92d43-774b-4612-8437-8d6c7ee7e4af",
      "value": "Encrypt Sensitive Information"
    },
    {
      "description": "Enforce binary and application integrity with digital signature verification to prevent untrusted code from executing. Adversaries can embed malicious code in ML software or models. Enforcement of code signing can prevent the compromise of the machine learning supply chain and prevent execution of malicious code.\n",
      "meta": {
        "external_id": "AML.M0013",
        "refs": [
          "https://atlas.mitre.org/mitigations/AML.M0013"
        ]
      },
      "related": [
        {
          "dest-uuid": "452b8fdf-8679-4013-bb38-4d16f65430bc",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "be6ef5c5-1ecb-486d-9743-42085bd2c256",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "d8292a1c-21e7-4b45-b110-0e05feb30a9a",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        }
      ],
      "uuid": "88073b07-2fe9-41cb-8e76-6e244fbabc74",
      "value": "Code Signing"
    },
    {
      "description": "Verify the cryptographic checksum of all machine learning artifacts to confirm that the files have not been modified by an attacker.\n",
      "meta": {
        "external_id": "AML.M0014",
        "refs": [
          "https://atlas.mitre.org/mitigations/AML.M0014"
        ]
      },
      "related": [
        {
          "dest-uuid": "be6ef5c5-1ecb-486d-9743-42085bd2c256",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "d2cf31e0-a550-4fe0-8fdb-8941b3ac00d9",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "f4fc2abd-71a4-401a-a742-18fc5aeb4bc3",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        }
      ],
      "uuid": "cdccb3ab-2dde-41a9-a988-783a25b7bd00",
      "value": "Verify ML Artifacts"
    },
    {
      "description": "Detect and block adversarial inputs or atypical queries that deviate from known benign behavior, exhibit behavior patterns observed in previous attacks, or come from potentially malicious IPs.\nIncorporate adversarial detection algorithms into the ML system prior to the ML model.\n",
      "meta": {
        "external_id": "AML.M0015",
        "refs": [
          "https://atlas.mitre.org/mitigations/AML.M0015"
        ]
      },
      "related": [
        {
          "dest-uuid": "071df654-813a-4708-85dc-f715f785d37f",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "8735735d-c09d-4298-8e64-9a2b6168a74c",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "8f644f37-e2e6-468e-b720-f395b8c27fbc",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "c4e52005-7416-45c4-9feb-8cd5fd34f70a",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        }
      ],
      "uuid": "0ed2ef71-cdc9-4eef-8432-1c3dadbdda20",
      "value": "Adversarial Input Detection"
    },
    {
      "description": "Vulnerability scanning is used to find potentially exploitable software vulnerabilities to remediate them.\n\nFile formats such as pickle files that are commonly used to store machine learning models can contain exploits that allow for arbitrary code execution.\nBoth model artifacts and downstream products produced by models should be scanned for known vulnerabilities.\n",
      "meta": {
        "external_id": "AML.M0016",
        "refs": [
          "https://atlas.mitre.org/mitigations/AML.M0016"
        ]
      },
      "related": [
        {
          "dest-uuid": "be6ef5c5-1ecb-486d-9743-42085bd2c256",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "c704a49c-abf0-4258-9919-a862b1865469",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        }
      ],
      "uuid": "79752061-aac1-4ed9-b7f3-3b4dc5e81280",
      "value": "Vulnerability Scanning"
    },
    {
      "description": "Deploying ML models to edge devices can increase the attack surface of the system.\nConsider serving models in the cloud to reduce the level of access the adversary has to the model.\nAlso consider computing features in the cloud to prevent gray-box attacks, where an adversary has access to the model preprocessing methods.\n",
      "meta": {
        "external_id": "AML.M0017",
        "refs": [
          "https://atlas.mitre.org/mitigations/AML.M0017"
        ]
      },
      "related": [
        {
          "dest-uuid": "3de90963-bc9f-4ae1-b780-7d05e46eacdd",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "452b8fdf-8679-4013-bb38-4d16f65430bc",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "ab01ba21-1438-4cd9-a588-92eb271086bc",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        }
      ],
      "uuid": "432c3a44-3974-4b73-9eb9-fa5dd5298e47",
      "value": "Model Distribution Methods"
    },
    {
      "description": "Educate ML model developers on secure coding practices and ML vulnerabilities.\n",
      "meta": {
        "external_id": "AML.M0018",
        "refs": [
          "https://atlas.mitre.org/mitigations/AML.M0018"
        ]
      },
      "related": [
        {
          "dest-uuid": "8c849dd4-5d15-45aa-b5b2-59c96a3ab939",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "be6ef5c5-1ecb-486d-9743-42085bd2c256",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        }
      ],
      "uuid": "cce983e7-13a2-4545-8c39-ec6c8dff148d",
      "value": "User Training"
    },
    {
      "description": "Require users to verify their identities before accessing a production model.\nRequire authentication for API endpoints and monitor production model queries to ensure compliance with usage policies and to prevent model misuse.\n",
      "meta": {
        "external_id": "AML.M0019",
        "refs": [
          "https://atlas.mitre.org/mitigations/AML.M0019"
        ]
      },
      "related": [
        {
          "dest-uuid": "90a420d4-3f03-4800-86c0-223c4376804a",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "b07d147f-51c8-4eb6-9a05-09c86762a9c1",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        }
      ],
      "uuid": "7b00dd51-f719-433d-afd6-3d386f64386d",
      "value": "Control Access to ML Models and Data in Production"
    },
    {
      "description": "Guardrails are safety controls that are placed between a generative AI model and the output shared with the user to prevent undesired inputs and outputs.\nGuardrails can take the form of validators such as filters, rule-based logic, or regular expressions, as well as AI-based approaches such as classifiers, LLMs, or named entity recognition (NER) used to evaluate the safety of the prompt or response. Domain-specific methods can be employed to reduce risks in a variety of areas such as etiquette, brand damage, jailbreaking, false information, code exploits, SQL injections, and data leakage.",
      "meta": {
        "external_id": "AML.M0020",
        "refs": [
          "https://atlas.mitre.org/mitigations/AML.M0020"
        ]
      },
      "related": [
        {
          "dest-uuid": "172427e3-9ecc-49a3-b628-96b824cc4131",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "19cd2d12-66ff-487c-a05c-e058b027efc9",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "45d378aa-20ae-401d-bf61-7f00104eeaca",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "adbb0dd5-ff66-4b2f-869f-bfb3fdb45fc8",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "d2cf31e0-a550-4fe0-8fdb-8941b3ac00d9",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "e98acce8-ed69-4ebe-845b-1bcb662836ba",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        }
      ],
      "uuid": "b8511570-3320-4733-a0e1-134e376e7530",
      "value": "Generative AI Guardrails"
    },
    {
      "description": "Guidelines are safety controls that are placed between user-provided input and a generative AI model to help direct the model to produce desired outputs and prevent undesired outputs.\n\nGuidelines can be implemented as instructions appended to all user prompts or as part of the instructions in the system prompt. They can define the goal(s), role, and voice of the system, as well as outline safety and security parameters.",
      "meta": {
        "external_id": "AML.M0021",
        "refs": [
          "https://atlas.mitre.org/mitigations/AML.M0021"
        ]
      },
      "related": [
        {
          "dest-uuid": "172427e3-9ecc-49a3-b628-96b824cc4131",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "19cd2d12-66ff-487c-a05c-e058b027efc9",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "45d378aa-20ae-401d-bf61-7f00104eeaca",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "adbb0dd5-ff66-4b2f-869f-bfb3fdb45fc8",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "e98acce8-ed69-4ebe-845b-1bcb662836ba",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        }
      ],
      "uuid": "d55bd0c8-2db0-400b-9097-c7cec00e2b91",
      "value": "Generative AI Guidelines"
    },
    {
      "description": "When training or fine-tuning a generative AI model, it is important to utilize techniques that improve model alignment with safety, security, and content policies.\n\nThe fine-tuning process can potentially remove built-in safety mechanisms in a generative AI model, but utilizing techniques such as Supervised Fine-Tuning, Reinforcement Learning from Human Feedback or AI Feedback, and Targeted Safety Context Distillation can improve the safety and alignment of the model.",
      "meta": {
        "external_id": "AML.M0022",
        "refs": [
          "https://atlas.mitre.org/mitigations/AML.M0022"
        ]
      },
      "related": [
        {
          "dest-uuid": "172427e3-9ecc-49a3-b628-96b824cc4131",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "19cd2d12-66ff-487c-a05c-e058b027efc9",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "45d378aa-20ae-401d-bf61-7f00104eeaca",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "adbb0dd5-ff66-4b2f-869f-bfb3fdb45fc8",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "e98acce8-ed69-4ebe-845b-1bcb662836ba",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        }
      ],
      "uuid": "1fca595d-b140-4ce0-8fd8-c4c6bee87540",
      "value": "Generative AI Model Alignment"
    },
    {
      "description": "An AI Bill of Materials (AI BOM) contains a full listing of artifacts and resources that were used in building the AI. The AI BOM can help mitigate supply chain risks and enable rapid response to reported vulnerabilities.\n\nThis can include maintaining dataset provenance, i.e., a detailed history of datasets used for AI applications. The history can include information about the dataset source as well as a complete record of any modifications.",
      "meta": {
        "external_id": "AML.M0023",
        "refs": [
          "https://atlas.mitre.org/mitigations/AML.M0023"
        ]
      },
      "related": [
        {
          "dest-uuid": "0ec538ca-589b-4e42-bcaa-06097a0d679f",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "be6ef5c5-1ecb-486d-9743-42085bd2c256",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "e3b9d41a-d2f9-4825-942f-1c4a30b4d2f9",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "f4fc2abd-71a4-401a-a742-18fc5aeb4bc3",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        }
      ],
      "uuid": "1f63b56d-034f-477d-ab49-399c1aa1a22a",
      "value": "AI Bill of Materials"
    },
    {
      "description": "Implement logging of inputs and outputs of deployed AI models. Monitoring logs can help to detect security threats and mitigate impacts.",
      "meta": {
        "external_id": "AML.M0024",
        "refs": [
          "https://atlas.mitre.org/mitigations/AML.M0024"
        ]
      },
      "related": [
        {
          "dest-uuid": "19cd2d12-66ff-487c-a05c-e058b027efc9",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "86b5f486-afb8-4aa9-991f-0e24d5737f0c",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "90a420d4-3f03-4800-86c0-223c4376804a",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "a3660a2d-f6e5-4f1b-9618-332cceb389c8",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "a4a55526-2f1f-403b-9691-609e46381e17",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "b07d147f-51c8-4eb6-9a05-09c86762a9c1",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "b5626410-b33d-4487-9c0f-2b7d844b8e95",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "d911e8cb-0601-42f1-90de-7ce0b21cd578",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "e19c6f8a-f1e2-46cc-9387-03a3092f01ed",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "f78e0ac3-6d72-42ed-b20a-e10d8c752cf6",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        }
      ],
      "uuid": "fa5108b2-3dc8-42dc-93a3-902f1bf74521",
      "value": "AI Telemetry Logging"
    },
    {
      "description": "Maintain a detailed history of datasets used for AI applications. The history should include information about the dataset's source as well as a complete record of any modifications.",
      "meta": {
        "external_id": "AML.M0025",
        "refs": [
          "https://atlas.mitre.org/mitigations/AML.M0025"
        ]
      },
      "related": [
        {
          "dest-uuid": "0ec538ca-589b-4e42-bcaa-06097a0d679f",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "8d644240-ad99-4410-a7f8-3ef8f53a463e",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        },
        {
          "dest-uuid": "e0eb2b64-aebd-4412-80f3-b71d7805a65f",
          "tags": [
            "estimative-language:likelihood-probability=\"almost-certain\""
          ],
          "type": "mitigates"
        }
      ],
      "uuid": "005a5427-4b1e-41c2-a7aa-eda9ae9a9815",
      "value": "Maintain AI Dataset Provenance"
    }
  ],
  "version": 13
}