From 1b6e5a4ca9e0449243d59cade36eeb635b5e40e7 Mon Sep 17 00:00:00 2001 From: "Allen D. Householder" Date: Fri, 30 Jun 2023 13:46:16 -0400 Subject: [PATCH 01/12] rename files to reflect chapter, section, subsection --- .../{010_introduction.md => 01_00_00_introduction.md} | 0 .../{020_stateOfPractice.md => 02_00_00_state_of_practice.md} | 0 ...sentingInformation.md => 03_00_00_representing_information.md} | 0 ...rs-scope.md => 04_00_00_vulnerability_management_decisions.md} | 0 ...50_decision-points_1.md => 05_00_00_likely_decision_points.md} | 0 .../{055_decision-points_2.md => 05_06_00_safety_impact.md} | 0 .../{058_coordination.md => 06_00_00_coordination_decisions.md} | 0 ...9_coordination-publish.md => 06_04_00_publication_decision.md} | 0 .../{060_decision-trees.md => 07_00_00_prioritization.md} | 0 doc/md_src_files/{065_changelog.md => 09_00_00_changelog.md} | 0 ...luationDraftTrees.md => 10_00_00_evaluation_of_draft_trees.md} | 0 .../{080_workedExample.md => 11_00_00_worked_example.md} | 0 .../{082_relatedSystems.md => 12_00_00_related_systems.md} | 0 doc/md_src_files/{090_futureWork.md => 13_00_00_future_work.md} | 0 doc/md_src_files/{100_limitations.md => 14_00_00_limitations.md} | 0 doc/md_src_files/{110_conclusion.md => 15_00_00_conclusion.md} | 0 doc/md_src_files/{120_acks.md => 16_00_00_acknowledgements.md} | 0 doc/md_src_files/{890_contact.md => 17_00_00_contact_us.md} | 0 doc/md_src_files/{900_license.md => 18_00_00_license.md} | 0 ..._referenceDefinitions.md => 19_00_00_reference_definitions.md} | 0 20 files changed, 0 insertions(+), 0 deletions(-) rename doc/md_src_files/{010_introduction.md => 01_00_00_introduction.md} (100%) rename doc/md_src_files/{020_stateOfPractice.md => 02_00_00_state_of_practice.md} (100%) rename doc/md_src_files/{030_representingInformation.md => 03_00_00_representing_information.md} (100%) rename doc/md_src_files/{040_stakeholders-scope.md => 04_00_00_vulnerability_management_decisions.md} (100%) rename 
doc/md_src_files/{050_decision-points_1.md => 05_00_00_likely_decision_points.md} (100%) rename doc/md_src_files/{055_decision-points_2.md => 05_06_00_safety_impact.md} (100%) rename doc/md_src_files/{058_coordination.md => 06_00_00_coordination_decisions.md} (100%) rename doc/md_src_files/{059_coordination-publish.md => 06_04_00_publication_decision.md} (100%) rename doc/md_src_files/{060_decision-trees.md => 07_00_00_prioritization.md} (100%) rename doc/md_src_files/{065_changelog.md => 09_00_00_changelog.md} (100%) rename doc/md_src_files/{070_evaluationDraftTrees.md => 10_00_00_evaluation_of_draft_trees.md} (100%) rename doc/md_src_files/{080_workedExample.md => 11_00_00_worked_example.md} (100%) rename doc/md_src_files/{082_relatedSystems.md => 12_00_00_related_systems.md} (100%) rename doc/md_src_files/{090_futureWork.md => 13_00_00_future_work.md} (100%) rename doc/md_src_files/{100_limitations.md => 14_00_00_limitations.md} (100%) rename doc/md_src_files/{110_conclusion.md => 15_00_00_conclusion.md} (100%) rename doc/md_src_files/{120_acks.md => 16_00_00_acknowledgements.md} (100%) rename doc/md_src_files/{890_contact.md => 17_00_00_contact_us.md} (100%) rename doc/md_src_files/{900_license.md => 18_00_00_license.md} (100%) rename doc/md_src_files/{999_referenceDefinitions.md => 19_00_00_reference_definitions.md} (100%) diff --git a/doc/md_src_files/010_introduction.md b/doc/md_src_files/01_00_00_introduction.md similarity index 100% rename from doc/md_src_files/010_introduction.md rename to doc/md_src_files/01_00_00_introduction.md diff --git a/doc/md_src_files/020_stateOfPractice.md b/doc/md_src_files/02_00_00_state_of_practice.md similarity index 100% rename from doc/md_src_files/020_stateOfPractice.md rename to doc/md_src_files/02_00_00_state_of_practice.md diff --git a/doc/md_src_files/030_representingInformation.md b/doc/md_src_files/03_00_00_representing_information.md similarity index 100% rename from doc/md_src_files/030_representingInformation.md 
rename to doc/md_src_files/03_00_00_representing_information.md diff --git a/doc/md_src_files/040_stakeholders-scope.md b/doc/md_src_files/04_00_00_vulnerability_management_decisions.md similarity index 100% rename from doc/md_src_files/040_stakeholders-scope.md rename to doc/md_src_files/04_00_00_vulnerability_management_decisions.md diff --git a/doc/md_src_files/050_decision-points_1.md b/doc/md_src_files/05_00_00_likely_decision_points.md similarity index 100% rename from doc/md_src_files/050_decision-points_1.md rename to doc/md_src_files/05_00_00_likely_decision_points.md diff --git a/doc/md_src_files/055_decision-points_2.md b/doc/md_src_files/05_06_00_safety_impact.md similarity index 100% rename from doc/md_src_files/055_decision-points_2.md rename to doc/md_src_files/05_06_00_safety_impact.md diff --git a/doc/md_src_files/058_coordination.md b/doc/md_src_files/06_00_00_coordination_decisions.md similarity index 100% rename from doc/md_src_files/058_coordination.md rename to doc/md_src_files/06_00_00_coordination_decisions.md diff --git a/doc/md_src_files/059_coordination-publish.md b/doc/md_src_files/06_04_00_publication_decision.md similarity index 100% rename from doc/md_src_files/059_coordination-publish.md rename to doc/md_src_files/06_04_00_publication_decision.md diff --git a/doc/md_src_files/060_decision-trees.md b/doc/md_src_files/07_00_00_prioritization.md similarity index 100% rename from doc/md_src_files/060_decision-trees.md rename to doc/md_src_files/07_00_00_prioritization.md diff --git a/doc/md_src_files/065_changelog.md b/doc/md_src_files/09_00_00_changelog.md similarity index 100% rename from doc/md_src_files/065_changelog.md rename to doc/md_src_files/09_00_00_changelog.md diff --git a/doc/md_src_files/070_evaluationDraftTrees.md b/doc/md_src_files/10_00_00_evaluation_of_draft_trees.md similarity index 100% rename from doc/md_src_files/070_evaluationDraftTrees.md rename to doc/md_src_files/10_00_00_evaluation_of_draft_trees.md diff --git 
a/doc/md_src_files/080_workedExample.md b/doc/md_src_files/11_00_00_worked_example.md similarity index 100% rename from doc/md_src_files/080_workedExample.md rename to doc/md_src_files/11_00_00_worked_example.md diff --git a/doc/md_src_files/082_relatedSystems.md b/doc/md_src_files/12_00_00_related_systems.md similarity index 100% rename from doc/md_src_files/082_relatedSystems.md rename to doc/md_src_files/12_00_00_related_systems.md diff --git a/doc/md_src_files/090_futureWork.md b/doc/md_src_files/13_00_00_future_work.md similarity index 100% rename from doc/md_src_files/090_futureWork.md rename to doc/md_src_files/13_00_00_future_work.md diff --git a/doc/md_src_files/100_limitations.md b/doc/md_src_files/14_00_00_limitations.md similarity index 100% rename from doc/md_src_files/100_limitations.md rename to doc/md_src_files/14_00_00_limitations.md diff --git a/doc/md_src_files/110_conclusion.md b/doc/md_src_files/15_00_00_conclusion.md similarity index 100% rename from doc/md_src_files/110_conclusion.md rename to doc/md_src_files/15_00_00_conclusion.md diff --git a/doc/md_src_files/120_acks.md b/doc/md_src_files/16_00_00_acknowledgements.md similarity index 100% rename from doc/md_src_files/120_acks.md rename to doc/md_src_files/16_00_00_acknowledgements.md diff --git a/doc/md_src_files/890_contact.md b/doc/md_src_files/17_00_00_contact_us.md similarity index 100% rename from doc/md_src_files/890_contact.md rename to doc/md_src_files/17_00_00_contact_us.md diff --git a/doc/md_src_files/900_license.md b/doc/md_src_files/18_00_00_license.md similarity index 100% rename from doc/md_src_files/900_license.md rename to doc/md_src_files/18_00_00_license.md diff --git a/doc/md_src_files/999_referenceDefinitions.md b/doc/md_src_files/19_00_00_reference_definitions.md similarity index 100% rename from doc/md_src_files/999_referenceDefinitions.md rename to doc/md_src_files/19_00_00_reference_definitions.md From 1ec1629ef2f2d4d6d37d9216137be85183106b1e Mon Sep 17 00:00:00 
2001 From: "Allen D. Householder" Date: Fri, 30 Jun 2023 13:58:10 -0400 Subject: [PATCH 02/12] Break up first half of chapter 5 into one file per section. Also reorder utility to come after its subcomponent descriptions. --- .../05_00_00_likely_decision_points.md | 201 ------------------ doc/md_src_files/05_01_00_exploitation.md | 37 ++++ doc/md_src_files/05_02_00_technical_impact.md | 34 +++ doc/md_src_files/05_03_00_automatable.md | 38 ++++ doc/md_src_files/05_04_00_value_density.md | 42 ++++ doc/md_src_files/05_05_00_utility.md | 47 ++++ 6 files changed, 198 insertions(+), 201 deletions(-) create mode 100644 doc/md_src_files/05_01_00_exploitation.md create mode 100644 doc/md_src_files/05_02_00_technical_impact.md create mode 100644 doc/md_src_files/05_03_00_automatable.md create mode 100644 doc/md_src_files/05_04_00_value_density.md create mode 100644 doc/md_src_files/05_05_00_utility.md diff --git a/doc/md_src_files/05_00_00_likely_decision_points.md b/doc/md_src_files/05_00_00_likely_decision_points.md index 67e1103e..47912a63 100644 --- a/doc/md_src_files/05_00_00_likely_decision_points.md +++ b/doc/md_src_files/05_00_00_likely_decision_points.md @@ -8,204 +8,3 @@ We propose satisfactory decision points for vulnerability management in the next Each section has a subsection with advice on gathering information about the decision point. [SSVC using Current Information Sources](#ssvc-using-current-information-sources) will provide some suggestions about how existing sources of information about vulnerabilities can be used to collate responses to these decision points. -## Exploitation -> Evidence of Active Exploitation of a Vulnerability - -The intent of this measure is the present state of exploitation of the vulnerability. The intent is not to predict future exploitation but only to acknowledge the current state of affairs. Predictive systems, such as EPSS, could be used to augment this decision or to notify stakeholders of likely changes [@jacobs2019exploit]. 
- -Table: Exploitation Decision Values - -| Value | Definition | -| :--- | :------------ | -| None | There is no evidence of active exploitation and no public proof of concept (PoC) of how to exploit the vulnerability. | -| PoC
(Proof of Concept) | One of the following cases is true: (1) exploit code is sold or traded on underground or restricted fora; (2) a typical public PoC in places such as Metasploit or ExploitDB; or (3) the vulnerability has a well-known method of exploitation. Some examples of condition (3) are open-source web proxies serve as the PoC code for how to exploit any vulnerability in the vein of improper validation of TLS certificates. As another example, Wireshark serves as a PoC for packet replay attacks on ethernet or WiFi networks. | -| Active | Shared, observable, reliable evidence that the exploit is being used in the wild by real attackers; there is credible public reporting. | - - -### Gathering Information About Exploitation -[@householder2020historical] presents a method for searching the GitHub repositories of open-source exploit databases. -This method could be employed to gather information about whether [PoC](#exploitation) is true. -However, part (3) of [PoC](#exploitation) would not be represented in such a search, so more information gathering would be needed. -For part (3), perhaps we could construct a mapping of CWE-IDs which always represent vulnerabilities with well-known methods of exploitation. -For example, CWE-295, [Improper Certificate Validation -](https://cwe.mitre.org/data/definitions/295.html), and its child CWEs, describe improper validation of TLS certificates. -These CWE-IDs could always be marked as [PoC](#exploitation) since that meets condition (3) in the definition. -A comprehensive set of suggested CWE-IDs for this purpose is future work. - -Gathering information for [active](#exploitation) is a bit harder. -If the vulnerability has a name or public identifier (such as a CVE-ID), a search of news websites, Twitter, the vendor's vulnerability description, and public vulnerability databases for mentions of exploitation is generally adequate. 
-However, if the organization has the ability to detect exploitation attempts—for instance, through reliable and precise IDS signatures based on a public PoC—then detection of exploitation attempts also signals that [active](#exploitation) is the right choice. -Determining which vulnerability a novel piece of malware uses may be time consuming, requiring reverse engineering and a lot of trial and error. -Additionally, capable incident detection and analysis capabilities are required to make reverse engineering possible. -Because most organizations do not conduct these processes fully for most incidents, information about which vulnerabilities are being actively exploited generally comes from public reporting by organizations that do conduct these processes. -As long as those organizations also share detection methods and signatures, the results are usually quickly corroborated by the community. -For these reasons, we assess public reporting by established security community members to be a good information source for [active](#exploitation); however, one should not assume it is complete. - -The description for [none](#exploitation) says that there is no **evidence** of active exploitation. -This framing admits that an analyst may not be able to detect or know about every attack. -An analyst should feel comfortable selecting [none](#exploitation) if they (or their search scripts) have performed searches in the appropriate places for public PoCs and active exploitation (as described above) and found none. -Acknowledging that [*Exploitation*](#exploitation) values can change relatively quickly, we recommend conducting these searches frequently: if they can be automated to the organization's satisfaction, perhaps once a day (see also [Guidance on Communicating Results](#guidance-on-communicating-results)). 
- -## Technical Impact -> Technical Impact of Exploiting the Vulnerability - -When evaluating [*Technical Impact*](#technical-impact), recall the scope definition in the [Scope Section](#scope). -Total control is relative to the affected component where the vulnerability resides. -If a vulnerability discloses authentication or authorization credentials to the system, this information disclosure should also be scored as “total” if those credentials give an adversary total control of the component. - -As mentioned in [Current State of Practice](#current-state-of-practice), the scope of SSVC is just those situations in which there is a vulnerability. -Our definition of **vulnerability** is based on the determination that some security policy is violated. -We consider a security policy violation to be a technical impact—or at least, a security policy violation must have some technical instantiation. -Therefore, if there is a vulnerability then there must be some technical impact. - -Table: Technical Impact Decision Values - -| Value | Definition | -| :--- | :------------- | -| Partial | The exploit gives the adversary *limited* control over, or information exposure about, the behavior of the software that contains the vulnerability. Or the exploit gives the adversary an importantly low stochastic opportunity for total control. In this context, “low” means that the attacker cannot reasonably make enough attempts to overcome the low chance of each attempt not working. Denial of service is a form of limited control over the behavior of the vulnerable component. 
| -| Total | The exploit gives the adversary *total* control over the behavior of the software, or it gives total disclosure of all information on the system that contains the vulnerability | - - -### Gathering Information About Technical Impact - -Assessing [*Technical Impact*](#technical-impact) amounts to assessing the degree of control over the vulnerable component the attacker stands to gain by exploiting the vulnerability. -One way to approach this analysis is to ask whether the control gained is *total* or not. -If it is not total, it is *partial*. -If an answer to one of the following questions is _yes_, then control is *total*. -After exploiting the vulnerability, - - can the attacker install and run arbitrary software? - - can the attacker trigger all the actions that the vulnerable component can perform? - - does the attacker get an account with full privileges to the vulnerable component (administrator or root user accounts, for example)? - -This list is an evolving set of heuristics. -If you find a vulnerability that should have [*total*](#technical-impact) [*Technical Impact*](#technical-impact) but that does not answer yes to any of these questions, please describe the example and what question we might add to this list in an issue on the [SSVC GitHub](https://github.com/CERTCC/SSVC/issues). - -## Utility -> The Usefulness of the Exploit to the Adversary - -[*Utility*](#utility) estimates an adversary's benefit compared to their effort based on the assumption that they can exploit the vulnerability. -[*Utility*](#utility) is independent from the state of [*Exploitation*](#exploitation), which measures whether a set of adversaries have ready access to exploit code or are in fact exploiting the vulnerability. -In economic terms, [*Exploitation*](#exploitation) measures whether the **capital cost** of producing reliable exploit code has been paid or not. -[*Utility*](#utility) estimates the **marginal cost** of each exploitation event. 
-More plainly, [*Utility*](#utility) is about how much an adversary might benefit from a campaign using the vulnerability in question, whereas [*Exploitation*](#exploitation) is about how easy it would be to start such a campaign or if one is already underway. - - -Heuristically, we base Utility on a combination of the value density of vulnerable components and whether potential exploitation is automatable. -This framing makes it easier to analytically derive these categories from a description of the vulnerability and the affected component. -[*Automatable*](#automatable) as ([*no*](#automatable) or [*yes*](#automatable)) and [*Value Density*](#value-density) as ([*diffuse*](#value-density) or [*concentrated*](#value-density)) define those decision points. - -Roughly, [*Utility*](#utility) is a combination of two things: (1) the value of each exploitation event and (2) the ease and speed with which the adversary can cause exploitation events. We define [*Utility*](#utility) as laborious, efficient, or super effective, as described in [Utility Decision Values](#table-utility). [The next table](#table-utility-2) is an equivalent expression of [*Utility*](#utility) that resembles a lookup table in a program. 
- -Table: Utility Decision Values - -| Value | Definition | -| :--- | :---------- | -| Laborious | *No* to automatable and diffuse value | -| Efficient | {*Yes* to automatable and diffuse value} OR {*No* to automatable and concentrated value} | -| Super Effective | *Yes* to automatable and concentrated value | - -Table: Utility to the Adversary, as a Combination of Automatable and Value Density - -| *Automatable* | *Value Density* | *Utility* | -| ----------- | --------------- | --: | -| *no* | *diffuse* | laborious | -| *no* | *concentrated* | efficient | -| *yes* | *diffuse* | efficient | -| *yes* | *concentrated* | super effective | - - - -## Automatable - -[*Automatable*](#automatable) captures the answer to the question “Can an attacker reliably automate creating exploitation events for this vulnerability?” This metric can take the values *no* or *yes*: - - - [*no*](#automatable): Attackers cannot reliably automate steps 1-4 of the kill chain - [@hutchins2011intelligence] for this vulnerability. These - steps are (1) reconnaissance, (2) weaponization, (3) delivery, and (4) exploitation. - Reasons why a step may not be reliably automatable could include the following: - 1. the vulnerable component is not searchable or enumerable on the network, - 2. weaponization may require human direction for each target, - 3. delivery may require channels that widely deployed network security configurations block, and - 4. exploitation is not reliable, due to exploit-prevention techniques enabled by default; ASLR is an example of an exploit-prevention tool. - - - [*yes*](#automatable): Attackers can reliably automate steps 1-4 of the kill chain. - If the vulnerability allows remote code execution or command injection, the expected response should be yes. - -Due to vulnerability chaining, there is some nuance as to whether reconnaissance can be automated. For example, consider a vulnerability A. 
-If the systems vulnerable to A are usually not openly connected to incoming traffic (that is, [*Exposure*](#exposure) is [small](#exposure) or [controlled](#exposure)), reconnaissance probably cannot be automated (scans would be blocked, etc.). This would make Automatable equal to [no](#automatable) for vulnerability A. -However, suppose that another vulnerability B where Automatable is equal to [yes](#automatiability) can be reliably used to chain to vulnerability A. -This automates the _reconnaissance_ of vulnerable systems. -In this situation, the analyst should continue to analyze vulnerability A to understand whether the remaining steps in the kill chain can be automated. - -### Gathering Information About Automatable - -An analyst should be able to sketch the automation scenario and how it either does or does not satisfy each of the four kill chain steps. -Once one step is not satisfied, the analyst can stop and select [*no*](#automatable). -Code that demonstrably automates all four kill chain steps certainly satisfies as a sketch. -We say sketch to indicate that plausible arguments, such as convincing psuedocode of an automation pathway for each step, are also adequate evidence in favor of a [*yes*](#automatable) to [*Automatable*](#automatable). - -Like all SSVC decision points, [*Automatable*](#automatable) should capture the analyst's best understanding of plausible scenarios at the time of the analysis. -An answer of *no* does not mean that it is absolutely inconceivable to automate exploitation in any scenario. -It means the analyst is not able to sketch a plausible path through all four kill chain steps. -“Plausible” sketches should account for widely deployed network and host-based defenses. -Liveness of Internet-connected services means quite a few overlapping things [@bano2018scanning]. -For most vulnerabilities, an open port does not automatically mean that reconnaissance, weaponization, and delivery are automatable. 
-Furthermore, discovery of a vulnerable service is not automatable in a situation where only two hosts are misconfigured to expose the service out of 2 million hosts that are properly configured. -As discussed in in [Reasoning Steps Forward](#reasoning-steps-forward), the analyst should consider *credible* effects based on *known* use cases of the software system to be pragmatic about scope and providing values to decision points. - -## Value Density - -[*Value Density*](#value-density) is described as *diffuse* or *concentrated* based on the resources that the adversary will gain control over with a single exploitation event: - - - [*diffuse*](#value-density): The system that contains the vulnerable component has - limited resources. That is, the resources that the adversary will - gain control over with a single exploitation event are relatively - small. Examples of systems with diffuse value are email accounts, - most consumer online banking accounts, common cell phones, and most - personal computing resources owned and maintained by users. (A - “user” is anyone whose professional task is something other than - the maintenance of the system or component. As with [*Safety Impact*](#safety-impact), - a “system operator” is anyone who is professionally responsible for - the proper operation or maintenance of a system.) - - - [*concentrated*](#value-density): The system that contains the vulnerable component - is rich in resources. Heuristically, such systems are often the - direct responsibility of “system operators” rather than users. - Examples of concentrated value are database systems, Kerberos - servers, web servers hosting login pages, and cloud service - providers. However, usefulness and uniqueness of the resources on - the vulnerable system also inform value density. 
For example, - encrypted mobile messaging platforms may have concentrated value, - not because each phone’s messaging history has a particularly large - amount of data, but because it is uniquely valuable to law - enforcement. - -### Gathering Information About Value Density - -The heuristics presented in the [*Value Density*](#value-density) definitions involve whether the system is usually maintained by a dedicated professional, although we have noted some exceptions (such as encrypted mobile messaging applications). -If there are additional counterexamples to this heuristic, please describe them and the reasoning why the system should have the alternative decision value in an issue on the [SSVC GitHub](https://github.com/CERTCC/SSVC/issues). - -An analyst might use market research reports or Internet telemetry data to assess an unfamiliar product. -Organizations such as Gartner produce research on the market position and product comparisons for a large variety of systems. -These generally identify how a product is deployed, used, and maintained. -An organization's own marketing materials are a less reliable indicator of how a product is used, or at least how the organization expects it to be used. - -Network telemetry can inform how many instances of a software system are connected to a network. -Such telemetry is most reliable for the supplier of the software, especially if software licenses are purchased and checked. -Measuring how many instances of a system are in operation is useful, but having more instances does not mean that the software is a densely valuable target. -However, market penetration greater than approximately 75% generally means that the product uniquely serves a particular market segment or purpose. -This line of reasoning is what supports a determination that an ubiquitous encrypted mobile messaging application should be considered to have a [*concentrated*](#value-density) Value Density. 
- -### Alternative Utility Outputs - -Alternative heuristics can plausibly be used as proxies for adversary utility. -One example is the value of the vulnerability if it were sold on the open market. -Some firms, such as [Zerodium](https://zerodium.com/program.html), make such pricing structures public. -The valuable exploits track the [*Automatable*](#automatable) and [*Value Density*](#value-density) heuristics for the most part. -Within a single system—whether it is Apache, Windows, iOS or WhatsApp—more successfully automated steps in the kill lead to higher exploit value. -Remote code execution with sandbox escape and without user interaction are the most valuable exploits, and these features describe automation of the relevant kill chain steps. - -How equivalently [*Automatable*](#automatable) exploits for different systems are priced relative to each other is more idiosyncratic. -Price does not only track the [*Value Density*](#value-density) of the system, but presumably also the existing supply of exploits and the installation distribution among the targets of Zerodium’s customers. -Currently, we simplify the analysis and ignore these factors. -However, future work should look for and prevent large mismatches between the outputs of the [*Utility*](#utility) decision point and the exploit markets. diff --git a/doc/md_src_files/05_01_00_exploitation.md b/doc/md_src_files/05_01_00_exploitation.md new file mode 100644 index 00000000..f259a177 --- /dev/null +++ b/doc/md_src_files/05_01_00_exploitation.md @@ -0,0 +1,37 @@ +## Exploitation +> Evidence of Active Exploitation of a Vulnerability + +The intent of this measure is the present state of exploitation of the vulnerability. The intent is not to predict future exploitation but only to acknowledge the current state of affairs. Predictive systems, such as EPSS, could be used to augment this decision or to notify stakeholders of likely changes [@jacobs2019exploit]. 
+ +Table: Exploitation Decision Values + +| Value | Definition | +| :--- | :------------ | +| None | There is no evidence of active exploitation and no public proof of concept (PoC) of how to exploit the vulnerability. | +| PoC
(Proof of Concept) | One of the following cases is true: (1) exploit code is sold or traded on underground or restricted fora; (2) a typical public PoC in places such as Metasploit or ExploitDB; or (3) the vulnerability has a well-known method of exploitation. Some examples of condition (3) are open-source web proxies serve as the PoC code for how to exploit any vulnerability in the vein of improper validation of TLS certificates. As another example, Wireshark serves as a PoC for packet replay attacks on ethernet or WiFi networks. | +| Active | Shared, observable, reliable evidence that the exploit is being used in the wild by real attackers; there is credible public reporting. | + + +### Gathering Information About Exploitation +[@householder2020historical] presents a method for searching the GitHub repositories of open-source exploit databases. +This method could be employed to gather information about whether [PoC](#exploitation) is true. +However, part (3) of [PoC](#exploitation) would not be represented in such a search, so more information gathering would be needed. +For part (3), perhaps we could construct a mapping of CWE-IDs which always represent vulnerabilities with well-known methods of exploitation. +For example, CWE-295, [Improper Certificate Validation +](https://cwe.mitre.org/data/definitions/295.html), and its child CWEs, describe improper validation of TLS certificates. +These CWE-IDs could always be marked as [PoC](#exploitation) since that meets condition (3) in the definition. +A comprehensive set of suggested CWE-IDs for this purpose is future work. + +Gathering information for [active](#exploitation) is a bit harder. +If the vulnerability has a name or public identifier (such as a CVE-ID), a search of news websites, Twitter, the vendor's vulnerability description, and public vulnerability databases for mentions of exploitation is generally adequate. 
+However, if the organization has the ability to detect exploitation attempts—for instance, through reliable and precise IDS signatures based on a public PoC—then detection of exploitation attempts also signals that [active](#exploitation) is the right choice. +Determining which vulnerability a novel piece of malware uses may be time consuming, requiring reverse engineering and a lot of trial and error. +Additionally, capable incident detection and analysis capabilities are required to make reverse engineering possible. +Because most organizations do not conduct these processes fully for most incidents, information about which vulnerabilities are being actively exploited generally comes from public reporting by organizations that do conduct these processes. +As long as those organizations also share detection methods and signatures, the results are usually quickly corroborated by the community. +For these reasons, we assess public reporting by established security community members to be a good information source for [active](#exploitation); however, one should not assume it is complete. + +The description for [none](#exploitation) says that there is no **evidence** of active exploitation. +This framing admits that an analyst may not be able to detect or know about every attack. +An analyst should feel comfortable selecting [none](#exploitation) if they (or their search scripts) have performed searches in the appropriate places for public PoCs and active exploitation (as described above) and found none. +Acknowledging that [*Exploitation*](#exploitation) values can change relatively quickly, we recommend conducting these searches frequently: if they can be automated to the organization's satisfaction, perhaps once a day (see also [Guidance on Communicating Results](#guidance-on-communicating-results)). 
diff --git a/doc/md_src_files/05_02_00_technical_impact.md b/doc/md_src_files/05_02_00_technical_impact.md new file mode 100644 index 00000000..f43f2a2f --- /dev/null +++ b/doc/md_src_files/05_02_00_technical_impact.md @@ -0,0 +1,34 @@ +## Technical Impact +> Technical Impact of Exploiting the Vulnerability + +When evaluating [*Technical Impact*](#technical-impact), recall the scope definition in the [Scope Section](#scope). +Total control is relative to the affected component where the vulnerability resides. +If a vulnerability discloses authentication or authorization credentials to the system, this information disclosure should also be scored as “total” if those credentials give an adversary total control of the component. + +As mentioned in [Current State of Practice](#current-state-of-practice), the scope of SSVC is just those situations in which there is a vulnerability. +Our definition of **vulnerability** is based on the determination that some security policy is violated. +We consider a security policy violation to be a technical impact—or at least, a security policy violation must have some technical instantiation. +Therefore, if there is a vulnerability then there must be some technical impact. + +Table: Technical Impact Decision Values + +| Value | Definition | +| :--- | :------------- | +| Partial | The exploit gives the adversary *limited* control over, or information exposure about, the behavior of the software that contains the vulnerability. Or the exploit gives the adversary an importantly low stochastic opportunity for total control. In this context, “low” means that the attacker cannot reasonably make enough attempts to overcome the low chance of each attempt not working. Denial of service is a form of limited control over the behavior of the vulnerable component. 
| +| Total | The exploit gives the adversary *total* control over the behavior of the software, or it gives total disclosure of all information on the system that contains the vulnerability | + + +### Gathering Information About Technical Impact + +Assessing [*Technical Impact*](#technical-impact) amounts to assessing the degree of control over the vulnerable component the attacker stands to gain by exploiting the vulnerability. +One way to approach this analysis is to ask whether the control gained is *total* or not. +If it is not total, it is *partial*. +If an answer to one of the following questions is _yes_, then control is *total*. +After exploiting the vulnerability, + - can the attacker install and run arbitrary software? + - can the attacker trigger all the actions that the vulnerable component can perform? + - does the attacker get an account with full privileges to the vulnerable component (administrator or root user accounts, for example)? + +This list is an evolving set of heuristics. +If you find a vulnerability that should have [*total*](#technical-impact) [*Technical Impact*](#technical-impact) but that does not answer yes to any of these questions, please describe the example and what question we might add to this list in an issue on the [SSVC GitHub](https://github.com/CERTCC/SSVC/issues). + diff --git a/doc/md_src_files/05_03_00_automatable.md b/doc/md_src_files/05_03_00_automatable.md new file mode 100644 index 00000000..0b245752 --- /dev/null +++ b/doc/md_src_files/05_03_00_automatable.md @@ -0,0 +1,38 @@ +## Automatable + +[*Automatable*](#automatable) captures the answer to the question “Can an attacker reliably automate creating exploitation events for this vulnerability?” This metric can take the values *no* or *yes*: + + - [*no*](#automatable): Attackers cannot reliably automate steps 1-4 of the kill chain + [@hutchins2011intelligence] for this vulnerability. 
These + steps are (1) reconnaissance, (2) weaponization, (3) delivery, and (4) exploitation. + Reasons why a step may not be reliably automatable could include the following: + 1. the vulnerable component is not searchable or enumerable on the network, + 2. weaponization may require human direction for each target, + 3. delivery may require channels that widely deployed network security configurations block, and + 4. exploitation is not reliable, due to exploit-prevention techniques enabled by default; ASLR is an example of an exploit-prevention tool. + + - [*yes*](#automatable): Attackers can reliably automate steps 1-4 of the kill chain. + If the vulnerability allows remote code execution or command injection, the expected response should be yes. + +Due to vulnerability chaining, there is some nuance as to whether reconnaissance can be automated. For example, consider a vulnerability A. +If the systems vulnerable to A are usually not openly connected to incoming traffic (that is, [*Exposure*](#exposure) is [small](#exposure) or [controlled](#exposure)), reconnaissance probably cannot be automated (scans would be blocked, etc.). This would make Automatable equal to [no](#automatable) for vulnerability A. +However, suppose that another vulnerability B where Automatable is equal to [yes](#automatable) can be reliably used to chain to vulnerability A. +This automates the _reconnaissance_ of vulnerable systems. +In this situation, the analyst should continue to analyze vulnerability A to understand whether the remaining steps in the kill chain can be automated. + +### Gathering Information About Automatable + +An analyst should be able to sketch the automation scenario and how it either does or does not satisfy each of the four kill chain steps. +Once one step is not satisfied, the analyst can stop and select [*no*](#automatable). +Code that demonstrably automates all four kill chain steps certainly satisfies as a sketch.
+We say sketch to indicate that plausible arguments, such as convincing pseudocode of an automation pathway for each step, are also adequate evidence in favor of a [*yes*](#automatable) to [*Automatable*](#automatable). + +Like all SSVC decision points, [*Automatable*](#automatable) should capture the analyst's best understanding of plausible scenarios at the time of the analysis. +An answer of *no* does not mean that it is absolutely inconceivable to automate exploitation in any scenario. +It means the analyst is not able to sketch a plausible path through all four kill chain steps. +“Plausible” sketches should account for widely deployed network and host-based defenses. +Liveness of Internet-connected services means quite a few overlapping things [@bano2018scanning]. +For most vulnerabilities, an open port does not automatically mean that reconnaissance, weaponization, and delivery are automatable. +Furthermore, discovery of a vulnerable service is not automatable in a situation where only two hosts are misconfigured to expose the service out of 2 million hosts that are properly configured. +As discussed in [Reasoning Steps Forward](#reasoning-steps-forward), the analyst should consider *credible* effects based on *known* use cases of the software system to be pragmatic about scope and providing values to decision points. + diff --git a/doc/md_src_files/05_04_00_value_density.md b/doc/md_src_files/05_04_00_value_density.md new file mode 100644 index 00000000..b924bfc1 --- /dev/null +++ b/doc/md_src_files/05_04_00_value_density.md @@ -0,0 +1,42 @@ +## Value Density + +[*Value Density*](#value-density) is described as *diffuse* or *concentrated* based on the resources that the adversary will gain control over with a single exploitation event: + + - [*diffuse*](#value-density): The system that contains the vulnerable component has + limited resources.
That is, the resources that the adversary will + gain control over with a single exploitation event are relatively + small. Examples of systems with diffuse value are email accounts, + most consumer online banking accounts, common cell phones, and most + personal computing resources owned and maintained by users. (A + “user” is anyone whose professional task is something other than + the maintenance of the system or component. As with [*Safety Impact*](#safety-impact), + a “system operator” is anyone who is professionally responsible for + the proper operation or maintenance of a system.) + + - [*concentrated*](#value-density): The system that contains the vulnerable component + is rich in resources. Heuristically, such systems are often the + direct responsibility of “system operators” rather than users. + Examples of concentrated value are database systems, Kerberos + servers, web servers hosting login pages, and cloud service + providers. However, usefulness and uniqueness of the resources on + the vulnerable system also inform value density. For example, + encrypted mobile messaging platforms may have concentrated value, + not because each phone’s messaging history has a particularly large + amount of data, but because it is uniquely valuable to law + enforcement. + +### Gathering Information About Value Density + +The heuristics presented in the [*Value Density*](#value-density) definitions involve whether the system is usually maintained by a dedicated professional, although we have noted some exceptions (such as encrypted mobile messaging applications). +If there are additional counterexamples to this heuristic, please describe them and the reasoning why the system should have the alternative decision value in an issue on the [SSVC GitHub](https://github.com/CERTCC/SSVC/issues). + +An analyst might use market research reports or Internet telemetry data to assess an unfamiliar product. 
+Organizations such as Gartner produce research on the market position and product comparisons for a large variety of systems. +These generally identify how a product is deployed, used, and maintained. +An organization's own marketing materials are a less reliable indicator of how a product is used, or at least how the organization expects it to be used. + +Network telemetry can inform how many instances of a software system are connected to a network. +Such telemetry is most reliable for the supplier of the software, especially if software licenses are purchased and checked. +Measuring how many instances of a system are in operation is useful, but having more instances does not mean that the software is a densely valuable target. +However, market penetration greater than approximately 75% generally means that the product uniquely serves a particular market segment or purpose. +This line of reasoning is what supports a determination that a ubiquitous encrypted mobile messaging application should be considered to have a [*concentrated*](#value-density) Value Density. diff --git a/doc/md_src_files/05_05_00_utility.md b/doc/md_src_files/05_05_00_utility.md new file mode 100644 index 00000000..2603c895 --- /dev/null +++ b/doc/md_src_files/05_05_00_utility.md @@ -0,0 +1,47 @@ +## Utility +> The Usefulness of the Exploit to the Adversary + +[*Utility*](#utility) estimates an adversary's benefit compared to their effort based on the assumption that they can exploit the vulnerability. +[*Utility*](#utility) is independent from the state of [*Exploitation*](#exploitation), which measures whether a set of adversaries have ready access to exploit code or are in fact exploiting the vulnerability. +In economic terms, [*Exploitation*](#exploitation) measures whether the **capital cost** of producing reliable exploit code has been paid or not. +[*Utility*](#utility) estimates the **marginal cost** of each exploitation event.
+More plainly, [*Utility*](#utility) is about how much an adversary might benefit from a campaign using the vulnerability in question, whereas [*Exploitation*](#exploitation) is about how easy it would be to start such a campaign or if one is already underway. + +Heuristically, we base Utility on a combination of the value density of vulnerable components and whether potential exploitation is automatable. +This framing makes it easier to analytically derive these categories from a description of the vulnerability and the affected component. +[*Automatable*](#automatable) as ([*no*](#automatable) or [*yes*](#automatable)) and [*Value Density*](#value-density) as ([*diffuse*](#value-density) or [*concentrated*](#value-density)) define those decision points. + +Roughly, [*Utility*](#utility) is a combination of two things: (1) the value of each exploitation event and (2) the ease and speed with which the adversary can cause exploitation events. We define [*Utility*](#utility) as laborious, efficient, or super effective, as described in [Utility Decision Values](#table-utility). [The next table](#table-utility-2) is an equivalent expression of [*Utility*](#utility) that resembles a lookup table in a program. + +Table: Utility Decision Values + +| Value | Definition | +| :--- | :---------- | +| Laborious | *No* to automatable and diffuse value | +| Efficient | {*Yes* to automatable and diffuse value} OR {*No* to automatable and concentrated value} | +| Super Effective | *Yes* to automatable and concentrated value | + +Table: Utility to the Adversary, as a Combination of Automatable and Value Density + +| *Automatable* | *Value Density* | *Utility* | +| ----------- | --------------- | --: | +| *no* | *diffuse* | laborious | +| *no* | *concentrated* | efficient | +| *yes* | *diffuse* | efficient | +| *yes* | *concentrated* | super effective | + + + +### Alternative Utility Outputs + +Alternative heuristics can plausibly be used as proxies for adversary utility. 
+One example is the value of the vulnerability if it were sold on the open market. +Some firms, such as [Zerodium](https://zerodium.com/program.html), make such pricing structures public. +The valuable exploits track the [*Automatable*](#automatable) and [*Value Density*](#value-density) heuristics for the most part. +Within a single system—whether it is Apache, Windows, iOS or WhatsApp—more successfully automated steps in the kill chain lead to higher exploit value. +Remote code execution with sandbox escape and without user interaction are the most valuable exploits, and these features describe automation of the relevant kill chain steps. + +How equivalently [*Automatable*](#automatable) exploits for different systems are priced relative to each other is more idiosyncratic. +Price does not only track the [*Value Density*](#value-density) of the system, but presumably also the existing supply of exploits and the installation distribution among the targets of Zerodium’s customers. +Currently, we simplify the analysis and ignore these factors. +However, future work should look for and prevent large mismatches between the outputs of the [*Utility*](#utility) decision point and the exploit markets. From cd3d846d0393aba4e965c54ae24a1cdf4e41f7d9 Mon Sep 17 00:00:00 2001 From: "Allen D. Householder" Date: Fri, 30 Jun 2023 14:00:32 -0400 Subject: [PATCH 03/12] Break up second half of chapter 5 into one file per section.
--- doc/md_src_files/05_06_00_safety_impact.md | 155 ------------------- doc/md_src_files/05_07_00_mission_impact.md | 24 +++ doc/md_src_files/05_08_00_human_impact.md | 90 +++++++++++ doc/md_src_files/05_09_00_system_exposure.md | 39 +++++ 4 files changed, 153 insertions(+), 155 deletions(-) create mode 100644 doc/md_src_files/05_07_00_mission_impact.md create mode 100644 doc/md_src_files/05_08_00_human_impact.md create mode 100644 doc/md_src_files/05_09_00_system_exposure.md diff --git a/doc/md_src_files/05_06_00_safety_impact.md b/doc/md_src_files/05_06_00_safety_impact.md index e65566ef..e1c8af50 100644 --- a/doc/md_src_files/05_06_00_safety_impact.md +++ b/doc/md_src_files/05_06_00_safety_impact.md @@ -236,158 +236,3 @@ Table: Public Safety Impact Decision Values Deployers are anticipated to have a more fine-grained perspective on the safety impacts broadly defined in [Safety Impact](#table-safety-impact). We defer this topic for now because we combine it with [*Mission Impact*](#mission-impact) to simplify implementation for deployers. - -## Mission Impact -> Impact on Mission Essential Functions of the Organization - -A **mission essential function (MEF)** is a function “directly related to accomplishing the organization’s mission as set forth in its statutory or executive charter” [@FCD2_2017, page A-1]. Identification and prioritization of mission essential functions enables effective continuity planning or crisis planning. Mission Essential Functions are in effect critical activities within an organization that are used to identify key assets, supporting tasks, and resources that an organization requires to remain operational in a crises situation, and so must be included in its planning process. During an event, key resources may be limited and personnel may be unavailable, so organizations must consider these factors and validate assumptions when identifying, validating, and prioritizing MEFs. 
- -When reviewing the list of organizational functions, an organization must first identify whether a function is essential or non-essential. The distinction between these two categories is whether or not an organization must perform a function during a disruption to normal operations and must continue performance during emergencies [@FCD2_2017, page B-2]. Essential functions are both important and urgent. Functions that can be deferred until after an emergency are identified as non-essential. - -As mission essential functions are most clearly defined for government agencies, stakeholders in other sectors may be familiar with different terms of art from continuity planning. For example, infrastructure providers in the US may better align with [National Critical Functions](https://www.cisa.gov/national-critical-functions). Private sector businesses may better align with [operational and financial impacts](https://www.ready.gov/sites/default/files/2020-03/business-impact-analysis-worksheet.pdf) in a [business continuity plan](https://www.ready.gov/business-continuity-plan). While the processes, terminology, and audience for these different frameworks differ, they all can provide a sense of the criticality of an asset or assets within the scope of the stakeholder conducting the cyber vulnerability prioritization with SSVC. In that sense they all function quite similarly within SSVC. Organizations should use whatever is most appropriate for their stakeholder context, with Mission Essential Function analysis serving as a fully worked example in the SSVC documents. 
- -Table: Mission Impact Decision Values - -| Value | Definition | -| :--- | :---------- | -| Degraded | Little to no impact up to degradation of non-essential functions; chronic degradation would eventually harm essential functions | -| MEF Support Crippled | Activities that directly support essential functions are crippled; essential functions continue for a time | -| MEF Failure | Any one mission essential function fails for period of time longer than acceptable; overall mission of the organization degraded but can still be accomplished for a time | -| Mission Failure | Multiple or all mission essential functions fail; ability to recover those functions degraded; organization’s ability to deliver its overall mission fails | - -### Gathering Information About Mission Impact - -The factors that influence the mission impact level are diverse. This paper does not exhaustively discuss how a stakeholder should answer a question; that is a topic for future work. At a minimum, understanding mission impact should include gathering information about the critical paths that involve vulnerable components, viability of contingency measures, and resiliency of the systems that support the mission. There are various sources of guidance on how to gather this information; see for example the FEMA guidance in Continuity Directive 2 [@FCD2_2017] or OCTAVE FORTE [@tucker2018octave]. This is part of risk management more broadly. It should require the vulnerability management team to interact with more senior management to understand mission priorities and other aspects of risk mitigation. - -As a heuristic, [*Utility*](#utility) might constrain [*Mission Impact*](#mission-impact) if both are not used in the same decision tree. -For example, if the [*Utility*](#utility) is [*super effective*](#utility), then [*Mission Impact*](#mission-impact) is at least [*MEF support crippled*](#mission-impact). 
- -## Human Impact - > Combined Situated Safety and Mission Impact - -In pilot implementations of SSVC, we received feedback that organizations tend to think of mission and safety impacts as if they were combined into a single factor: in other words, the priority increases regardless which of the two impact factors was increased. -We therefore combine Situated Safety and Mission Impact for deployers into a single _Human Impact_ factor as a dimension reduction step as follows. -We observe that the day-to-day operations of an organization often have already built in a degree of tolerance to small-scale variance in mission impacts. -Thus in our opinion we need only concern ourselves with discriminating well at the upper end of the scale. -Therefore we combine the two lesser mission impacts of degraded and MEF support crippled into a single category, while retaining the distinction between MEF Failure and Mission Failure at the extreme. -This gives us three levels of mission impact to work with. - -On the other hand, most organizations tend to have lower tolerance for variance in safety. -Even small deviations in safety are unlikely to go unnoticed or unaddressed. -We suspect that the presence of regulatory oversight for safety issues and its absence at the lower end of the mission impact scale influences this behavior. -Because of this higher sensitivity to safety concerns, we chose to retain a four-level resolution for the safety dimension. -We then combine Mission Impact with Situated Safety impact and map them onto a 4-tiered scale (Low, Medium, High, Very High). -The mapping is shown in the following table. 
- -Table: Combining Mission and Situated Safety Impact into Human Impact - -| Situated Safety Impact | Mission Impact | Combined Value (Human Impact) | -| -----: | :----- | :---: | -| None/Minor | Degraded/Crippled | Low | -| None/Minor | MEF Failure | Medium | -| None/Minor | Mission Failure | Very High | -| Major | Degraded/Crippled | Medium | -| Major | MEF Failure | High | -| Major | Mission Failure | Very High | -| Hazardous | Degraded/Crippled | High | -| Hazardous | MEF Failure | High | -| Hazardous | Mission Failure | Very High | -| Catastrophic | Degraded/Crippled | Very High | -| Catastrophic | MEF Failure | Very High | -| Catastrophic | Mission Failure | Very High | - - - - -### Safety and Mission Impact Decision Points for Industry Sectors - -We expect to encounter diversity in both safety and mission impacts across different organizations. However, we also anticipate a degree of commonality of impacts to arise across organizations within a given industry sector. For example, different industry sectors may have different use cases for the same software. -Therefore, vulnerability information providers—that is, vulnerability databases, Information Sharing and Analysis Organizations (ISAOs), or Information Sharing and Analysis Centers (ISACs)—may provide SSVC information tailored as appropriate to their constituency's safety and mission concerns. -For considerations on how organizations might communicate SSVC information to their constituents, see [Guidance on Communicating Results](#guidance-on-communicating-results). - - -## System Exposure -> The Accessible Attack Surface of the Affected System or Service - -Measuring the attack surface precisely is difficult, and we do not propose to perfectly delineate between small and controlled access. -Exposure should be judged against the system in its deployed context, which may differ from how it is commonly expected to be deployed. 
-For example, the exposure of a device on a vehicle's CAN bus will vary depending on the presence of a cellular telemetry device on the same bus. - -If a vulnerability cannot be remediated, other mitigations may be used. -Usually, the effect of these mitigations is to reduce exposure of the vulnerable component. -Therefore, a deployer’s response to Exposure may change if such mitigations are put in place. -If a mitigation changes exposure and thereby reduces the priority of a vulnerability, that mitigation can be considered a success. -Whether that mitigation allows the deployer to defer further action varies according to each case. - -Table: System Exposure Decision Values - -| Value | Definition | -| :--- | :------------ | -| Small | Local service or program; highly controlled network | -| Controlled | Networked service with some access restrictions or mitigations already in place (whether locally or on the network). A successful mitigation must reliably interrupt the adversary’s attack, which requires the attack is detectable both reliably and quickly enough to respond. *Controlled* covers the situation in which a vulnerability can be exploited through chaining it with other vulnerabilities. The assumption is that the number of steps in the attack path is relatively low; if the path is long enough that it is implausible for an adversary to reliably execute it, then *exposure* should be *small*. | -| Open | Internet or another widely accessible network where access cannot plausibly be restricted or controlled (e.g., DNS servers, web servers, VOIP servers, email servers) | - -### Gathering Information About System Exposure - -[*System Exposure*](#system-exposure) is primarily used by Deployers, so the question is about whether some specific system is in fact exposed, not a hypothetical or aggregate question about systems of that type. 
-Therefore, it generally has a concrete answer, even though it may vary from vulnerable component to vulnerable component, based on their respective configurations. - -[*System Exposure*](#system-exposure) can be readily informed by network scanning techniques. -For example, if the vulnerable component is visible on [Shodan](www.shodan.io) or by some other external scanning service, then it is [*open*](#system-exposure). -Network policy or diagrams are also useful information sources, especially for services intentionally open to the Internet such as public web servers. -An analyst should also choose [*open*](#system-exposure) for a phone or PC that connects to the web or email without the usual protections (IP and URL blocking, updated firewalls, etc.). - -Distinguishing between [*small*](#system-exposure) and [*controlled*](#system-exposure) is more nuanced. -If [*open*](#system-exposure) has been ruled out, some suggested heuristics for differentiating the other two are as follows. -Apply these heuristics in order and stop when one of them applies. - - If the system's networking and communication interfaces have been physically removed or disabled, choose [*small*](#system-exposure). - - If [*Automatable*](#automatable) is [*yes*](#automatable), then choose [*controlled*](#system-exposure). The reasoning behind this heuristic is that if reconnaissance through exploitation is automatable, then the usual deployment scenario exposes the system sufficiently that access can be automated, which contradicts the expectations of [*small*](#system-exposure). - - If the vulnerable component is on a network where other hosts can browse the web or receive email, choose [*controlled*](#system-exposure). - -If you have suggestions for further heuristics, or potential counterexamples to these, please describe the example and reasoning in an issue on the [SSVC GitHub](https://github.com/CERTCC/SSVC/issues). 
diff --git a/doc/md_src_files/05_07_00_mission_impact.md b/doc/md_src_files/05_07_00_mission_impact.md new file mode 100644 index 00000000..2d0fddc9 --- /dev/null +++ b/doc/md_src_files/05_07_00_mission_impact.md @@ -0,0 +1,24 @@ +## Mission Impact +> Impact on Mission Essential Functions of the Organization + +A **mission essential function (MEF)** is a function “directly related to accomplishing the organization’s mission as set forth in its statutory or executive charter” [@FCD2_2017, page A-1]. Identification and prioritization of mission essential functions enables effective continuity planning or crisis planning. Mission Essential Functions are in effect critical activities within an organization that are used to identify key assets, supporting tasks, and resources that an organization requires to remain operational in a crises situation, and so must be included in its planning process. During an event, key resources may be limited and personnel may be unavailable, so organizations must consider these factors and validate assumptions when identifying, validating, and prioritizing MEFs. + +When reviewing the list of organizational functions, an organization must first identify whether a function is essential or non-essential. The distinction between these two categories is whether or not an organization must perform a function during a disruption to normal operations and must continue performance during emergencies [@FCD2_2017, page B-2]. Essential functions are both important and urgent. Functions that can be deferred until after an emergency are identified as non-essential. + +As mission essential functions are most clearly defined for government agencies, stakeholders in other sectors may be familiar with different terms of art from continuity planning. For example, infrastructure providers in the US may better align with [National Critical Functions](https://www.cisa.gov/national-critical-functions). 
Private sector businesses may better align with [operational and financial impacts](https://www.ready.gov/sites/default/files/2020-03/business-impact-analysis-worksheet.pdf) in a [business continuity plan](https://www.ready.gov/business-continuity-plan). While the processes, terminology, and audience for these different frameworks differ, they all can provide a sense of the criticality of an asset or assets within the scope of the stakeholder conducting the cyber vulnerability prioritization with SSVC. In that sense they all function quite similarly within SSVC. Organizations should use whatever is most appropriate for their stakeholder context, with Mission Essential Function analysis serving as a fully worked example in the SSVC documents. + +Table: Mission Impact Decision Values + +| Value | Definition | +| :--- | :---------- | +| Degraded | Little to no impact up to degradation of non-essential functions; chronic degradation would eventually harm essential functions | +| MEF Support Crippled | Activities that directly support essential functions are crippled; essential functions continue for a time | +| MEF Failure | Any one mission essential function fails for a period of time longer than acceptable; overall mission of the organization degraded but can still be accomplished for a time | +| Mission Failure | Multiple or all mission essential functions fail; ability to recover those functions degraded; organization’s ability to deliver its overall mission fails | + +### Gathering Information About Mission Impact + +The factors that influence the mission impact level are diverse. This paper does not exhaustively discuss how a stakeholder should answer a question; that is a topic for future work. At a minimum, understanding mission impact should include gathering information about the critical paths that involve vulnerable components, viability of contingency measures, and resiliency of the systems that support the mission.
There are various sources of guidance on how to gather this information; see for example the FEMA guidance in Continuity Directive 2 [@FCD2_2017] or OCTAVE FORTE [@tucker2018octave]. This is part of risk management more broadly. It should require the vulnerability management team to interact with more senior management to understand mission priorities and other aspects of risk mitigation. + +As a heuristic, [*Utility*](#utility) might constrain [*Mission Impact*](#mission-impact) if both are not used in the same decision tree. +For example, if the [*Utility*](#utility) is [*super effective*](#utility), then [*Mission Impact*](#mission-impact) is at least [*MEF support crippled*](#mission-impact). diff --git a/doc/md_src_files/05_08_00_human_impact.md b/doc/md_src_files/05_08_00_human_impact.md new file mode 100644 index 00000000..8944bdef --- /dev/null +++ b/doc/md_src_files/05_08_00_human_impact.md @@ -0,0 +1,90 @@ +## Human Impact + > Combined Situated Safety and Mission Impact + +In pilot implementations of SSVC, we received feedback that organizations tend to think of mission and safety impacts as if they were combined into a single factor: in other words, the priority increases regardless which of the two impact factors was increased. +We therefore combine Situated Safety and Mission Impact for deployers into a single _Human Impact_ factor as a dimension reduction step as follows. +We observe that the day-to-day operations of an organization often have already built in a degree of tolerance to small-scale variance in mission impacts. +Thus in our opinion we need only concern ourselves with discriminating well at the upper end of the scale. +Therefore we combine the two lesser mission impacts of degraded and MEF support crippled into a single category, while retaining the distinction between MEF Failure and Mission Failure at the extreme. +This gives us three levels of mission impact to work with. 
+ +On the other hand, most organizations tend to have lower tolerance for variance in safety. +Even small deviations in safety are unlikely to go unnoticed or unaddressed. +We suspect that the presence of regulatory oversight for safety issues and its absence at the lower end of the mission impact scale influences this behavior. +Because of this higher sensitivity to safety concerns, we chose to retain a four-level resolution for the safety dimension. +We then combine Mission Impact with Situated Safety impact and map them onto a 4-tiered scale (Low, Medium, High, Very High). +The mapping is shown in the following table. + +Table: Combining Mission and Situated Safety Impact into Human Impact + +| Situated Safety Impact | Mission Impact | Combined Value (Human Impact) | +| -----: | :----- | :---: | +| None/Minor | Degraded/Crippled | Low | +| None/Minor | MEF Failure | Medium | +| None/Minor | Mission Failure | Very High | +| Major | Degraded/Crippled | Medium | +| Major | MEF Failure | High | +| Major | Mission Failure | Very High | +| Hazardous | Degraded/Crippled | High | +| Hazardous | MEF Failure | High | +| Hazardous | Mission Failure | Very High | +| Catastrophic | Degraded/Crippled | Very High | +| Catastrophic | MEF Failure | Very High | +| Catastrophic | Mission Failure | Very High | + + + + +### Safety and Mission Impact Decision Points for Industry Sectors + +We expect to encounter diversity in both safety and mission impacts across different organizations. However, we also anticipate a degree of commonality of impacts to arise across organizations within a given industry sector. For example, different industry sectors may have different use cases for the same software. 
+Therefore, vulnerability information providers—that is, vulnerability databases, Information Sharing and Analysis Organizations (ISAOs), or Information Sharing and Analysis Centers (ISACs)—may provide SSVC information tailored as appropriate to their constituency's safety and mission concerns. +For considerations on how organizations might communicate SSVC information to their constituents, see [Guidance on Communicating Results](#guidance-on-communicating-results). + + diff --git a/doc/md_src_files/05_09_00_system_exposure.md b/doc/md_src_files/05_09_00_system_exposure.md new file mode 100644 index 00000000..9c0e503b --- /dev/null +++ b/doc/md_src_files/05_09_00_system_exposure.md @@ -0,0 +1,39 @@ +## System Exposure +> The Accessible Attack Surface of the Affected System or Service + +Measuring the attack surface precisely is difficult, and we do not propose to perfectly delineate between small and controlled access. +Exposure should be judged against the system in its deployed context, which may differ from how it is commonly expected to be deployed. +For example, the exposure of a device on a vehicle's CAN bus will vary depending on the presence of a cellular telemetry device on the same bus. + +If a vulnerability cannot be remediated, other mitigations may be used. +Usually, the effect of these mitigations is to reduce exposure of the vulnerable component. +Therefore, a deployer’s response to Exposure may change if such mitigations are put in place. +If a mitigation changes exposure and thereby reduces the priority of a vulnerability, that mitigation can be considered a success. +Whether that mitigation allows the deployer to defer further action varies according to each case. 
+ +Table: System Exposure Decision Values + +| Value | Definition | +| :--- | :------------ | +| Small | Local service or program; highly controlled network | +| Controlled | Networked service with some access restrictions or mitigations already in place (whether locally or on the network). A successful mitigation must reliably interrupt the adversary’s attack, which requires the attack is detectable both reliably and quickly enough to respond. *Controlled* covers the situation in which a vulnerability can be exploited through chaining it with other vulnerabilities. The assumption is that the number of steps in the attack path is relatively low; if the path is long enough that it is implausible for an adversary to reliably execute it, then *exposure* should be *small*. | +| Open | Internet or another widely accessible network where access cannot plausibly be restricted or controlled (e.g., DNS servers, web servers, VOIP servers, email servers) | + +### Gathering Information About System Exposure + +[*System Exposure*](#system-exposure) is primarily used by Deployers, so the question is about whether some specific system is in fact exposed, not a hypothetical or aggregate question about systems of that type. +Therefore, it generally has a concrete answer, even though it may vary from vulnerable component to vulnerable component, based on their respective configurations. + +[*System Exposure*](#system-exposure) can be readily informed by network scanning techniques. +For example, if the vulnerable component is visible on [Shodan](https://www.shodan.io) or by some other external scanning service, then it is [*open*](#system-exposure). +Network policy or diagrams are also useful information sources, especially for services intentionally open to the Internet such as public web servers. +An analyst should also choose [*open*](#system-exposure) for a phone or PC that connects to the web or email without the usual protections (IP and URL blocking, updated firewalls, etc.).
+ +Distinguishing between [*small*](#system-exposure) and [*controlled*](#system-exposure) is more nuanced. +If [*open*](#system-exposure) has been ruled out, some suggested heuristics for differentiating the other two are as follows. +Apply these heuristics in order and stop when one of them applies. + - If the system's networking and communication interfaces have been physically removed or disabled, choose [*small*](#system-exposure). + - If [*Automatable*](#automatable) is [*yes*](#automatable), then choose [*controlled*](#system-exposure). The reasoning behind this heuristic is that if reconnaissance through exploitation is automatable, then the usual deployment scenario exposes the system sufficiently that access can be automated, which contradicts the expectations of [*small*](#system-exposure). + - If the vulnerable component is on a network where other hosts can browse the web or receive email, choose [*controlled*](#system-exposure). + +If you have suggestions for further heuristics, or potential counterexamples to these, please describe the example and reasoning in an issue on the [SSVC GitHub](https://github.com/CERTCC/SSVC/issues). From bde8c7f8219ab89c8097bdaa3b43e60e1ba69ac9 Mon Sep 17 00:00:00 2001 From: "Allen D. 
Householder" Date: Fri, 30 Jun 2023 14:05:59 -0400 Subject: [PATCH 04/12] split ch7 and ch8 into their own separate files --- doc/md_src_files/07_00_00_prioritization.md | 147 ------------------ .../08_00_00_communicating_results.md | 147 ++++++++++++++++++ 2 files changed, 147 insertions(+), 147 deletions(-) create mode 100644 doc/md_src_files/08_00_00_communicating_results.md diff --git a/doc/md_src_files/07_00_00_prioritization.md b/doc/md_src_files/07_00_00_prioritization.md index a9eadb73..6a6746cd 100644 --- a/doc/md_src_files/07_00_00_prioritization.md +++ b/doc/md_src_files/07_00_00_prioritization.md @@ -311,150 +311,3 @@ If an organization does not have an asset management or risk management (see als For this tabletop refinement, we could not select a mathematically representative set of CVEs. The goal was to select a handful of CVEs that would cover diverse types of vulnerabilities. The CVEs that we used for our tabletop exercises are CVE-2017-8083, CVE-2019-2712, CVE-2014-5570, and CVE-2017-5753. We discussed each one from the perspective of supplier and deployer. We evaluated CVE-2017-8083 twice because our understanding and descriptions had changed materially after the first three CVEs (six evaluation exercises). After we were satisfied that the decision trees were clearly defined and captured our intentions, we began the formal evaluation of the draft trees, which we describe in the next section. -# Guidance on Communicating Results - -There are many aspects of SSVC that two parties might want to communicate. -Not every stakeholder will use the decision points to make comparable decisions. -Suppliers and deployers make interdependent decisions, but the actions of one group are not strictly dependent on the other. -Recall that one reason for this is that SSVC is about prioritizing a vulnerability response action in general, not specifically applying a patch that a supplier produced. 
-Coordinators are particularly interested in facilitating communication because that is their core function. -This section handles three aspects of this challenge: formats for communicating SSVC, how to handle partial or incomplete information, and how to handle information that may change over time. - -This section is about communicating SSVC information about a specific vulnerability. -Any stakeholder making a decision on allocating effort should have a decision tree with its decision points and possible values specified already. -[Representation choices](#representation-choices) and [Tree Construction and Customization Guidance](#tree-construction-and-customization-guidance) discussed how SSVC uses a text file as the canonical form of a decision tree; the example trees can be found in [SSVC/data](https://github.com/CERTCC/SSVC/tree/main/data). -This section discusses the situation where one stakeholder, usually a supplier or coordinator, wants to communicate some information about a specific vulnerability to other stakeholders or constituents. - -## Communication Formats - -We recommend two structured communication formats, abbreviated and full. -The goal of the abbreviated format is to fill a need for providing identifying information about a vulnerability or decision in charts, graphs, and tables. Therefore, the abbreviated format is not designed to stand alone. -The goal of the full format is to capture all the context and details about a decision or work item in a clear and machine-readable way. - -### Abbreviated Format - -SSVC abbreviated form borrows directly from the CVSS “vector string” notation. -Since the purpose of the abbreviated form is to provide labels for charts and graphics, it does not stand alone. -The basic format for SSVC is: -``` -SSVC/(version)/(decision point):(value)[/decision point:value[/decision point:value[...]]][/time]/ -``` -Where `version` is `v2` if it is based on this current version of the SSVC. 
-The term `decision point` is one or two letters derived from the name of the decision point as follows: - - Start with the decision point name as given in [Likely Decision Points and Relevant Data](#likely-decision-points-and-relevant-data). - - Remove any text in parentheses (and the parentheses themselves). - - Remove the word “Impact” if it is part of the name. - - Create an initialism from remaining title-case words (ignore “of,” etc.), taking only the first two words. - - The first letter of the initialism is upper case; if there is a second letter, then it is lower case. - - Verify that the initialism is unique among decision points in the version of SSVC. If two initialisms collide, sort their source names equivalent to `LC_ALL=C sort`. The name that sorts first keeps the initialism for which there is a collision. Set the second letter of the initialism to the first character in the name that resolves the collision. If the names were `Threat` and `Threshold`, `T` would be `Threat` and `Ts` would be `Threshold`. We make an effort to design SSVC without such collisions. - -For example, [*Technical Impact*](#technical-impact) becomes `T` and [*Public Safety Impact*](#public-safety-impact) becomes `Ps`. - -The term `value` is a statement of the value or possible values of the decision point that precedes it and to which it is connected by a colon (`:`). -Similar to `decision point`, `value` should be made up of one or two letters derived from the name of the decision value in the section for its associated decision point. -For example [MEF support crippled](#mission-impact) becomes `Ms` and [efficient](#utility) becomes `E`. -The process is the same as above except that labels for a `value` do not need to be unique to the SSVC version, just unique to the associated `decision point`. - -The character `/` separates decision-point:value pairs. 
-As many pairs should be provided in the abbreviated form as are required to communicate the desired information about the vulnerability or work item. -A vector must contain at least one decision-point:value pair. -The ordering of the pairs should be sorted alphabetically from A to Z by the ASCII characters representing the decision points. -A trailing `/` is used to close the string. - -The vector is not tied to a specific decision tree. -It is meant to communicate information in a condensed form. -If priority labels (*defer*, etc.) are connected to a vector, then the decision tree used to reach those decisions should generally be noted. -However, for complex communication, machine-to-machine communication, or long-term storage of SSVC data, the JSON format and schema should be used. - -The optional parameter `time` is the date and time of the SSVCv2 record creation as represented in [RFC 3339, section 5.6](https://datatracker.ietf.org/doc/html/rfc3339). This is a subset of the date format also commonly known as ISO8601 format. - -Based on this, an example string could be: -``` -SSVCv2/Ps:Nm/T:T/U:E/2018-11-13T20:20:00Z/ -``` -For a vulnerability with [no or minor](#public-safety-impact) [*Public Safety Impact*](#public-safety-impact), [total](#technical-impact) [*Technical Impact*](#technical-impact), and [efficient](#utility) [*Utility*](#utility), which was evaluated on Nov 13,2018 at 8:20 PM UTC. - -While these abbreviated format vectors can be uniquely produced based on a properly formatted JSON object, going from abbreviated form to JSON is not supported. -Therefore, JSON is the preferred storage and transmission method. - -### Full JSON format - -For a more robust, self-contained, machine-readable, we provide JSON schemas. 
-The [provision schema](https://github.com/CERTCC/SSVC/blob/main/data/schema/SSVC_Provision_v2.03.schema.json) is equivalent to a decision tree and documents the full set of logical statements that a stakeholder uses to make decisions. -The [computed schema](https://github.com/CERTCC/SSVC/blob/main/data/schema/SSVC_Computed_v2.03.schema.json) expresses a set of information about a work item or vulnerability at a point in time. -A computed schema should identify the provision schema used, so the options from which the information was computed are specified. - -Each element of `choices` should be an object that is a key-value pair of `decision point`:`value`, where the term `decision point` is a string derived from the name of the decision point as follows: - - Start with the decision point name as given in [Likely Decision Points and Relevant Data](#likely-decision-points-and-relevant-data). - - Remove any text in parentheses (and the parentheses themselves). - - Remove colon characters, if any (`:`). - - Convert the string to [lower camel case](https://en.wikipedia.org/wiki/Camel_case) by lowercasing the string, capitalizing any letter after a space, and removing all spaces. - -The `value` term is derived the same way as `decision point` except start with the value name as given in the relevant decision point subsection of [Likely Decision Points and Relevant Data](#likely-decision-points-and-relevant-data). - -## Partial or Incomplete Information - -What an analyst knows about a vulnerability may not be complete. -However, the vulnerability management community may still benefit from partial information. -In particular, suppliers and coordinators who might not know everything a deployer knows can still provide benefit to deployers by sharing what partial information they do know. -A second benefit to providing methods for communicating partial information is the reduction of bottlenecks or barriers to information exchange. 
-A timely partial warning is better than a complete warning that is too late. - -The basic guidance is that the analyst should communicate all of the vulnerability's possible states, to the best of the analyst's knowledge. -If the analyst knows nothing, all states are possible. -For example, [*Utility*](#utility) may be [laborious](#utility), [efficient](#utility), or [super effective](#utility). -In abbreviated form, write this as `U:LESe`. -Since a capital letter always indicates a new value, this is unambiguous. - -The reason a stakeholder might publish something like `U:LESe` is that it expresses that the analyst thought about [*Utility*](#utility) but does not have anything to communicate. -A stakeholder might have information to communicate about some decision points but not others. -If SSVC uses this format to list the values that are in play for a particular vulnerability, there is no need for a special “I don't know” marker. - -The merit in this “list all values” approach emerges when the stakeholder knows that the value for a decision point may be A or B, but not C. -For example, say the analyst knows that [*Value Density*](#value-density) is [diffuse](#value-density) but does not know the value for [*Automatability](#automatability). -Then the analyst can usefully restrict [*Utility*](#utility) to one of [laborious](#utility) or [efficient](#utility). -In abbreviated form, write this as `U:LE`. -As discussed below, information can change over time. -Partial information may be, but is not required to be, sharpened over time into a precise value for the decision point. - -## Information Changes Over Time - -Vulnerability management decisions are dynamic, and may change over time as the available information changes. -Information changes are one reason why SSVC decisions should always be timestamped. -SSVC decision points have different temporal properties. -Some, such as [*Utility*](#utility), are mostly static. 
-For [*Utility*](#utility) to change, the market penetration or deployment norms of a vulnerable component would have to meaningfully change. -Some, such as [*State of Exploitation*](#state-of-exploitation), may change quickly but only in one direction. - -Both of these examples are out of the direct control of the vulnerability manager. -Some, such as [*Exposure*](#exposure), change mostly due to actions taken by the organization managing the vulnerable component. -If the actor who can usually trigger a relevant change is the organization using SSVC, then it is relatively straightforward to know when to update the SSVC decision. -That is, the organization should reevaluate the decision when they make a relevant change. -For those decision points that are about topics outside the control of the organization using SSVC, then the organization should occasionally poll their information sources for changes. -The cadence or rate of polls is different for each decision point, based on the expected rate of change. - -We expect that updating information over time will be most useful where the evidence-gathering process can be automated. -Organizations that have mature asset management systems will also find this update process more efficient than those that do not. -For an organization without a mature asset management system, we would recommend putting organizational resources into maturing that function before putting effort into regular updates to vulnerability prioritization decision points. - -The following decision points are usually out of the control of the organization running SSVC. -As an initial heuristic, we suggest the associated polling frequency for each. -These frequencies can be customized, as the update frequency is directly related to the organization's tolerance for the risk that the information is out of date. 
-As discussed in [Tree Construction and Customization Guidance](#tree-construction-and-customization-guidance), risk tolerance is unique to each organization. -Risk tolerance and risk appetite are primarily reflected in the priority labels (that is, decisions) encoded in the SSVC decision tree, but information polling frequency is also a risk tolerance decision and each organization may choose different time values. - - [*State of Exploitation*](#state-of-exploitation): every 1 day - - [*Technical Impact*](#technical-impact): never (should be static per vulnerability) - - [*Utility*](#utility): every 6 months - - [*Public Safety Impact*](#public-safety-impact): every 1 year - -The following decision points are usually in the control of the organization running SSVC and should be reevaluated when a relevant change is made or during annual reviews of assets. - - - [*Situated Safety Impact*](#situated-safety-impact) - - [*Mission Impact*](#mission-impact) - - [*System Exposure*](#system-exposure) - -If SSVC information is all timestamped appropriately (as discussed earlier in this section), then an analyst can compare the timestamp to the current date and determine whether information is considered stale. -The above rates are heuristic suggestions, and organizations may choose different ones. -Any public repository of vulnerability information should keep a change log of when values change for each decision point, for each vulnerability. -Vulnerability response analysts should keep such change logs as well. -Similar to logging practices recommended for incident response [@nist800-61r2], such practices make the process less error-prone and facilitate after-action reviews. 
diff --git a/doc/md_src_files/08_00_00_communicating_results.md b/doc/md_src_files/08_00_00_communicating_results.md new file mode 100644 index 00000000..f401327b --- /dev/null +++ b/doc/md_src_files/08_00_00_communicating_results.md @@ -0,0 +1,147 @@ +# Guidance on Communicating Results + +There are many aspects of SSVC that two parties might want to communicate. +Not every stakeholder will use the decision points to make comparable decisions. +Suppliers and deployers make interdependent decisions, but the actions of one group are not strictly dependent on the other. +Recall that one reason for this is that SSVC is about prioritizing a vulnerability response action in general, not specifically applying a patch that a supplier produced. +Coordinators are particularly interested in facilitating communication because that is their core function. +This section handles three aspects of this challenge: formats for communicating SSVC, how to handle partial or incomplete information, and how to handle information that may change over time. + +This section is about communicating SSVC information about a specific vulnerability. +Any stakeholder making a decision on allocating effort should have a decision tree with its decision points and possible values specified already. +[Representation choices](#representation-choices) and [Tree Construction and Customization Guidance](#tree-construction-and-customization-guidance) discussed how SSVC uses a text file as the canonical form of a decision tree; the example trees can be found in [SSVC/data](https://github.com/CERTCC/SSVC/tree/main/data). +This section discusses the situation where one stakeholder, usually a supplier or coordinator, wants to communicate some information about a specific vulnerability to other stakeholders or constituents. + +## Communication Formats + +We recommend two structured communication formats, abbreviated and full. 
+The goal of the abbreviated format is to fill a need for providing identifying information about a vulnerability or decision in charts, graphs, and tables. Therefore, the abbreviated format is not designed to stand alone. +The goal of the full format is to capture all the context and details about a decision or work item in a clear and machine-readable way. + +### Abbreviated Format + +SSVC abbreviated form borrows directly from the CVSS “vector string” notation. +Since the purpose of the abbreviated form is to provide labels for charts and graphics, it does not stand alone. +The basic format for SSVC is: +``` +SSVC/(version)/(decision point):(value)[/decision point:value[/decision point:value[...]]][/time]/ +``` +Where `version` is `v2` if it is based on this current version of the SSVC. +The term `decision point` is one or two letters derived from the name of the decision point as follows: + - Start with the decision point name as given in [Likely Decision Points and Relevant Data](#likely-decision-points-and-relevant-data). + - Remove any text in parentheses (and the parentheses themselves). + - Remove the word “Impact” if it is part of the name. + - Create an initialism from remaining title-case words (ignore “of,” etc.), taking only the first two words. + - The first letter of the initialism is upper case; if there is a second letter, then it is lower case. + - Verify that the initialism is unique among decision points in the version of SSVC. If two initialisms collide, sort their source names equivalent to `LC_ALL=C sort`. The name that sorts first keeps the initialism for which there is a collision. Set the second letter of the initialism to the first character in the name that resolves the collision. If the names were `Threat` and `Threshold`, `T` would be `Threat` and `Ts` would be `Threshold`. We make an effort to design SSVC without such collisions. 
+ +For example, [*Technical Impact*](#technical-impact) becomes `T` and [*Public Safety Impact*](#public-safety-impact) becomes `Ps`. + +The term `value` is a statement of the value or possible values of the decision point that precedes it and to which it is connected by a colon (`:`). +Similar to `decision point`, `value` should be made up of one or two letters derived from the name of the decision value in the section for its associated decision point. +For example [MEF support crippled](#mission-impact) becomes `Ms` and [efficient](#utility) becomes `E`. +The process is the same as above except that labels for a `value` do not need to be unique to the SSVC version, just unique to the associated `decision point`. + +The character `/` separates decision-point:value pairs. +As many pairs should be provided in the abbreviated form as are required to communicate the desired information about the vulnerability or work item. +A vector must contain at least one decision-point:value pair. +The ordering of the pairs should be sorted alphabetically from A to Z by the ASCII characters representing the decision points. +A trailing `/` is used to close the string. + +The vector is not tied to a specific decision tree. +It is meant to communicate information in a condensed form. +If priority labels (*defer*, etc.) are connected to a vector, then the decision tree used to reach those decisions should generally be noted. +However, for complex communication, machine-to-machine communication, or long-term storage of SSVC data, the JSON format and schema should be used. + +The optional parameter `time` is the date and time of the SSVCv2 record creation as represented in [RFC 3339, section 5.6](https://datatracker.ietf.org/doc/html/rfc3339). This is a subset of the date format also commonly known as ISO8601 format. 
+ +Based on this, an example string could be: +``` +SSVCv2/Ps:Nm/T:T/U:E/2018-11-13T20:20:00Z/ +``` +For a vulnerability with [no or minor](#public-safety-impact) [*Public Safety Impact*](#public-safety-impact), [total](#technical-impact) [*Technical Impact*](#technical-impact), and [efficient](#utility) [*Utility*](#utility), which was evaluated on Nov 13, 2018 at 8:20 PM UTC. + +While these abbreviated format vectors can be uniquely produced based on a properly formatted JSON object, going from abbreviated form to JSON is not supported. +Therefore, JSON is the preferred storage and transmission method. + +### Full JSON format + +For a more robust, self-contained, machine-readable format, we provide JSON schemas. +The [provision schema](https://github.com/CERTCC/SSVC/blob/main/data/schema/SSVC_Provision_v2.03.schema.json) is equivalent to a decision tree and documents the full set of logical statements that a stakeholder uses to make decisions. +The [computed schema](https://github.com/CERTCC/SSVC/blob/main/data/schema/SSVC_Computed_v2.03.schema.json) expresses a set of information about a work item or vulnerability at a point in time. +A computed schema should identify the provision schema used, so the options from which the information was computed are specified. + +Each element of `choices` should be an object that is a key-value pair of `decision point`:`value`, where the term `decision point` is a string derived from the name of the decision point as follows: + - Start with the decision point name as given in [Likely Decision Points and Relevant Data](#likely-decision-points-and-relevant-data). + - Remove any text in parentheses (and the parentheses themselves). + - Remove colon characters, if any (`:`). + - Convert the string to [lower camel case](https://en.wikipedia.org/wiki/Camel_case) by lowercasing the string, capitalizing any letter after a space, and removing all spaces.
+ +The `value` term is derived the same way as `decision point` except start with the value name as given in the relevant decision point subsection of [Likely Decision Points and Relevant Data](#likely-decision-points-and-relevant-data). + +## Partial or Incomplete Information + +What an analyst knows about a vulnerability may not be complete. +However, the vulnerability management community may still benefit from partial information. +In particular, suppliers and coordinators who might not know everything a deployer knows can still provide benefit to deployers by sharing what partial information they do know. +A second benefit to providing methods for communicating partial information is the reduction of bottlenecks or barriers to information exchange. +A timely partial warning is better than a complete warning that is too late. + +The basic guidance is that the analyst should communicate all of the vulnerability's possible states, to the best of the analyst's knowledge. +If the analyst knows nothing, all states are possible. +For example, [*Utility*](#utility) may be [laborious](#utility), [efficient](#utility), or [super effective](#utility). +In abbreviated form, write this as `U:LESe`. +Since a capital letter always indicates a new value, this is unambiguous. + +The reason a stakeholder might publish something like `U:LESe` is that it expresses that the analyst thought about [*Utility*](#utility) but does not have anything to communicate. +A stakeholder might have information to communicate about some decision points but not others. +If SSVC uses this format to list the values that are in play for a particular vulnerability, there is no need for a special “I don't know” marker. + +The merit in this “list all values” approach emerges when the stakeholder knows that the value for a decision point may be A or B, but not C. 
+For example, say the analyst knows that [*Value Density*](#value-density) is [diffuse](#value-density) but does not know the value for [*Automatability*](#automatability). +Then the analyst can usefully restrict [*Utility*](#utility) to one of [laborious](#utility) or [efficient](#utility). +In abbreviated form, write this as `U:LE`. +As discussed below, information can change over time. +Partial information may be, but is not required to be, sharpened over time into a precise value for the decision point. + +## Information Changes Over Time + +Vulnerability management decisions are dynamic, and may change over time as the available information changes. +Information changes are one reason why SSVC decisions should always be timestamped. +SSVC decision points have different temporal properties. +Some, such as [*Utility*](#utility), are mostly static. +For [*Utility*](#utility) to change, the market penetration or deployment norms of a vulnerable component would have to meaningfully change. +Some, such as [*State of Exploitation*](#state-of-exploitation), may change quickly but only in one direction. + +Both of these examples are out of the direct control of the vulnerability manager. +Some, such as [*Exposure*](#exposure), change mostly due to actions taken by the organization managing the vulnerable component. +If the actor who can usually trigger a relevant change is the organization using SSVC, then it is relatively straightforward to know when to update the SSVC decision. +That is, the organization should reevaluate the decision when they make a relevant change. +For those decision points that are about topics outside the control of the organization using SSVC, the organization should occasionally poll their information sources for changes. +The cadence or rate of polls is different for each decision point, based on the expected rate of change.
+ +We expect that updating information over time will be most useful where the evidence-gathering process can be automated. +Organizations that have mature asset management systems will also find this update process more efficient than those that do not. +For an organization without a mature asset management system, we would recommend putting organizational resources into maturing that function before putting effort into regular updates to vulnerability prioritization decision points. + +The following decision points are usually out of the control of the organization running SSVC. +As an initial heuristic, we suggest the associated polling frequency for each. +These frequencies can be customized, as the update frequency is directly related to the organization's tolerance for the risk that the information is out of date. +As discussed in [Tree Construction and Customization Guidance](#tree-construction-and-customization-guidance), risk tolerance is unique to each organization. +Risk tolerance and risk appetite are primarily reflected in the priority labels (that is, decisions) encoded in the SSVC decision tree, but information polling frequency is also a risk tolerance decision and each organization may choose different time values. + - [*State of Exploitation*](#state-of-exploitation): every 1 day + - [*Technical Impact*](#technical-impact): never (should be static per vulnerability) + - [*Utility*](#utility): every 6 months + - [*Public Safety Impact*](#public-safety-impact): every 1 year + +The following decision points are usually in the control of the organization running SSVC and should be reevaluated when a relevant change is made or during annual reviews of assets. 
+ + - [*Situated Safety Impact*](#situated-safety-impact) + - [*Mission Impact*](#mission-impact) + - [*System Exposure*](#system-exposure) + +If SSVC information is all timestamped appropriately (as discussed earlier in this section), then an analyst can compare the timestamp to the current date and determine whether information is considered stale. +The above rates are heuristic suggestions, and organizations may choose different ones. +Any public repository of vulnerability information should keep a change log of when values change for each decision point, for each vulnerability. +Vulnerability response analysts should keep such change logs as well. +Similar to logging practices recommended for incident response [@nist800-61r2], such practices make the process less error-prone and facilitate after-action reviews. From 5cc108126fd4cd0bc1910e6b8f80d16eecfcf4dc Mon Sep 17 00:00:00 2001 From: "Allen D. Householder" Date: Fri, 30 Jun 2023 14:15:20 -0400 Subject: [PATCH 05/12] split ch4 by section --- ...0_00_vulnerability_management_decisions.md | 267 ------------------ .../04_01_00_enumerating_stakeholders.md | 33 +++ .../04_02_00_enumerating_decisions.md | 13 + .../04_03_00_enumerating_actions.md | 129 +++++++++ .../04_04_00_items_with_same_priority.md | 8 + .../04_05_00_risk_tolerance_and_priority.md | 16 ++ doc/md_src_files/04_06_00_scope.md | 67 +++++ 7 files changed, 266 insertions(+), 267 deletions(-) create mode 100644 doc/md_src_files/04_01_00_enumerating_stakeholders.md create mode 100644 doc/md_src_files/04_02_00_enumerating_decisions.md create mode 100644 doc/md_src_files/04_03_00_enumerating_actions.md create mode 100644 doc/md_src_files/04_04_00_items_with_same_priority.md create mode 100644 doc/md_src_files/04_05_00_risk_tolerance_and_priority.md create mode 100644 doc/md_src_files/04_06_00_scope.md diff --git a/doc/md_src_files/04_00_00_vulnerability_management_decisions.md b/doc/md_src_files/04_00_00_vulnerability_management_decisions.md index 
a0171421..9744d8fd 100644 --- a/doc/md_src_files/04_00_00_vulnerability_management_decisions.md +++ b/doc/md_src_files/04_00_00_vulnerability_management_decisions.md @@ -1,5 +1,3 @@ - - # Vulnerability Management Decisions This section will define our audience for decision advice and how we are scoping our advice on vulnerability management decisions. @@ -11,268 +9,3 @@ The “what” is about the scope, both in how the affected system is defined an While we strive to make our examples realistic, we invite the community to engage and conduct empirical assessments to test them. The following construction should be treated as an informed hypothesis rather than a conclusion. -## Enumerating Stakeholders - -Stakeholders in vulnerability management can be identified within multiple independent axes. -For example, they can be identified by their responsibility: whether the group *supplies*, *deploys*, or *coordinates* remediation actions. -Depending what task a team is performing in a supply chain, the team may be considered a supplier, deployer, or a coordinator. -Therefore, one organization may have teams that take on different roles. -For example, an organization that develops and uses its own software might delegate the supplier role to its development team and the deployer role to its IT operations team. -On the other hand, organizations using a DevOps approach to providing services might have a single group responsible for both the supplier and deployer roles. -Organizations may also be distinguished by the type of industry sector. While it might be useful to enumerate all the sectors of the economy, we propose to draft decision points that include those from multiple important sectors. -For example, we have safety-related questions in the decision path for all suppliers and deployers. -The decision will be assessed whether or not the stakeholder is in a safety-critical sector. 
- -The choice not to segregate the decisions by sector is reinforced by the fact that any given software system might be used by different sectors. -It is less likely that one organization has multiple responsibilities within the vulnerability management process. -Even if there is overlap within an organization, the two responsibilities are often located in distinct business units with distinct decision-making processes. -We can treat the responsibilities as non-overlapping, and, therefore, we can build two decision trees—one for each of the “patch supplier” and “patch deployer” responsibilities in the vulnerability management process. -We leave “coordinating patches” as future work. -Each of these trees will have different decision points that they take to arrive at a decision about a given unit of work. - - -The next two sections describe the decision space and the relevant decision points that we propose for these two responsibilities within the vulnerability management process. - -The target audience for this paper is professional staff responsible for making decisions about information systems. -This audience encompasses a broad class of professionals, including suppliers, system maintainers, and administrators of many types. -It also includes other roles, such as risk managers, technical managers, and incident responders. -Although every layperson who owns a computing device makes decisions about managing it, they are not the target audience. -The following decision system may help such laypeople, but we do not intend it to be used by that audience. - -While C-level executives and public policy professionals often make, shape, or incentivize decisions about managing information systems, they are not the target audience, either. -To the extent that decision trees for vulnerability management help higher level policy decisions, we believe the best way to help policy makers is by making technical decisions more transparent and explainable. 
-Policy makers may see indirect benefits, but they are not our primary audience and we are not designing an approach for them directly. - - -## Enumerating Decisions - -Stakeholders with different responsibilities in vulnerability management have very different decisions to make. -This section focuses on the differences among organizations based on their vulnerability management responsibilities. -Some decision makers may have different responsibilities in relation to different software. For example, an Android app developer is a developer of the app, but is a deployer for any changes to the Android OS API. -This situation is true for libraries in general. -A web browser developer makes decisions about applying patches to DNS lookup libraries and transport layer security (TLS) libraries. -A video game developer makes decisions about applying patches released to the Unreal Engine. -A medical device developer makes decisions about applying patches to the Linux kernel. The list goes on. -Alternatively, one might view applying patches as including some development and distribution of the updated product. -Or one might take the converse view, that development includes updating libraries. -Either way, in each of these examples (mobile device apps, web browsers, video games, medical devices), we recommend that the professionals making genuine decisions do three things: (1) identify the decisions explicitly, (2) describe how they view their role(s), and (3) identify which software projects their decision relates to. -If their decisions are explicit, then the decision makers can use the recommendations from this document that are relevant to them. - -## Enumerating Vulnerability Management Actions -SSVC models the decision of “With what priority should the organization take action on a given vulnerability management work unit?” to be agnostic to whether or not a patch is available. 
-A unit of work means either remediating the vulnerability—such as applying a patch—or deploying a mitigation. Both remediation and mitigations often address multiple identified vulnerabilities. - -The unit of work may be different for different stakeholders. -The units of work can also change as the vulnerability response progresses through a stakeholder's process. -We elucidate these ideas with the following examples. - -### Supplier Units of Work - -On the input side of the Supplier process, Suppliers typically receive reports of vulnerabilities in one or more versions of their product. -Part of the Supplier's task on initial report intake is to resolve the initial report into a set of products and versions that are affected by the reported vulnerability. - -Our working assumption is that for SSVC purposes, the supplier's unit of work is the combination of the vulnerability with each affected product. -This implies the need for Suppliers to be able to resolve whatever they receive to that level of granularity in order to make best use of SSVC. - -Products will often need to be addressed individually because they may have diverse development processes or usage scenarios. -There are a variety of ways a Supplier might need to resolve the set of affected products. For example, they might - -- recognize, on further investigation of the initial report, that additional versions of the product are affected -- discover that other products are affected due to code sharing or programmer error consistent across products -- receive reports of vulnerabilities in third party libraries they utilize in one or more of their products -- receive fix bundles for third party libraries used in one or more of their products (where a fix bundle might resolve multiple vulnerabilities or add new features) - -Without belaboring the point, the above methods are similar to how CVE Numbering Authorities discern “independently fixable vulnerabilities” [@mitre2020cna]. 
-We also note that SBOM[@manion2019sbom] seems well-placed to aid in that resolution process for the third-party library scenarios. - -In the end, Suppliers provide remediations and/or mitigations for affected products. -A supplier-provided remediation is usually a software update which contains fixes for multiple vulnerabilities and, often, new or improved features. -Supplier output is relevant because it will become input to Deployers. -SSVC focuses only on the remediation in this case; a set of remediations for multiple vulnerabilities is a fix bundle. -Suppliers may also produce mitigations, such as recommended configuration changes, to limit the impact of a vulnerability. - - -### Deployer Units of Work ### - -Deployers are usually in the position of receiving remediations or mitigations from their Suppliers for products they have deployed. -They must then decide whether to deploy the remediation or mitigation to a particular instance (or not). -Whether they have the option of deploying only part of a remediation such as a fix bundle depends on whether the Supplier has engineered their release process to permit that degree of flexibility. -For example, if service packs are fix bundles, the Supplier might choose to release individually deployable fixes as well. - -The vulnerability management process for deployers has at its core the collation of data including -- an inventory of deployed instances of product versions -- a mapping of vulnerabilities to remediations or mitigations -- a mapping of remediations and/or mitigations to product versions - -The first must be collected by the Deployer, while the latter two most often originate from the product Supplier. -Managing this information is generally called **asset management**. -The relationship between SSVC and asset management is discussed further in [Relationship to asset management](#relationship-to-asset-management). 
- -In turn, Deployers must resolve this information into specific actions in which a remediation or mitigation is slated for deployment to replace or modify a particular instance of the product. -The Deployer tree in SSVC considers the mission and safety risks inherent to the category of systems to which those deployed instances belong. -For this reason, we recommend that the pairing of remediation or mitigation to a product version instance constitutes the unit of work most appropriate for the SSVC. - -### Coordinator Units of Work ### - -Coordinator units of work tend to coincide with whatever arrives in a single report, which spans the range from a single vulnerability affecting a specific version of an individual product from one Supplier all the way to fundamental design flaws in system specifications that could affect every Supplier and product that uses or implements the flawed specification. -Coordinators may need to reorganize reports (e.g., merge, split, expand, or contract) according to their workflow demands. SSVC can be applied to either the initial report or to the results of such refinement. - -### Aggregation of SSVC across units of work - -SSVC users should answer the suggested questions for whatever discrete unit of work they are considering. There is not necessarily a reliable function to aggregate a recommendation about remediation out of its constituent vulnerabilities. For the sake of simplicity of examples, we treat the remediation as a patch of one vulnerability, and comment on any difficulty in generalizing our advice to a more complex patch where appropriate. - -To further clarify terms, “Remediation occurs when the vulnerability is eliminated or removed. Mitigation occurs when the impact of the vulnerability is decreased without reducing or eliminating the vulnerability” [@dodi_8531_2020, section 3.5]. Examples of remediation include applying patches, fixes and upgrades; or removing the vulnerable software or system from operation. 
Mitigating actions may include software configuration changes, adding firewall ACLs, or otherwise limiting the system's exposure to reduce the risk of the impact of the vulnerability; or accepting the risk. - -### Supplying Patches - -At a basic level, the decision at a software development organization is whether to issue a work order and what resources to expend to remediate a vulnerability in the organization’s software. Prioritization is required because, at least in the current history of software engineering, the effort to patch all known vulnerabilities will exceed available resources. The organization considers several other factors to build the patch; refactoring a large portion of the code base may be necessary for some patches, while others require relatively small changes. -We focus only on the priority of building the patch, and we consider four categories of priority, as outlined in [Table 2](#table-supplier-outcomes). - -Table: Proposed Meaning for Supplier Priority Outcomes - -| Supplier Priority | Description | -| :--- | :---------- | -| Defer | Do not work on the patch at present. | -| Scheduled | Develop a fix within regularly scheduled maintenance using supplier resources as normal. | -| Out-of-Cycle | Develop mitigation or remediation out-of-cycle, taking resources away from other projects and releasing the fix as a security patch when it is ready. | -| Immediate | Develop and release a fix as quickly as possible, drawing on all available resources, potentially including drawing on or coordinating resources from other parts of the organization. | - -### Deploying Patches - -A mitigation that successfully changes the value of a decision point may shift the priority of further action to a reduced state. An effective firewall or IDS rule coupled with an adequate change control process for rules may be enough to reduce the priority where no further action is necessary. 
In the area of Financial impacts, a better insurance policy may be purchased, providing necessary fraud insurance. Physicial well-being impact may be reduced by testing the physicial barriers designed to restrict a robot's ability to interact with humans. Mission impact could be reduced by correcting the problems identified in a disaster recover test-run of the alternate business flow. If applying a mitigation reduces the priority to *defer*, the deployer may not need to apply a remediation if it later becomes available. [Table 3](#table-deployer-outcomes) displays the action priorities for the deployer, which are similar to the supplier case. - -When remediation is available, usually the action is to apply it. When remediation is not yet available, the action space is more diverse, but it should involve mitigating the vulnerability (e.g., shutting down services or applying additional security controls) or accepting the risk of not mitigating the vulnerability. Applying mitigations may change the value of decision points. For example, effective firewall and IDS rules may change [*System Exposure*](#system-exposure) from open to controlled. Financial well-being, a [*Safety Impact*](#safety-impact) category, might be reduced with adequate fraud detection and insurance. Physical well-being, also a [*Safety Impact*](#safety-impact) category, might be reduced by physical barriers that restrict a robot's ability to interact with humans. [*Mission Impact*](#mission-impact) might be reduced by introducing back-up business flows that do not use the vulnerable component. In a later section we combine [Mission and Situated Safety Impact](#table-mission-safety-combined) to reduce the complexity of the tree. - -However, these mitigation techniques will not always work. For example, the implementation of a firewall or IDS rule to mitigate [*System Exposure*](#system-exposure) from open to controlled is only valid until someone changes the rule. 
In the area of Financial impacts, the caps on the insurance may be too low to act as a mitigation. -The Physical impact may be increased by incorrect installation of the physical barriers designed to restrict a robot’s ability to interact with humans. -The [*Mission Impact*](#mission-impact) could be increased when a disaster recovery test-run identifies problems with an alternate business flow. The mitigating action may not be permanent or work as designed. - -A mitigation that successfully changes the value of a decision point may shift the priority of further action to a reduced state. If applying a mitigation reduces the priority to *defer*, the deployer may not need to apply a remediation, if later, it becomes available. Table 3 displays the action priorities for the deployer, which are similar to the supplier case. - -In a later section, the different types of impacts are defined and then implemented in the decision trees as examples of how the various impacts affect the priority. -For now, assume the decision points are ordered as: [*Exploitation*](#exploitation); [*Exposure*](#exposure); [*Utility*](#utility); and [*Human Impact*](#human-impact). -In this order, an [_active_](#exploitation) state of [*Exploitation*](#exploitation) will never result in a *defer* priority. -A [_none_](#exploitation) state of [*Exploitation*](#exploitation) (no evidence of exploitation) will result in either *defer* or *scheduled* priority—unless the state of [*Human Impact*](#human-impact) is [_very high_](#human-impact), resulting in an *out-of-cycle* priority. - -As opposed to mitigation, applying a remediation finishes an SSVC analysis of a deployed system. -While specific vulnerabilities in specific systems can be remediated, the vulnerability cannot be 'disposed of' or eliminated from future consideration within an IT environment. 
-Since software and systems are dynamic, a single vulnerability can be re-introduced after initial remediation through updates, software rollbacks, or other systemic actions that change the operating conditions within an environment. -It is therefore important to continually monitor remediated environments for vulnerabilities reintroduced by either rollbacks or new deployments of outdated software. - - -Table: Proposed Meaning for Deployer Priority Outcomes - -| Deployer Priority | Description | -| :--- | :---------- | -| Defer | Do not act at present. | -| Scheduled | Act during regularly scheduled maintenance time. | -| Out-of-cycle | Act more quickly than usual to apply the mitigation or remediation out-of-cycle, during the next available opportunity, working overtime if necessary. | -| Immediate | Act immediately; focus all resources on applying the fix as quickly as possible, including, if necessary, pausing regular organization operations. | - -### Coordinating Patches -In coordinated vulnerability disclosure (CVD), there are two available decisions modelled in version 2 of SSVC. -The first is whether or not to coordinate a vulnerability report. -This decision is also known as triage. -Vulnerability Response Decision Assistance (VRDA) provides a starting point for a decision tree for this situation. -VRDA is likely adequate for national-level CSIRTs that do general CVD, but other CSIRT types may have different needs. -The *CERT guide to Coordinated Vulnerability Disclosure* provides something similar for those who are deciding how to report and disclose vulnerabilities they have discovered [@householder2020cvd, section 6.10]. -The second decision is whether to publish information about a vulnerability. -We omit a table for this decision because the options are *do not publish* or *publish*. - -Table: Proposed Coordinator Triage Priority Outcomes - -| Triage Priority | Description | -| :--- | :---------- | -| Decline | Do not act on the report. 
| -| Track | Receive information about the vulnerability and monitor for status changes but do not take any overt actions. | -| Coordinate | Take action on the report. “Action” may include any one or more of: technical analysis, reproduction, notifying vendors, publication, and assist another party. | - -## Items With the Same Priority - -Within each setting, the decisions are a kind of equivalence class for priority. That is, if an organization must deploy patches for three vulnerabilities, and if these vulnerabilities are all assigned the *scheduled* priority, then the organization can decide which to deploy first. The priority is equivalent. This approach may feel uncomfortable since CVSS gives the appearance of a finer grained priority. CVSS appears to say, “Not just 4.0 to 6.9 is ‘medium’ severity, but 4.6 is more severe than 4.5.” However, as discussed previously (see page 4), CVSS is designed to be accurate only within +/- 0.5, and, in practice, is scored with errors of around +/- 1.5 to 2.5 [@allodi2018effect, see Figure 1]. An error of this magnitude is enough to make all of the “normal” range from 4.0 to 6.9 equivalent, because 5.5 +/- 1.5 is the range 4.0 to 7.0. Our proposal is an improvement over this approach. CVSS errors often cross decision boundaries; in other words, the error range often includes the transition between “high” and “critical” or “medium.” Since our approach keeps the decisions qualitatively defined, this fuzziness does not -affect the results. - -Returning to the example of an organization with three vulnerabilities to remediate that were assigned *scheduled* priority, in SSVC, they can be patched in any order. This is an improvement over CVSS, since based on the scoring errors, CVSS was essentially just giving random fine-grained priorities within qualitative categories anyway. With our system, organizations can be more deliberate about conveniently organizing work that is of equivalent priority. 
- -## Risk Tolerance and Response Priority - -SSVC enables stakeholders to balance and manage their risks themselves. -We follow the risk management vocabulary from [@ISO73] and define risk as “effect of uncertainty on objectives;” see [@ISO73] for notes on the terms in the definition. -A successful vulnerability management practice must balance at least two risks: - -1. Change risk: the potential costs of deploying remediation, which include testing and deployment in addition to any problems that could arise from making changes to production systems. -2. Vulnerability risk: the potential costs of incidents resulting from exploitation of vulnerable systems - -To place these risks in context, we follow the SEI's Taxonomy of Operational Cyber Security Risks [@cebula2010taxonomy]. Change risk can be characterized as a combination of Class 2 and/or Class 3 risks. Class 2: Systems and Technology Failures includes hardware, software, and systems risks. Class 3: Failed Internal Processes can arise from process design, process execution, process controls, or supporting processes. Meanwhile, vulnerability risk falls into Subclass 1.2: Actions of People: Deliberate. - -In developing the decision trees in this document, we had in mind stakeholders with a moderate tolerance for risk. The resulting trees reflect that assumption. Organizations may of course be more or less conservative in their own vulnerability management practices, and we cannot presume to determine how an organization should balance their risk. - -We therefore remind our readers that the labels on the trees (defer, immediate, etc.) can and should be customized to suit the needs of individual stakeholders wherever necessary and appropriate. For example, an organization with a high aversion to change risk might choose to accept more vulnerability risk by lowering the overall response labels for many branches in the trees, resulting in fewer vulnerabilities attaining the most urgent response. 
On the other hand, an organization with a high aversion to vulnerability risk could elevate the priority of many branches to ensure fixes are deployed quickly. - -## Scope - -Scope is an important variable in the answers of these decision points. -It has at least three aspects. -The first is how the boundaries of the affected system are set. -The second is whose security policy is relevant. -The third is how far forward in time or causal steps one reasons about effects and harms. -We put forward recommendations for each of these aspects of scope. - -However, users of the decision process may want to define different scopes. -Users may define a different scope as long as the scope (1) is consistent across decisions, and (2) is credible, explicit, and accessible to all relevant decision makers. - -For example, suppliers often decline to support products beyond a declared end-of-life (EOL) date. In these cases, a supplier could reasonably consider vulnerabilities in those products to be out of scope. However, a deployer may still have active instances of EOL products in their infrastructure. It remains appropriate for a deployer to use SSVC to prioritize their response to such situations, since even if there is no remediation forthcoming from the supplier it may be possible for the deployer to mitigate or remediate the vulnerability in other ways, up to and including decommissioning the affected system(s). - -### Boundaries of the Affected System - -One distinction is whether the system of interest is software per se or a cyber-physical system. -A problem is that in every practical case, both are involved. -Software is what has vulnerabilities and is what vulnerability management is focused on remediating. -Multiple pieces of software run on any given computer system. 
-To consider software vulnerabilities in a useful scope, we follow prior work and propose that a vulnerability affects “the set of software instructions that executes in an environment with a coherent function and set of permissions” [@spring2015global]. -This definition is useful because it lets us keep to common usage and intuition and call the Linux kernel—at least a specific version of it—“one piece” of software. - -But decision points about safety and mission impact are not about the software in isolation; they are about the entire cyber-physical system, of which the software is a part. -The term “physical” in “cyber-physical system” should be interpreted broadly; selling stocks or manipulating press outlet content are both best understood as affecting human social institutions. -These social institutions do not have much of a corporeal instantiation, but they are physical in the sense that they are not merely software, and so are parts of cyber-physical systems. - -The hard part of delineating the boundaries of the affected system is specifying what it means for one system to be part of another system. -Just because a computer is bolted to a wall does not mean the computer is part of the wall’s purpose, which is separating physical space. -At the same time, an off-premises DNS server may be part of the site security assurance system if the on-premises security cameras rely on the DNS server to connect to the displays at the guard’s desk. -We define computer software as part of a cyber-physical system if the two systems are mutually manipulable; that is, changes in the part (the software) will (at least, often) make detectable and relevant changes to the whole (the cyber-physical system), and changes in the whole will (often) make relevant and detectable changes in the part [@spring2018generalization]. - -When reasoning about a vulnerability, we assign the vulnerability to the nearest, relevant—yet more abstract—discrete component. 
-This assignment is particularly important when assessing technical impact on a component. This description bears some clarification, via each of the adjectives: - - - **Nearest** is relative to the abstraction at which the vulnerability exists. - - - **Relevant** implies that the impacted component must be in the chain of abstraction moving upward from the location of the flaw. - - - **More abstract** means that the impacted component is at least one level of abstraction above the specific location of the vulnerability. For example, if the vulnerability is localized to a single line of code in a function, then the function, the module, the library, the application, the product, and the system it belongs to are all “more abstract.” - - - **Discrete** means there is an identifiable thing that can be remediated (e.g., the unit of patching). - -Products, libraries, and applications tend to be appropriate objects of focus when seeking the right level to analyze the impact of a vulnerability. -For example, when reasoning about the technical impact of a vulnerability that is localized to a function in a module in an open source library, the nearest relevant discrete abstraction is the library because the patches are made available at the library level. -Similarly, analysis of a vulnerability in closed source database software that supports an enterprise resource management (ERM) system would consider the technical impact to the database, not to the ERM system. - -### Relevant Security Policy - -Our definition of a vulnerability includes a security policy violation, but it does not clarify whose security policies are relevant [@householder2020cvd]. -For an organizational PSIRT or CSIRT, the organization's security policy is most relevant. -The answer is less clear for coordinators or ISACs. -An example scenario that brings the question into focus is phone OS jailbreak methods. 
-The owner of the phone may elect to jailbreak it; there is at least an implicit security policy from the owner that allows this method. -However, from the perspective of the explicit phone OS security policy embedded in the access controls and separation of privileges, the jailbreak is exploiting a vulnerability. -If a security policy is embedded in technical controls, such as OS access controls on a phone, then anything that violates that security policy is a vulnerability. - -## Reasoning Steps Forward - -This aspect of scope is about immediacy, prevalence, and causal importance. -**Immediacy** is about how soon after the decision point adverse effects should occur to be considered relevant. -**Prevalence** is about how common adverse effects should be to be considered relevant. -**Causal importance** is about how much an exploitation of the software in the cyber-physical system contributes to adverse effects to be considered relevant. -Our recommendation is to walk a pragmatic middle path on all three aspects. -Effects are not relevant if they are merely possible, too infrequent, far distant, or unchanged by the vulnerability. -But effects are relevant long before they are absolutely certain, ubiquitous, or occurring presently. -Overall, we summarize this aspect of scope as *consider credible effects based on known use cases of the software system as a part of cyber-physical systems*. diff --git a/doc/md_src_files/04_01_00_enumerating_stakeholders.md b/doc/md_src_files/04_01_00_enumerating_stakeholders.md new file mode 100644 index 00000000..8e895f4e --- /dev/null +++ b/doc/md_src_files/04_01_00_enumerating_stakeholders.md @@ -0,0 +1,33 @@ +## Enumerating Stakeholders + +Stakeholders in vulnerability management can be identified within multiple independent axes. +For example, they can be identified by their responsibility: whether the group *supplies*, *deploys*, or *coordinates* remediation actions. 
+Depending on what task a team is performing in a supply chain, the team may be considered a supplier, deployer, or a coordinator. +Therefore, one organization may have teams that take on different roles. +For example, an organization that develops and uses its own software might delegate the supplier role to its development team and the deployer role to its IT operations team. +On the other hand, organizations using a DevOps approach to providing services might have a single group responsible for both the supplier and deployer roles. +Organizations may also be distinguished by the type of industry sector. While it might be useful to enumerate all the sectors of the economy, we propose to draft decision points that include those from multiple important sectors. +For example, we have safety-related questions in the decision path for all suppliers and deployers. +The decision will be assessed whether or not the stakeholder is in a safety-critical sector. + +The choice not to segregate the decisions by sector is reinforced by the fact that any given software system might be used by different sectors. +It is less likely that one organization has multiple responsibilities within the vulnerability management process. +Even if there is overlap within an organization, the two responsibilities are often located in distinct business units with distinct decision-making processes. +We can treat the responsibilities as non-overlapping, and, therefore, we can build two decision trees—one for each of the “patch supplier” and “patch deployer” responsibilities in the vulnerability management process. +We leave “coordinating patches” as future work. +Each of these trees will have different decision points that they take to arrive at a decision about a given unit of work. + + +The next two sections describe the decision space and the relevant decision points that we propose for these two responsibilities within the vulnerability management process. 
+ +The target audience for this paper is professional staff responsible for making decisions about information systems. +This audience encompasses a broad class of professionals, including suppliers, system maintainers, and administrators of many types. +It also includes other roles, such as risk managers, technical managers, and incident responders. +Although every layperson who owns a computing device makes decisions about managing it, they are not the target audience. +The following decision system may help such laypeople, but we do not intend it to be used by that audience. + +While C-level executives and public policy professionals often make, shape, or incentivize decisions about managing information systems, they are not the target audience, either. +To the extent that decision trees for vulnerability management help higher level policy decisions, we believe the best way to help policy makers is by making technical decisions more transparent and explainable. +Policy makers may see indirect benefits, but they are not our primary audience and we are not designing an approach for them directly. + + diff --git a/doc/md_src_files/04_02_00_enumerating_decisions.md b/doc/md_src_files/04_02_00_enumerating_decisions.md new file mode 100644 index 00000000..0d512b6d --- /dev/null +++ b/doc/md_src_files/04_02_00_enumerating_decisions.md @@ -0,0 +1,13 @@ +## Enumerating Decisions + +Stakeholders with different responsibilities in vulnerability management have very different decisions to make. +This section focuses on the differences among organizations based on their vulnerability management responsibilities. +Some decision makers may have different responsibilities in relation to different software. For example, an Android app developer is a developer of the app, but is a deployer for any changes to the Android OS API. +This situation is true for libraries in general. 
+A web browser developer makes decisions about applying patches to DNS lookup libraries and transport layer security (TLS) libraries. +A video game developer makes decisions about applying patches released to the Unreal Engine. +A medical device developer makes decisions about applying patches to the Linux kernel. The list goes on. +Alternatively, one might view applying patches as including some development and distribution of the updated product. +Or one might take the converse view, that development includes updating libraries. +Either way, in each of these examples (mobile device apps, web browsers, video games, medical devices), we recommend that the professionals making genuine decisions do three things: (1) identify the decisions explicitly, (2) describe how they view their role(s), and (3) identify which software projects their decision relates to. +If their decisions are explicit, then the decision makers can use the recommendations from this document that are relevant to them. diff --git a/doc/md_src_files/04_03_00_enumerating_actions.md b/doc/md_src_files/04_03_00_enumerating_actions.md new file mode 100644 index 00000000..6bfe7fb6 --- /dev/null +++ b/doc/md_src_files/04_03_00_enumerating_actions.md @@ -0,0 +1,129 @@ +## Enumerating Vulnerability Management Actions +SSVC models the decision of “With what priority should the organization take action on a given vulnerability management work unit?” to be agnostic to whether or not a patch is available. +A unit of work means either remediating the vulnerability—such as applying a patch—or deploying a mitigation. Both remediation and mitigations often address multiple identified vulnerabilities. + +The unit of work may be different for different stakeholders. +The units of work can also change as the vulnerability response progresses through a stakeholder's process. +We elucidate these ideas with the following examples. 
+ +### Supplier Units of Work + +On the input side of the Supplier process, Suppliers typically receive reports of vulnerabilities in one or more versions of their product. +Part of the Supplier's task on initial report intake is to resolve the initial report into a set of products and versions that are affected by the reported vulnerability. + +Our working assumption is that for SSVC purposes, the supplier's unit of work is the combination of the vulnerability with each affected product. +This implies the need for Suppliers to be able to resolve whatever they receive to that level of granularity in order to make best use of SSVC. + +Products will often need to be addressed individually because they may have diverse development processes or usage scenarios. +There are a variety of ways a Supplier might need to resolve the set of affected products. For example, they might + +- recognize, on further investigation of the initial report, that additional versions of the product are affected +- discover that other products are affected due to code sharing or programmer error consistent across products +- receive reports of vulnerabilities in third party libraries they utilize in one or more of their products +- receive fix bundles for third party libraries used in one or more of their products (where a fix bundle might resolve multiple vulnerabilities or add new features) + +Without belaboring the point, the above methods are similar to how CVE Numbering Authorities discern “independently fixable vulnerabilities” [@mitre2020cna]. +We also note that SBOM[@manion2019sbom] seems well-placed to aid in that resolution process for the third-party library scenarios. + +In the end, Suppliers provide remediations and/or mitigations for affected products. +A supplier-provided remediation is usually a software update which contains fixes for multiple vulnerabilities and, often, new or improved features. +Supplier output is relevant because it will become input to Deployers. 
+SSVC focuses only on the remediation in this case; a set of remediations for multiple vulnerabilities is a fix bundle. +Suppliers may also produce mitigations, such as recommended configuration changes, to limit the impact of a vulnerability. + + +### Deployer Units of Work ### + +Deployers are usually in the position of receiving remediations or mitigations from their Suppliers for products they have deployed. +They must then decide whether to deploy the remediation or mitigation to a particular instance (or not). +Whether they have the option of deploying only part of a remediation such as a fix bundle depends on whether the Supplier has engineered their release process to permit that degree of flexibility. +For example, if service packs are fix bundles, the Supplier might choose to release individually deployable fixes as well. + +The vulnerability management process for deployers has at its core the collation of data including +- an inventory of deployed instances of product versions +- a mapping of vulnerabilities to remediations or mitigations +- a mapping of remediations and/or mitigations to product versions + +The first must be collected by the Deployer, while the latter two most often originate from the product Supplier. +Managing this information is generally called **asset management**. +The relationship between SSVC and asset management is discussed further in [Relationship to asset management](#relationship-to-asset-management). + +In turn, Deployers must resolve this information into specific actions in which a remediation or mitigation is slated for deployment to replace or modify a particular instance of the product. +The Deployer tree in SSVC considers the mission and safety risks inherent to the category of systems to which those deployed instances belong. +For this reason, we recommend that the pairing of remediation or mitigation to a product version instance constitutes the unit of work most appropriate for the SSVC. 
+ +### Coordinator Units of Work ### + +Coordinator units of work tend to coincide with whatever arrives in a single report, which spans the range from a single vulnerability affecting a specific version of an individual product from one Supplier all the way to fundamental design flaws in system specifications that could affect every Supplier and product that uses or implements the flawed specification. +Coordinators may need to reorganize reports (e.g., merge, split, expand, or contract) according to their workflow demands. SSVC can be applied to either the initial report or to the results of such refinement. + +### Aggregation of SSVC across units of work + +SSVC users should answer the suggested questions for whatever discrete unit of work they are considering. There is not necessarily a reliable function to aggregate a recommendation about remediation out of its constituent vulnerabilities. For the sake of simplicity of examples, we treat the remediation as a patch of one vulnerability, and comment on any difficulty in generalizing our advice to a more complex patch where appropriate. + +To further clarify terms, “Remediation occurs when the vulnerability is eliminated or removed. Mitigation occurs when the impact of the vulnerability is decreased without reducing or eliminating the vulnerability” [@dodi_8531_2020, section 3.5]. Examples of remediation include applying patches, fixes and upgrades; or removing the vulnerable software or system from operation. Mitigating actions may include software configuration changes, adding firewall ACLs, or otherwise limiting the system's exposure to reduce the risk of the impact of the vulnerability; or accepting the risk. + +### Supplying Patches + +At a basic level, the decision at a software development organization is whether to issue a work order and what resources to expend to remediate a vulnerability in the organization’s software. 
Prioritization is required because, at least in the current history of software engineering, the effort to patch all known vulnerabilities will exceed available resources. The organization considers several other factors to build the patch; refactoring a large portion of the code base may be necessary for some patches, while others require relatively small changes. +We focus only on the priority of building the patch, and we consider four categories of priority, as outlined in [Table 2](#table-supplier-outcomes). + +Table: Proposed Meaning for Supplier Priority Outcomes + +| Supplier Priority | Description | +| :--- | :---------- | +| Defer | Do not work on the patch at present. | +| Scheduled | Develop a fix within regularly scheduled maintenance using supplier resources as normal. | +| Out-of-Cycle | Develop mitigation or remediation out-of-cycle, taking resources away from other projects and releasing the fix as a security patch when it is ready. | +| Immediate | Develop and release a fix as quickly as possible, drawing on all available resources, potentially including drawing on or coordinating resources from other parts of the organization. | + +### Deploying Patches + +A mitigation that successfully changes the value of a decision point may shift the priority of further action to a reduced state. An effective firewall or IDS rule coupled with an adequate change control process for rules may be enough to reduce the priority where no further action is necessary. In the area of Financial impacts, a better insurance policy may be purchased, providing necessary fraud insurance. Physical well-being impact may be reduced by testing the physical barriers designed to restrict a robot's ability to interact with humans. Mission impact could be reduced by correcting the problems identified in a disaster recovery test-run of the alternate business flow. 
If applying a mitigation reduces the priority to *defer*, the deployer may not need to apply a remediation if it later becomes available. [Table 3](#table-deployer-outcomes) displays the action priorities for the deployer, which are similar to the supplier case. + +When remediation is available, usually the action is to apply it. When remediation is not yet available, the action space is more diverse, but it should involve mitigating the vulnerability (e.g., shutting down services or applying additional security controls) or accepting the risk of not mitigating the vulnerability. Applying mitigations may change the value of decision points. For example, effective firewall and IDS rules may change [*System Exposure*](#system-exposure) from open to controlled. Financial well-being, a [*Safety Impact*](#safety-impact) category, might be reduced with adequate fraud detection and insurance. Physical well-being, also a [*Safety Impact*](#safety-impact) category, might be reduced by physical barriers that restrict a robot's ability to interact with humans. [*Mission Impact*](#mission-impact) might be reduced by introducing back-up business flows that do not use the vulnerable component. In a later section we combine [Mission and Situated Safety Impact](#table-mission-safety-combined) to reduce the complexity of the tree. + +However, these mitigation techniques will not always work. For example, the implementation of a firewall or IDS rule to mitigate [*System Exposure*](#system-exposure) from open to controlled is only valid until someone changes the rule. In the area of Financial impacts, the caps on the insurance may be too low to act as a mitigation. +The Physical impact may be increased by incorrect installation of the physical barriers designed to restrict a robot’s ability to interact with humans. +The [*Mission Impact*](#mission-impact) could be increased when a disaster recovery test-run identifies problems with an alternate business flow. 
The mitigating action may not be permanent or work as designed. + +A mitigation that successfully changes the value of a decision point may shift the priority of further action to a reduced state. If applying a mitigation reduces the priority to *defer*, the deployer may not need to apply a remediation if it later becomes available. Table 3 displays the action priorities for the deployer, which are similar to the supplier case. + +In a later section, the different types of impacts are defined and then implemented in the decision trees as examples of how the various impacts affect the priority. +For now, assume the decision points are ordered as: [*Exploitation*](#exploitation); [*Exposure*](#exposure); [*Utility*](#utility); and [*Human Impact*](#human-impact). +In this order, an [_active_](#exploitation) state of [*Exploitation*](#exploitation) will never result in a *defer* priority. +A [_none_](#exploitation) state of [*Exploitation*](#exploitation) (no evidence of exploitation) will result in either *defer* or *scheduled* priority—unless the state of [*Human Impact*](#human-impact) is [_very high_](#human-impact), resulting in an *out-of-cycle* priority. + +As opposed to mitigation, applying a remediation finishes an SSVC analysis of a deployed system. +While specific vulnerabilities in specific systems can be remediated, the vulnerability cannot be 'disposed of' or eliminated from future consideration within an IT environment. +Since software and systems are dynamic, a single vulnerability can be re-introduced after initial remediation through updates, software rollbacks, or other systemic actions that change the operating conditions within an environment. +It is therefore important to continually monitor remediated environments for vulnerabilities reintroduced by either rollbacks or new deployments of outdated software. 
+ + +Table: Proposed Meaning for Deployer Priority Outcomes + +| Deployer Priority | Description | +| :--- | :---------- | +| Defer | Do not act at present. | +| Scheduled | Act during regularly scheduled maintenance time. | +| Out-of-cycle | Act more quickly than usual to apply the mitigation or remediation out-of-cycle, during the next available opportunity, working overtime if necessary. | +| Immediate | Act immediately; focus all resources on applying the fix as quickly as possible, including, if necessary, pausing regular organization operations. | + +### Coordinating Patches +In coordinated vulnerability disclosure (CVD), there are two available decisions modelled in version 2 of SSVC. +The first is whether or not to coordinate a vulnerability report. +This decision is also known as triage. +Vulnerability Response Decision Assistance (VRDA) provides a starting point for a decision tree for this situation. +VRDA is likely adequate for national-level CSIRTs that do general CVD, but other CSIRT types may have different needs. +The *CERT guide to Coordinated Vulnerability Disclosure* provides something similar for those who are deciding how to report and disclose vulnerabilities they have discovered [@householder2020cvd, section 6.10]. +The second decision is whether to publish information about a vulnerability. +We omit a table for this decision because the options are *do not publish* or *publish*. + +Table: Proposed Coordinator Triage Priority Outcomes + +| Triage Priority | Description | +| :--- | :---------- | +| Decline | Do not act on the report. | +| Track | Receive information about the vulnerability and monitor for status changes but do not take any overt actions. | +| Coordinate | Take action on the report. “Action” may include any one or more of: technical analysis, reproduction, notifying vendors, publication, and assisting another party. 
| + diff --git a/doc/md_src_files/04_04_00_items_with_same_priority.md b/doc/md_src_files/04_04_00_items_with_same_priority.md new file mode 100644 index 00000000..73d4c5ed --- /dev/null +++ b/doc/md_src_files/04_04_00_items_with_same_priority.md @@ -0,0 +1,8 @@ +## Items With the Same Priority + +Within each setting, the decisions are a kind of equivalence class for priority. That is, if an organization must deploy patches for three vulnerabilities, and if these vulnerabilities are all assigned the *scheduled* priority, then the organization can decide which to deploy first. The priority is equivalent. This approach may feel uncomfortable since CVSS gives the appearance of a finer grained priority. CVSS appears to say, “Not just 4.0 to 6.9 is ‘medium’ severity, but 4.6 is more severe than 4.5.” However, as discussed previously (see page 4), CVSS is designed to be accurate only within +/- 0.5, and, in practice, is scored with errors of around +/- 1.5 to 2.5 [@allodi2018effect, see Figure 1]. An error of this magnitude is enough to make all of the “normal” range from 4.0 to 6.9 equivalent, because 5.5 +/- 1.5 is the range 4.0 to 7.0. Our proposal is an improvement over this approach. CVSS errors often cross decision boundaries; in other words, the error range often includes the transition between “high” and “critical” or “medium.” Since our approach keeps the decisions qualitatively defined, this fuzziness does not +affect the results. + +Returning to the example of an organization with three vulnerabilities to remediate that were assigned *scheduled* priority, in SSVC, they can be patched in any order. This is an improvement over CVSS, since based on the scoring errors, CVSS was essentially just giving random fine-grained priorities within qualitative categories anyway. With our system, organizations can be more deliberate about conveniently organizing work that is of equivalent priority. 
+ + diff --git a/doc/md_src_files/04_05_00_risk_tolerance_and_priority.md b/doc/md_src_files/04_05_00_risk_tolerance_and_priority.md new file mode 100644 index 00000000..5e30f207 --- /dev/null +++ b/doc/md_src_files/04_05_00_risk_tolerance_and_priority.md @@ -0,0 +1,16 @@ +## Risk Tolerance and Response Priority + +SSVC enables stakeholders to balance and manage their risks themselves. +We follow the risk management vocabulary from [@ISO73] and define risk as “effect of uncertainty on objectives;” see [@ISO73] for notes on the terms in the definition. +A successful vulnerability management practice must balance at least two risks: + +1. Change risk: the potential costs of deploying remediation, which include testing and deployment in addition to any problems that could arise from making changes to production systems. +2. Vulnerability risk: the potential costs of incidents resulting from exploitation of vulnerable systems + +To place these risks in context, we follow the SEI's Taxonomy of Operational Cyber Security Risks [@cebula2010taxonomy]. Change risk can be characterized as a combination of Class 2 and/or Class 3 risks. Class 2: Systems and Technology Failures includes hardware, software, and systems risks. Class 3: Failed Internal Processes can arise from process design, process execution, process controls, or supporting processes. Meanwhile, vulnerability risk falls into Subclass 1.2: Actions of People: Deliberate. + +In developing the decision trees in this document, we had in mind stakeholders with a moderate tolerance for risk. The resulting trees reflect that assumption. Organizations may of course be more or less conservative in their own vulnerability management practices, and we cannot presume to determine how an organization should balance their risk. + +We therefore remind our readers that the labels on the trees (defer, immediate, etc.) can and should be customized to suit the needs of individual stakeholders wherever necessary and appropriate. 
For example, an organization with a high aversion to change risk might choose to accept more vulnerability risk by lowering the overall response labels for many branches in the trees, resulting in fewer vulnerabilities attaining the most urgent response. On the other hand, an organization with a high aversion to vulnerability risk could elevate the priority of many branches to ensure fixes are deployed quickly. + + diff --git a/doc/md_src_files/04_06_00_scope.md b/doc/md_src_files/04_06_00_scope.md new file mode 100644 index 00000000..77cb83d8 --- /dev/null +++ b/doc/md_src_files/04_06_00_scope.md @@ -0,0 +1,67 @@ +## Scope + +Scope is an important variable in the answers of these decision points. +It has at least three aspects. +The first is how the boundaries of the affected system are set. +The second is whose security policy is relevant. +The third is how far forward in time or causal steps one reasons about effects and harms. +We put forward recommendations for each of these aspects of scope. + +However, users of the decision process may want to define different scopes. +Users may define a different scope as long as the scope (1) is consistent across decisions, and (2) is credible, explicit, and accessible to all relevant decision makers. + +For example, suppliers often decline to support products beyond a declared end-of-life (EOL) date. In these cases, a supplier could reasonably consider vulnerabilities in those products to be out of scope. However, a deployer may still have active instances of EOL products in their infrastructure. It remains appropriate for a deployer to use SSVC to prioritize their response to such situations, since even if there is no remediation forthcoming from the supplier it may be possible for the deployer to mitigate or remediate the vulnerability in other ways, up to and including decommissioning the affected system(s). 
+ +### Boundaries of the Affected System + +One distinction is whether the system of interest is software per se or a cyber-physical system. +A problem is that in every practical case, both are involved. +Software is what has vulnerabilities and is what vulnerability management is focused on remediating. +Multiple pieces of software run on any given computer system. +To consider software vulnerabilities in a useful scope, we follow prior work and propose that a vulnerability affects “the set of software instructions that executes in an environment with a coherent function and set of permissions” [@spring2015global]. +This definition is useful because it lets us keep to common usage and intuition and call the Linux kernel—at least a specific version of it—“one piece” of software. + +But decision points about safety and mission impact are not about the software in isolation; they are about the entire cyber-physical system, of which the software is a part. +The term “physical” in “cyber-physical system” should be interpreted broadly; selling stocks or manipulating press outlet content are both best understood as affecting human social institutions. +These social institutions do not have much of a corporeal instantiation, but they are physical in the sense that they are not merely software, and so are parts of cyber-physical systems. + +The hard part of delineating the boundaries of the affected system is specifying what it means for one system to be part of another system. +Just because a computer is bolted to a wall does not mean the computer is part of the wall’s purpose, which is separating physical space. +At the same time, an off-premises DNS server may be part of the site security assurance system if the on-premises security cameras rely on the DNS server to connect to the displays at the guard’s desk. 
+We define computer software as part of a cyber-physical system if the two systems are mutually manipulable; that is, changes in the part (the software) will (at least, often) make detectable and relevant changes to the whole (the cyber-physical system), and changes in the whole will (often) make relevant and detectable changes in the part [@spring2018generalization]. + +When reasoning about a vulnerability, we assign the vulnerability to the nearest, relevant—yet more abstract—discrete component. +This assignment is particularly important when assessing technical impact on a component. This description bears some clarification, via each of the adjectives: + + - **Nearest** is relative to the abstraction at which the vulnerability exists. + + - **Relevant** implies that the impacted component must be in the chain of abstraction moving upward from the location of the flaw. + + - **More abstract** means that the impacted component is at least one level of abstraction above the specific location of the vulnerability. For example, if the vulnerability is localized to a single line of code in a function, then the function, the module, the library, the application, the product, and the system it belongs to are all “more abstract.” + + - **Discrete** means there is an identifiable thing that can be remediated (e.g., the unit of patching). + +Products, libraries, and applications tend to be appropriate objects of focus when seeking the right level to analyze the impact of a vulnerability. +For example, when reasoning about the technical impact of a vulnerability that is localized to a function in a module in an open source library, the nearest relevant discrete abstraction is the library because the patches are made available at the library level. +Similarly, analysis of a vulnerability in closed source database software that supports an enterprise resource management (ERM) system would consider the technical impact to the database, not to the ERM system. 
+ +### Relevant Security Policy + +Our definition of a vulnerability includes a security policy violation, but it does not clarify whose security policies are relevant [@householder2020cvd]. +For an organizational PSIRT or CSIRT, the organization's security policy is most relevant. +The answer is less clear for coordinators or ISACs. +An example scenario that brings the question into focus is phone OS jailbreak methods. +The owner of the phone may elect to jailbreak it; there is at least an implicit security policy from the owner that allows this method. +However, from the perspective of the explicit phone OS security policy embedded in the access controls and separation of privileges, the jailbreak is exploiting a vulnerability. +If a security policy is embedded in technical controls, such as OS access controls on a phone, then anything that violates that security policy is a vulnerability. + +### Reasoning Steps Forward + +This aspect of scope is about immediacy, prevalence, and causal importance. +**Immediacy** is about how soon after the decision point adverse effects should occur to be considered relevant. +**Prevalence** is about how common adverse effects should be to be considered relevant. +**Causal importance** is about how much an exploitation of the software in the cyber-physical system contributes to adverse effects to be considered relevant. +Our recommendation is to walk a pragmatic middle path on all three aspects. +Effects are not relevant if they are merely possible, too infrequent, far distant, or unchanged by the vulnerability. +But effects are relevant long before they are absolutely certain, ubiquitous, or occurring presently. +Overall, we summarize this aspect of scope as *consider credible effects based on known use cases of the software system as a part of cyber-physical systems*. From 5e43ed2f6ead788d69ac377487ceb0673ae6672b Mon Sep 17 00:00:00 2001 From: "Allen D. 
Householder" Date: Fri, 30 Jun 2023 14:28:56 -0400 Subject: [PATCH 06/12] split ch7 by section --- doc/md_src_files/07_00_00_prioritization.md | 294 ------------------ doc/md_src_files/07_01_00_supplier_tree.md | 18 ++ doc/md_src_files/07_02_00_deployer_tree.md | 13 + .../07_03_00_coordinator_trees.md | 32 ++ .../07_04_00_tree_customization.md | 172 ++++++++++ .../07_05_00_evidence_gathering.md | 23 ++ doc/md_src_files/07_06_00_asset_management.md | 27 ++ .../07_07_00_development_methodology.md | 9 + 8 files changed, 294 insertions(+), 294 deletions(-) create mode 100644 doc/md_src_files/07_01_00_supplier_tree.md create mode 100644 doc/md_src_files/07_02_00_deployer_tree.md create mode 100644 doc/md_src_files/07_03_00_coordinator_trees.md create mode 100644 doc/md_src_files/07_04_00_tree_customization.md create mode 100644 doc/md_src_files/07_05_00_evidence_gathering.md create mode 100644 doc/md_src_files/07_06_00_asset_management.md create mode 100644 doc/md_src_files/07_07_00_development_methodology.md diff --git a/doc/md_src_files/07_00_00_prioritization.md b/doc/md_src_files/07_00_00_prioritization.md index 6a6746cd..35c3d0e0 100644 --- a/doc/md_src_files/07_00_00_prioritization.md +++ b/doc/md_src_files/07_00_00_prioritization.md @@ -17,297 +17,3 @@ This section presents example trees for each stakeholder: supplier, deployer, an This section also provides some guidance on how to [construct and customize a decision tree](#tree-construction-and-customization-guidance) and [gather evidence](#evidence-gathering-guidance) to make decisions. How this decision information might be stored or communicated is the topic of subsections on [Asset Management](#relationship-to-asset-management) and [Communication](#guidance-on-communicating-results). - - - - -## Supplier Tree - -The example supplier tree [PDF](../graphics/ssvc_2_supplier.pdf) shows the proposed prioritization decision tree for the supplier. 
Both supplier and deployer trees use the above decision point definitions. Each tree is a compact way of expressing assertions or hypotheses about the relative priority of different situations. Each tree organizes how we propose a stakeholder should treat these situations. Rectangles are decision points, and triangles represent outcomes. The values for each decision point are different, as described above. Outcomes are priority decisions (defer, scheduled, out-of-cycle, immediate); outcome triangles are color coded: - - - Defer = gray with green outline - - Scheduled = yellow - - Out-of-Cycle = orange - - Immediate = red with black outline - -![Suggested Supplier Tree](../graphics/ssvc_2_supplier.pdf){ width=100% } - - - - -## Deployer Tree - -The example deployer tree [PDF](../graphics/ssvc_2_deployer_SeEUMss.pdf) is depicted below. - - -![Suggested Deployer Tree](../graphics/ssvc_2_deployer_SeEUMss.pdf){ width=100% } - - - -## Coordinator Trees - -As described in [Decisions During Vulnerability Coordination](#decisions-during-vulnerability-coordination), a coordination stakeholder usually makes separate triage and publication decisions. Each have trees presented below. - -### Triage Decision Tree - - -![Suggested Coordinator Triage Tree](../graphics/ssvc_2_coord-triage.pdf){ width=100% } - - - -This tree is a suggestion in that CERT/CC believes it works for us. -Other coordinators should consider customizing the tree to their needs, as described in [Tree Construction and Customization Guidance](#tree-construction-and-customization-guidance). - -### Publication Decision Tree - -Suggested decision values for this decision are available in [CSV](../../data/csvs/ssvc_2_coord-publish.csv) and [PDF](../graphics/ssvc_2_coord-publish.pdf) formats. 
- - -![Suggested Coordinator Publication Tree](../graphics/ssvc_2_coord-publish.pdf){ width=100% } - - - - -## Tree Construction and Customization Guidance - -Stakeholders are encouraged to customize the SSVC decision process to their needs. -Indeed, the first part of SSVC stands for “stakeholder-specific." -However, certain parts of SSVC are more amenable to customization than others. -In this section, we'll cover what a stakeholder should leave static, what we imagine customization looks like, and some advice on building a usable and manageable decision tree based on our experience so far. - -We suggest that the decision points, their definitions, and the decision values should not be customized. -Different vulnerability management teams inevitably think of topics such as [*Utility*](#utility) to the adversary in slightly different ways. -However, a key contribution of SSVC is enabling different teams to communicate about their decision process. -In order to clearly communicate differences in the process, the decision points that factor into the process need to be the same between different teams. -A stakeholder community may come together and, if there is broad consensus, add or change decision points. - -Which decision points are involved in a vulnerability management team's decision and the priority label for each resulting situation are, for all intents and purposes, totally at the discretion of the team. -We have provided some examples for different stakeholder communities here. -What decision points a team considers reflects what it cares about and the risks prioritizes. -Different teams may legitimately prioritize different objectives. -It should be easier for teams to discuss and communicate such differences if the definitions of the decision points remain static. -The other aspect of risk management that SSVC allows a team to customize is its risk appetite or risk tolerance. 
- -A team's risk appetite is reflected directly by the priority labels for each combination of decision values. -For example, a vulnerability with [no or minor](#public-safety-impact) [*Public Safety Impact*](#public-safety-impact), [total](#technical-impact) [*Technical Impact*](#technical-impact), and [efficient](#utility) [*Utility*](#utility) might be handled with [*scheduled*](#supplier-decisions) priority by one team and [*out-of-cycle*](#supplier-decisions) priority by another. -As long as each team has documented this choice and is consistent in its own application of its own choice, the two teams can legitimately have different appetites for vulnerability risk. -SSVC enables teams with such different risk appetites to discuss and communicate precisely the circumstances where they differ. - -When doing the detailed risk management work of creating or modifying a tree, we recommend working from text files with one line or row for each unique combination of decision values. -For examples, see [SSVC/data](https://github.com/CERTCC/SSVC/tree/main/data). -An important benefit, in our experience, is that it is easier to identify a question by saying “I'm unsure about row 16” than anything else we have thought of so far. -Once the humans agree on the decision tree, it can be converted to a JSON schema for easier machine-readable communication, following the provided [SSVC provision JSON schema](https://github.com/CERTCC/SSVC/blob/main/data/schema/SSVC_Provision_v2.03.schema.json). - -Once the decision points are selected and the prioritization labels agreed upon, it is convenient to be able to visually compress the text file by displaying it as a decision tree. -Making the decision process accessible has a lot of benefits. -Unfortunately, it also makes it a bit too easy to overcomplicate the decision. 
- -The academic literature surrounding the measurement of decision tree quality is primarily concerned with measuring classification errors given a particular tree and a labeled data set. -In our case, we are not attempting to fit a tree to data. -Rather, we are interested in producing usable trees that minimize extraneous effort. -To that end, we briefly examine the qualities for which decision tree measurement is suitable. - -### Decision Tree Construction Concerns - -Decision tree construction methods must address five significant concerns: -- feature selection -- feature type -- overfitting -- parsimony -- versioning - -#### Feature selection - -Feature selection is perhaps the most important consideration for SSVC, because it directly affects the information gathering requirements placed on the analyst attempting to use the tree. -Each decision point in SSVC is a feature. - -The SSVC version 1 ~applier~ deployer tree had 225 rows when we wrote it out in long text form. -It only has four outcomes to differentiate between. -Thus, on average that decision process treats one situation (combination of decision values) as equivalent to 65 other situations. -If nothing else, this means analysts are spending time gathering evidence to make fine distinctions that are not used in the final decision. -The added details also make it harder for the decision process to accurately manage the risks in question. -This difficulty arises because more variance and complexity there is in the decision increases the possibility of errors in the decision process itself. - -#### Feature types - -Regarding feature types, all of the features included in SSVC version 2 can be considered ordinal data. -That is, while they can be ordered (e.g., for Exploitation, active is greater than poc is greater than none), they can not be compared via subtraction or division (active - poc = nonsense). 
-The use of ordinal features is a key assumption behind our use of the parsimony analysis that follows. - -#### Overfitting - -When decision trees are used in a machine learning context, overfitting increases tree complexity by incorporating the noise in the training data set into the decision points in a tree. -In our case, our “data” is just the set of outcomes as decided by humans, so overfitting is less of a concern, assuming the feature selection has been done with care. - -#### Parsimony -Parsimony is, in essence, Occam's Razor applied to tree selection. Given the choice between two trees that have identical outputs, one should choose the tree with fewer decisions. -One way to evaluate the parsimony of a tree is by applying the concept of feature importance to ensure that each feature is contributing adequately to the result. -While there are a few ways to compute feature importance, the one we found most useful is permutation importance. -Permutation importance can be calculated on a candidate tree to highlight potential issues. -It works by randomly shuffling the values for each feature individually and comparing a fitness metric on the shuffled tree to the original. -The change in fitness is taken to be the importance of the feature that was shuffled. -Permutation importance is usually given as a number in the interval [0,1]. -Python's scikit-learn provides a permutation importance method, which we used to evaluate our trees. - -Interpreting the results of a permutation importance computation on a tree involves nuance, but one rule we can state is this: -any feature with a computed permutation importance of zero can be eliminated from the tree without losing any relevant information. -When all of the permutation importance scores for all features are relatively equal, that is an indication that each feature is approximately equally relevant to the decision. 
- -More likely, however, is that some subset of features will be of relatively equal importance, and one might be of considerably lower importance (yet not zero). -In this case, the lowest importance feature should be considered for refinement or elimination. -It is possible that adjusting the definition of a feature or its available values (whether redefining, adding, or removing options) could increase its importance. -Reasons to retain a low-importance feature include: -* the feature is relevant to a small set of important circumstances that a tree without the feature would otherwise be unable to discriminate -* the effort required to determine the correct value for the feature is relatively small, for example information that might be collected automatically -* the feature enables other features to be defined more clearly -Features that meet none of the above criteria may be good candidates for elimination. - -Customizing a tree by changing the outcome priority labels can also affect the importance of a feature. -This sort of customization is often the simplest way to adjust the importance of a feature. - -While there is no hard and fast rule for when a tree is too big, we suggest that if all of your outcomes are associated with more than 15 situations (unique combinations of decision values), you would benefit from asking whether your analysts actually use all the information they would be gathering. -Thus, 60 unique combinations of decision values is the point at which a decision tree with four distinct outcomes is, on average, potentially too big. - -#### Tree Versioning - -SSVC trees should be identifiable by name and version. A tree name is simply a short descriptive label for the tree derived from the stakeholder and/or function the tree is intended for. Tree versions are expected to share the major and minor version numbers with the SSVC version in which their decision points are defined. Revisions should increment the patch number. 
For example: “Applier Tree v1.1.0” would be the identity of the version of the Applier Tree as published in version 1.1 of SSVC. -“Coordinator Publish Tree v2.0.3” would be the identity of a future revision of the Coordinator Publish Tree as described in this document. The terms “major”, “minor”, and “patch” with respect to version numbering are intended to be consistent with [Semantic Versioning 2.0.0](https://semver.org/spec/v2.0.0.html). - -### Sharing Trees With Others - -Communities of shared interest may desire to share information about decision points or even create custom trees to share within their community. -Examples include: -- an Information Sharing and Analysis Organization (ISAO) within a critical infrastructure sector might want to define a custom decision point relevant to their constituents' regulatory compliance. -- a corporate Computer Security Incident Response Team (CSIRT) might choose to adjust decision priorities for an existing tree for use by its subsidiaries. -- a government department might define a separate tree using existing decision points to address a particular governance process within their constituent agencies. -- a regional coordinator might want to produce decision point information as a product of its threat analysis work and provide this information to its constituency in an advisory. - -In these and other scenarios, there are two scopes to consider: -1. Decision Point Scope -2. Decision Tree Scope - -#### Decision Point Scope - -Each decision point defined in this document has a characteristic scope, either *stakeholder-agnostic* or *stakeholder-specific*. - -- **Stakeholder-agnostic decision points** describe the state of the world outside the stakeholder's environment. -One might think of them as global facts that form the background context in which the stakeholder is making a prioritization decision. -Nearly all stakeholders should agree on the assignment of specific values to these decision points. 
-- **Stakeholder-specific decision points** are expected to be contextual to some set of stakeholders. -Information about a stakeholder-specific decision point can still be inherited by other stakeholders using the same tree. -For example in the corporate CSIRT scenario above, the [*System Exposure*](#system-exposure) value might be consistent across all subsidiaries for a centrally managed service. - -We generally consider the following decision points to be *stakeholder-agnostic*: -- [*Exploitation*](#exploitation) -- [*Technical Impact*](#technical-impact) -- [*Automatable*](#automatable) - -On the contrary, we consider the following decision points to be *stakeholder-specific*: -- [*Value Density*](#value-density) -- [*Utility*](#utility) -- [*Safety Impact*](#safety-impact) -- [*Public Safety Impact*](#public-safety-impact) -- [*Situated Safety Impact*](#situated-safety-impact) -- [*Mission Impact*](#mission-impact) -- [*Human Impact*](#human-impact) -- [*System Exposure*](#system-exposure) - -We anticipate that most custom decision points created by stakeholders for themselves or a constituency will be of the *stakeholder-specific* variety. -Examples of these sorts of custom decision points include -- A decision point indicating whether a system or mission context is affected by regulatory oversight that might alter the decision priority. -E.g., a healthcare-focused ISAO might define a decision point about whether a vulnerability affects patient data privacy protection. -- A decision point that incorporates the concept of change risk to a deployer. -E.g., a financial institution might have a very low tolerance for changes to a transaction clearing system. -- A decision point that indicates whether the affected software belongs to a list of critical software for a specific constituency. -E.g., an open-source consortium might want to prioritize fix development for a set of key projects. 
- -#### Decision Tree Scope - -Two kinds of modifications are possible at the decision tree level. - -- A *Risk Appetite Shift* retains the structure of an existing tree and all its decision points, and simply adjusts the decision outputs according to the stakeholder's risk appetite. -For example, an organization with sufficient resources to efficiently deploy fixes might choose to defer fewer cases than the default tree would recommend. -- *Tree Customization* can be done in one of three ways: - 1. incorporating an already-defined decision point into an existing tree that does not already contain it. - 2. defining a new decision point and adding it to an existing tree. -Note that adding or removing an option from an existing decision point should be treated as creating a new decision point. -The new decision point should be given a distinct name as well. - 3. defining a new tree entirely from existing or new decision points - -Because tree customization changes the tree structure and implies the addition or removal of leaf nodes, it will be necessary for the organization to review the decision outputs in light of its risk appetite as well. - -Risk-shifted or customized trees can be shared among a community of interest, of course. -Further customization within each stakeholder remains an option as well, although there is likely a diminishing return on more than a few layers of customization for the same basic decision. -Of course, SSVC users might choose to construct other trees to inform other decisions. - -## Guidance for Evidence Gathering - -To answer each of these decision points, a stakeholder should, as much as possible, have a repeatable evidence collection and evaluation process. However, we are proposing decisions for humans to make, so evidence collection and evaluation is not totally automatable. That caveat notwithstanding, some automation is possible. 
- -For example, whether exploitation modules are available in ExploitDB, Metasploit, or other sources is straightforward. -We hypothesize that searching Github and Pastebin for exploit code can be captured in a script. -A supplier or deployer could then define [*Exploitation*](#exploitation) to take the value of [*PoC*](#exploitation) if there are positive search results for a set of inputs derived from the CVE entry in at least one of these venues. -At least, for those vulnerabilities that are not “automatically” PoC-ready, such as on-path attackers for TLS or network replays. - -Some of the decision points require a substantial upfront analysis effort to gather risk assessment or organizational data. However, once gathered, this information can be efficiently reused across many vulnerabilities and only refreshed occasionally. An obvious example of this is the mission impact decision point. To answer this, a deployer must analyze their essential functions, how they interrelate, and how they are supported. Exposure is similar; answering that decision point requires an asset inventory, adequate understanding of the network topology, and a view of the enforced security controls. Independently operated scans, such as Shodan or Shadowserver, may play a role in evaluating exposure, but the entire exposure question cannot be reduced to a binary question of whether an organization’s assets appear in such databases. Once the deployer has the situational awareness to understand MEFs or exposure, selecting the answer for each individual vulnerability is usually straightforward. - -Stakeholders who use the prioritization method should consider releasing the priority with which they handled the vulnerability. This disclosure has various benefits. For example, if the supplier publishes a priority ranking, then deployers could consider that in their decision-making process. One reasonable way to include it is to break ties for the deployer. 
If a deployer has three “scheduled” vulnerabilities to remediate, they may address them in any order. If two vulnerabilities were produced by the supplier as “scheduled” patches, and one was “out-of-cycle,” then the deployer may want to use that information to favor the latter. - -In the case where no information is available or the organization has not yet matured its initial situational analysis, we can suggest something like defaults for some decision points. -If the deployer does not know their exposure, that means they do not know where the devices are or how they are controlled, so they should assume [*System Exposure*](#system-exposure) is [*open*](#system-exposure). -If the decision maker knows nothing about the environment in which the device is used, we suggest assuming a [*major*](#safety-impact) [*Safety Impact*](#safety-impact). This position is conservative, but software is thoroughly embedded in daily life now, so we suggest that the decision maker provide evidence that no one’s well-being will suffer. The reach of software exploits is no longer limited to a research network. -Similarly, with [*Mission Impact*](#mission-impact), the deployer should assume that the software is in use at the organization for a reason, and that it supports essential functions unless they have evidence otherwise. -With a total lack of information, assume [*support crippled*](#mission-impact) as a default. -[*Exploitation*](#exploitation) needs no special default; if adequate searches are made for exploit code and none is found, the answer is [*none*](#exploitation). -If nothing is known about [*Automatable*](#automatable), the safer answer to assume is [*yes*](#automatable). -[*Value Density*](#value-density) should always be answerable; if the product is uncommon, it is probably [*diffuse*](#value-density). -The resulting decision set {*none*, *open*, *efficient*, *medium*} results in a scheduled patch application in our recommended deployer tree. 
- -## Relationship to asset management - -Vulnerability management is a part of asset management. -SSVC can benefit from asset management practices and systems, particularly in regard to automating data collection and answers for some decision points. -SSVC depends on asset management to some extent, particularly for context on the cost and risk associated with changing or updating the asset. - -Asset management can help automate the collection of the [*Mission Impact*](#mission-impact), [*Situated Safety Impact*](#situated-safety-impact), and [*System Exposure*](#system-exposure) decision points. -These decision points tend to apply per asset rather than per vulnerability. -Therefore, once each is assessed for each asset, it can be applied to each vulnerability that applies to that asset. -While the asset assessment should be reviewed occasionally for accuracy, storing this data in an asset management system should enable automated scoring of new vulnerabilities on these decision points for those assets. - -Our method is for prioritizing vulnerabilities based on the risk stemming from exploitation. -There are other reasonable asset management considerations that may influence remediation timelines. -There are at least three aspects of asset management that may be important but are out of scope for SSVC. -First and most obvious is the transaction cost of conducting the mitigation or remediation. -System administrators are paid to develop or apply any remediations or mitigations, and there may be other transactional costs such as downtime for updates. -Second is the risk of the remediation or mitigation introducing a new error or vulnerability. -Regression testing is part of managing this type of risk. Finally, there may be an operational cost of applying a remediation or mitigation, representing an ongoing change of functionality or increased overhead. -A decision maker could order work within one SSVC priority class (scheduled, out-of-cycle, etc.) 
based on these asset management considerations, for example. -Once the organization remediates or mitigates all the high-priority vulnerabilities, they can then focus on the medium-level vulnerabilities with the same effort spent on the high-priority ones. - -Asset management and risk management also drive some of the up-front work an organization would need to do to gather some of the necessary information. -This situation is not new; an asset owner cannot prioritize which fixes to deploy to its assets if it does not have an accurate inventory of its assets. -The organization can pick its choice of tools; there are about 200 asset management tools on the market [@captera]. -Emerging standards like the Software Bill of Materials (SBOM) [@manion2019sbom] would likely reduce the burden on asset management, and organizations should prefer systems which make such information available. -If an organization does not have an asset management or risk management (see also [Gathering Information About Mission Impact](#gathering-information-about-mission-impact)) plan and process in place, then SSVC provides some guidance as to what information is important to vulnerability management decisions and the organization should start capturing, storing, and managing. - -## Development Methodology - -For this tabletop refinement, we could not select a mathematically representative set of CVEs. The goal was to select a handful of CVEs that would cover diverse types of vulnerabilities. The CVEs that we used for our tabletop exercises are CVE-2017-8083, CVE-2019-2712, CVE-2014-5570, and CVE-2017-5753. We discussed each one from the perspective of supplier and deployer. We evaluated CVE-2017-8083 twice because our understanding and descriptions had changed materially after the first three CVEs (six evaluation exercises). 
After we were satisfied that the decision trees were clearly defined and captured our intentions, we began the formal evaluation of the draft trees, which we describe in the next section. - diff --git a/doc/md_src_files/07_01_00_supplier_tree.md b/doc/md_src_files/07_01_00_supplier_tree.md new file mode 100644 index 00000000..8d38d2bb --- /dev/null +++ b/doc/md_src_files/07_01_00_supplier_tree.md @@ -0,0 +1,18 @@ +## Supplier Tree + +The example supplier tree [PDF](../graphics/ssvc_2_supplier.pdf) shows the proposed prioritization decision tree for the supplier. Both supplier and deployer trees use the above decision point definitions. Each tree is a compact way of expressing assertions or hypotheses about the relative priority of different situations. Each tree organizes how we propose a stakeholder should treat these situations. Rectangles are decision points, and triangles represent outcomes. The values for each decision point are different, as described above. Outcomes are priority decisions (defer, scheduled, out-of-cycle, immediate); outcome triangles are color coded: + + - Defer = gray with green outline + - Scheduled = yellow + - Out-of-Cycle = orange + - Immediate = red with black outline + +![Suggested Supplier Tree](../graphics/ssvc_2_supplier.pdf){ width=100% } + + + + diff --git a/doc/md_src_files/07_02_00_deployer_tree.md b/doc/md_src_files/07_02_00_deployer_tree.md new file mode 100644 index 00000000..a2c75d46 --- /dev/null +++ b/doc/md_src_files/07_02_00_deployer_tree.md @@ -0,0 +1,13 @@ +## Deployer Tree + +The example deployer tree [PDF](../graphics/ssvc_2_deployer_SeEUMss.pdf) is depicted below. 
+ + +![Suggested Deployer Tree](../graphics/ssvc_2_deployer_SeEUMss.pdf){ width=100% } + + diff --git a/doc/md_src_files/07_03_00_coordinator_trees.md b/doc/md_src_files/07_03_00_coordinator_trees.md new file mode 100644 index 00000000..c99fc58b --- /dev/null +++ b/doc/md_src_files/07_03_00_coordinator_trees.md @@ -0,0 +1,32 @@ +## Coordinator Trees + +As described in [Decisions During Vulnerability Coordination](#decisions-during-vulnerability-coordination), a coordination stakeholder usually makes separate triage and publication decisions. Each has a tree presented below. + +### Triage Decision Tree + + +![Suggested Coordinator Triage Tree](../graphics/ssvc_2_coord-triage.pdf){ width=100% } + + + +This tree is a suggestion in that CERT/CC believes it works for us. +Other coordinators should consider customizing the tree to their needs, as described in [Tree Construction and Customization Guidance](#tree-construction-and-customization-guidance). + +### Publication Decision Tree + +Suggested decision values for this decision are available in [CSV](../../data/csvs/ssvc_2_coord-publish.csv) and [PDF](../graphics/ssvc_2_coord-publish.pdf) formats. + + +![Suggested Coordinator Publication Tree](../graphics/ssvc_2_coord-publish.pdf){ width=100% } + + + + diff --git a/doc/md_src_files/07_04_00_tree_customization.md b/doc/md_src_files/07_04_00_tree_customization.md new file mode 100644 index 00000000..2d4f4873 --- /dev/null +++ b/doc/md_src_files/07_04_00_tree_customization.md @@ -0,0 +1,172 @@ +## Tree Construction and Customization Guidance + +Stakeholders are encouraged to customize the SSVC decision process to their needs. +Indeed, the first part of SSVC stands for “stakeholder-specific.” +However, certain parts of SSVC are more amenable to customization than others. 
+In this section, we'll cover what a stakeholder should leave static, what we imagine customization looks like, and some advice on building a usable and manageable decision tree based on our experience so far. + +We suggest that the decision points, their definitions, and the decision values should not be customized. +Different vulnerability management teams inevitably think of topics such as [*Utility*](#utility) to the adversary in slightly different ways. +However, a key contribution of SSVC is enabling different teams to communicate about their decision process. +In order to clearly communicate differences in the process, the decision points that factor into the process need to be the same between different teams. +A stakeholder community may come together and, if there is broad consensus, add or change decision points. + +Which decision points are involved in a vulnerability management team's decision and the priority label for each resulting situation are, for all intents and purposes, totally at the discretion of the team. +We have provided some examples for different stakeholder communities here. +What decision points a team considers reflects what it cares about and the risks it prioritizes. +Different teams may legitimately prioritize different objectives. +It should be easier for teams to discuss and communicate such differences if the definitions of the decision points remain static. +The other aspect of risk management that SSVC allows a team to customize is its risk appetite or risk tolerance. + +A team's risk appetite is reflected directly by the priority labels for each combination of decision values. 
+For example, a vulnerability with [no or minor](#public-safety-impact) [*Public Safety Impact*](#public-safety-impact), [total](#technical-impact) [*Technical Impact*](#technical-impact), and [efficient](#utility) [*Utility*](#utility) might be handled with [*scheduled*](#supplier-decisions) priority by one team and [*out-of-cycle*](#supplier-decisions) priority by another. +As long as each team has documented this choice and is consistent in its own application of its own choice, the two teams can legitimately have different appetites for vulnerability risk. +SSVC enables teams with such different risk appetites to discuss and communicate precisely the circumstances where they differ. + +When doing the detailed risk management work of creating or modifying a tree, we recommend working from text files with one line or row for each unique combination of decision values. +For examples, see [SSVC/data](https://github.com/CERTCC/SSVC/tree/main/data). +An important benefit, in our experience, is that it is easier to identify a question by saying “I'm unsure about row 16” than anything else we have thought of so far. +Once the humans agree on the decision tree, it can be converted to a JSON schema for easier machine-readable communication, following the provided [SSVC provision JSON schema](https://github.com/CERTCC/SSVC/blob/main/data/schema/SSVC_Provision_v2.03.schema.json). + +Once the decision points are selected and the prioritization labels agreed upon, it is convenient to be able to visually compress the text file by displaying it as a decision tree. +Making the decision process accessible has a lot of benefits. +Unfortunately, it also makes it a bit too easy to overcomplicate the decision. + +The academic literature surrounding the measurement of decision tree quality is primarily concerned with measuring classification errors given a particular tree and a labeled data set. +In our case, we are not attempting to fit a tree to data. 
+Rather, we are interested in producing usable trees that minimize extraneous effort. +To that end, we briefly examine the qualities for which decision tree measurement is suitable. + +### Decision Tree Construction Concerns + +Decision tree construction methods must address five significant concerns: +- feature selection +- feature type +- overfitting +- parsimony +- versioning + +#### Feature selection + +Feature selection is perhaps the most important consideration for SSVC, because it directly affects the information gathering requirements placed on the analyst attempting to use the tree. +Each decision point in SSVC is a feature. + +The SSVC version 1 ~applier~ deployer tree had 225 rows when we wrote it out in long text form. +It only has four outcomes to differentiate between. +Thus, on average that decision process treats one situation (combination of decision values) as equivalent to 65 other situations. +If nothing else, this means analysts are spending time gathering evidence to make fine distinctions that are not used in the final decision. +The added details also make it harder for the decision process to accurately manage the risks in question. +This difficulty arises because the more variance and complexity there is in the decision, the greater the possibility of errors in the decision process itself. + +#### Feature types + +Regarding feature types, all of the features included in SSVC version 2 can be considered ordinal data. +That is, while they can be ordered (e.g., for Exploitation, active is greater than poc is greater than none), they cannot be compared via subtraction or division (active - poc = nonsense). +The use of ordinal features is a key assumption behind our use of the parsimony analysis that follows. + +#### Overfitting + +When decision trees are used in a machine learning context, overfitting increases tree complexity by incorporating the noise in the training data set into the decision points in a tree. 
+In our case, our “data” is just the set of outcomes as decided by humans, so overfitting is less of a concern, assuming the feature selection has been done with care. + +#### Parsimony +Parsimony is, in essence, Occam's Razor applied to tree selection. Given the choice between two trees that have identical outputs, one should choose the tree with fewer decisions. +One way to evaluate the parsimony of a tree is by applying the concept of feature importance to ensure that each feature is contributing adequately to the result. +While there are a few ways to compute feature importance, the one we found most useful is permutation importance. +Permutation importance can be calculated on a candidate tree to highlight potential issues. +It works by randomly shuffling the values for each feature individually and comparing a fitness metric on the shuffled tree to the original. +The change in fitness is taken to be the importance of the feature that was shuffled. +Permutation importance is usually given as a number in the interval [0,1]. +Python's scikit-learn provides a permutation importance method, which we used to evaluate our trees. + +Interpreting the results of a permutation importance computation on a tree involves nuance, but one rule we can state is this: +any feature with a computed permutation importance of zero can be eliminated from the tree without losing any relevant information. +When all of the permutation importance scores for all features are relatively equal, that is an indication that each feature is approximately equally relevant to the decision. + +More likely, however, is that some subset of features will be of relatively equal importance, and one might be of considerably lower importance (yet not zero). +In this case, the lowest importance feature should be considered for refinement or elimination. 
+It is possible that adjusting the definition of a feature or its available values (whether redefining, adding, or removing options) could increase its importance. +Reasons to retain a low-importance feature include: +* the feature is relevant to a small set of important circumstances that a tree without the feature would otherwise be unable to discriminate +* the effort required to determine the correct value for the feature is relatively small, for example information that might be collected automatically +* the feature enables other features to be defined more clearly +Features that meet none of the above criteria may be good candidates for elimination. + +Customizing a tree by changing the outcome priority labels can also affect the importance of a feature. +This sort of customization is often the simplest way to adjust the importance of a feature. + +While there is no hard and fast rule for when a tree is too big, we suggest that if all of your outcomes are associated with more than 15 situations (unique combinations of decision values), you would benefit from asking whether your analysts actually use all the information they would be gathering. +Thus, 60 unique combinations of decision values is the point at which a decision tree with four distinct outcomes is, on average, potentially too big. + +#### Tree Versioning + +SSVC trees should be identifiable by name and version. A tree name is simply a short descriptive label for the tree derived from the stakeholder and/or function the tree is intended for. Tree versions are expected to share the major and minor version numbers with the SSVC version in which their decision points are defined. Revisions should increment the patch number. For example: “Applier Tree v1.1.0” would be the identity of the version of the Applier Tree as published in version 1.1 of SSVC. +“Coordinator Publish Tree v2.0.3” would be the identity of a future revision of the Coordinator Publish Tree as described in this document. 
The terms “major”, “minor”, and “patch” with respect to version numbering are intended to be consistent with [Semantic Versioning 2.0.0](https://semver.org/spec/v2.0.0.html). + +### Sharing Trees With Others + +Communities of shared interest may desire to share information about decision points or even create custom trees to share within their community. +Examples include: +- an Information Sharing and Analysis Organization (ISAO) within a critical infrastructure sector might want to define a custom decision point relevant to their constituents' regulatory compliance. +- a corporate Computer Security Incident Response Team (CSIRT) might choose to adjust decision priorities for an existing tree for use by its subsidiaries. +- a government department might define a separate tree using existing decision points to address a particular governance process within their constituent agencies. +- a regional coordinator might want to produce decision point information as a product of its threat analysis work and provide this information to its constituency in an advisory. + +In these and other scenarios, there are two scopes to consider: +1. Decision Point Scope +2. Decision Tree Scope + +#### Decision Point Scope + +Each decision point defined in this document has a characteristic scope, either *stakeholder-agnostic* or *stakeholder-specific*. + +- **Stakeholder-agnostic decision points** describe the state of the world outside the stakeholder's environment. +One might think of them as global facts that form the background context in which the stakeholder is making a prioritization decision. +Nearly all stakeholders should agree on the assignment of specific values to these decision points. +- **Stakeholder-specific decision points** are expected to be contextual to some set of stakeholders. +Information about a stakeholder-specific decision point can still be inherited by other stakeholders using the same tree. 
+For example in the corporate CSIRT scenario above, the [*System Exposure*](#system-exposure) value might be consistent across all subsidiaries for a centrally managed service. + +We generally consider the following decision points to be *stakeholder-agnostic*: +- [*Exploitation*](#exploitation) +- [*Technical Impact*](#technical-impact) +- [*Automatable*](#automatable) + +On the contrary, we consider the following decision points to be *stakeholder-specific*: +- [*Value Density*](#value-density) +- [*Utility*](#utility) +- [*Safety Impact*](#safety-impact) +- [*Public Safety Impact*](#public-safety-impact) +- [*Situated Safety Impact*](#situated-safety-impact) +- [*Mission Impact*](#mission-impact) +- [*Human Impact*](#human-impact) +- [*System Exposure*](#system-exposure) + +We anticipate that most custom decision points created by stakeholders for themselves or a constituency will be of the *stakeholder-specific* variety. +Examples of these sorts of custom decision points include +- A decision point indicating whether a system or mission context is affected by regulatory oversight that might alter the decision priority. +E.g., a healthcare-focused ISAO might define a decision point about whether a vulnerability affects patient data privacy protection. +- A decision point that incorporates the concept of change risk to a deployer. +E.g., a financial institution might have a very low tolerance for changes to a transaction clearing system. +- A decision point that indicates whether the affected software belongs to a list of critical software for a specific constituency. +E.g., an open-source consortium might want to prioritize fix development for a set of key projects. + +#### Decision Tree Scope + +Two kinds of modifications are possible at the decision tree level. + +- A *Risk Appetite Shift* retains the structure of an existing tree and all its decision points, and simply adjusts the decision outputs according to the stakeholder's risk appetite. 
+For example, an organization with sufficient resources to efficiently deploy fixes might choose to defer fewer cases than the default tree would recommend. +- *Tree Customization* can be done in one of three ways: + 1. incorporating an already-defined decision point into an existing tree that does not already contain it. + 2. defining a new decision point and adding it to an existing tree. +Note that adding or removing an option from an existing decision point should be treated as creating a new decision point. +The new decision point should be given a distinct name as well. + 3. defining a new tree entirely from existing or new decision points + +Because tree customization changes the tree structure and implies the addition or removal of leaf nodes, it will be necessary for the organization to review the decision outputs in light of its risk appetite as well. + +Risk-shifted or customized trees can be shared among a community of interest, of course. +Further customization within each stakeholder remains an option as well, although there is likely a diminishing return on more than a few layers of customization for the same basic decision. +Of course, SSVC users might choose to construct other trees to inform other decisions. + diff --git a/doc/md_src_files/07_05_00_evidence_gathering.md b/doc/md_src_files/07_05_00_evidence_gathering.md new file mode 100644 index 00000000..d3d20035 --- /dev/null +++ b/doc/md_src_files/07_05_00_evidence_gathering.md @@ -0,0 +1,23 @@ +## Guidance for Evidence Gathering + +To answer each of these decision points, a stakeholder should, as much as possible, have a repeatable evidence collection and evaluation process. However, we are proposing decisions for humans to make, so evidence collection and evaluation is not totally automatable. That caveat notwithstanding, some automation is possible. + +For example, whether exploitation modules are available in ExploitDB, Metasploit, or other sources is straightforward. 
+We hypothesize that searching Github and Pastebin for exploit code can be captured in a script. +A supplier or deployer could then define [*Exploitation*](#exploitation) to take the value of [*PoC*](#exploitation) if there are positive search results for a set of inputs derived from the CVE entry in at least one of these venues. +At least, for those vulnerabilities that are not “automatically” PoC-ready, such as on-path attackers for TLS or network replays. + +Some of the decision points require a substantial upfront analysis effort to gather risk assessment or organizational data. However, once gathered, this information can be efficiently reused across many vulnerabilities and only refreshed occasionally. An obvious example of this is the mission impact decision point. To answer this, a deployer must analyze their essential functions, how they interrelate, and how they are supported. Exposure is similar; answering that decision point requires an asset inventory, adequate understanding of the network topology, and a view of the enforced security controls. Independently operated scans, such as Shodan or Shadowserver, may play a role in evaluating exposure, but the entire exposure question cannot be reduced to a binary question of whether an organization’s assets appear in such databases. Once the deployer has the situational awareness to understand MEFs or exposure, selecting the answer for each individual vulnerability is usually straightforward. + +Stakeholders who use the prioritization method should consider releasing the priority with which they handled the vulnerability. This disclosure has various benefits. For example, if the supplier publishes a priority ranking, then deployers could consider that in their decision-making process. One reasonable way to include it is to break ties for the deployer. If a deployer has three “scheduled” vulnerabilities to remediate, they may address them in any order. 
If two vulnerabilities were produced by the supplier as “scheduled” patches, and one was “out-of-cycle,” then the deployer may want to use that information to favor the latter. + +In the case where no information is available or the organization has not yet matured its initial situational analysis, we can suggest something like defaults for some decision points. +If the deployer does not know their exposure, that means they do not know where the devices are or how they are controlled, so they should assume [*System Exposure*](#system-exposure) is [*open*](#system-exposure). +If the decision maker knows nothing about the environment in which the device is used, we suggest assuming a [*major*](#safety-impact) [*Safety Impact*](#safety-impact). This position is conservative, but software is thoroughly embedded in daily life now, so we suggest that the decision maker provide evidence that no one’s well-being will suffer. The reach of software exploits is no longer limited to a research network. +Similarly, with [*Mission Impact*](#mission-impact), the deployer should assume that the software is in use at the organization for a reason, and that it supports essential functions unless they have evidence otherwise. +With a total lack of information, assume [*support crippled*](#mission-impact) as a default. +[*Exploitation*](#exploitation) needs no special default; if adequate searches are made for exploit code and none is found, the answer is [*none*](#exploitation). +If nothing is known about [*Automatable*](#automatable), the safer answer to assume is [*yes*](#automatable). +[*Value Density*](#value-density) should always be answerable; if the product is uncommon, it is probably [*diffuse*](#value-density). +The resulting decision set {*none*, *open*, *efficient*, *medium*} results in a scheduled patch application in our recommended deployer tree. 
+ diff --git a/doc/md_src_files/07_06_00_asset_management.md b/doc/md_src_files/07_06_00_asset_management.md new file mode 100644 index 00000000..467dfd79 --- /dev/null +++ b/doc/md_src_files/07_06_00_asset_management.md @@ -0,0 +1,27 @@ +## Relationship to asset management + +Vulnerability management is a part of asset management. +SSVC can benefit from asset management practices and systems, particularly in regard to automating data collection and answers for some decision points. +SSVC depends on asset management to some extent, particularly for context on the cost and risk associated with changing or updating the asset. + +Asset management can help automate the collection of the [*Mission Impact*](#mission-impact), [*Situated Safety Impact*](#situated-safety-impact), and [*System Exposure*](#system-exposure) decision points. +These decision points tend to apply per asset rather than per vulnerability. +Therefore, once each is assessed for each asset, it can be applied to each vulnerability that applies to that asset. +While the asset assessment should be reviewed occasionally for accuracy, storing this data in an asset management system should enable automated scoring of new vulnerabilities on these decision points for those assets. + +Our method is for prioritizing vulnerabilities based on the risk stemming from exploitation. +There are other reasonable asset management considerations that may influence remediation timelines. +There are at least three aspects of asset management that may be important but are out of scope for SSVC. +First and most obvious is the transaction cost of conducting the mitigation or remediation. +System administrators are paid to develop or apply any remediations or mitigations, and there may be other transactional costs such as downtime for updates. +Second is the risk of the remediation or mitigation introducing a new error or vulnerability. +Regression testing is part of managing this type of risk. 
Finally, there may be an operational cost of applying a remediation or mitigation, representing an ongoing change of functionality or increased overhead. +A decision maker could order work within one SSVC priority class (scheduled, out-of-cycle, etc.) based on these asset management considerations, for example. +Once the organization remediates or mitigates all the high-priority vulnerabilities, they can then focus on the medium-level vulnerabilities with the same effort spent on the high-priority ones. + +Asset management and risk management also drive some of the up-front work an organization would need to do to gather some of the necessary information. +This situation is not new; an asset owner cannot prioritize which fixes to deploy to its assets if it does not have an accurate inventory of its assets. +The organization can pick its choice of tools; there are about 200 asset management tools on the market [@captera]. +Emerging standards like the Software Bill of Materials (SBOM) [@manion2019sbom] would likely reduce the burden on asset management, and organizations should prefer systems which make such information available. +If an organization does not have an asset management or risk management (see also [Gathering Information About Mission Impact](#gathering-information-about-mission-impact)) plan and process in place, then SSVC provides some guidance as to what information is important to vulnerability management decisions and the organization should start capturing, storing, and managing. + diff --git a/doc/md_src_files/07_07_00_development_methodology.md b/doc/md_src_files/07_07_00_development_methodology.md new file mode 100644 index 00000000..b0437919 --- /dev/null +++ b/doc/md_src_files/07_07_00_development_methodology.md @@ -0,0 +1,9 @@ +## Development Methodology + +For this tabletop refinement, we could not select a mathematically representative set of CVEs. +The goal was to select a handful of CVEs that would cover diverse types of vulnerabilities. 
+The CVEs that we used for our tabletop exercises are CVE-2017-8083, CVE-2019-2712, CVE-2014-5570, and CVE-2017-5753. +We discussed each one from the perspective of supplier and deployer. +We evaluated CVE-2017-8083 twice because our understanding and descriptions had changed materially after the first three CVEs (six evaluation exercises). +After we were satisfied that the decision trees were clearly defined and captured our intentions, we began the formal evaluation of the draft trees, which we describe in the next section. + From 02862131978b207f154e5b581efe2a1b375d551e Mon Sep 17 00:00:00 2001 From: "Allen D. Householder" Date: Fri, 30 Jun 2023 14:37:14 -0400 Subject: [PATCH 07/12] update Makefile to stop using versioned file names use current commit hash when run to determine fix level in title of pdf/html --- doc/Makefile | 11 +- doc/ssvc.html | 459 +++++++++++++++++++++++++++++++++----------------- doc/ssvc.pdf | Bin 468509 -> 475685 bytes 3 files changed, 314 insertions(+), 156 deletions(-) diff --git a/doc/Makefile b/doc/Makefile index 6cbd45f2..0252949f 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -13,12 +13,12 @@ SHELL=/bin/bash # versioning # need a better way of automatically updating version numbers MAJOR=2 -MINOR=0 +MINOR=1 # these have the common meanings from semantic versioning # major should be incremented with content changes that introduce incompatibilities # minor should be incremented for meaningful changes that do not break compatibility -FIX=5 -# fix version needs to be incremented with every commit +# fix is based on git commit +FIX=`git rev-parse --short HEAD` #versioning across different content is a bit complicated. 
# The PDF major.minor should match any schema, tree, or other supporting document version @@ -29,6 +29,7 @@ HOME:=$(shell pwd) OUTDIR=$(HOME) SRC=./md_src_files + TITLE:="Prioritizing Vulnerability Response: A Stakeholder-Specific Vulnerability Categorization (SSVC version $(MAJOR).$(MINOR).$(FIX))" TITLE_PREFIX:="SSVC" COMPILE_DATE:="Compiled `date -u`" @@ -38,8 +39,8 @@ BIBLIOGRAPHY:=$(SRC)/sources_ssvc.bib # TODO decide whether to include FIX level in file name or not #PDF_OUT:=$(OUTDIR)/ssvc_v$(MAJOR)-$(MINOR)-$(FIX).pdf #HTML_OUT:=$(OUTDIR)/ssvc_v$(MAJOR)-$(MINOR)-$(FIX).html -PDF_OUT:=$(OUTDIR)/ssvc_v$(MAJOR)-$(MINOR).pdf -HTML_OUT:=$(OUTDIR)/ssvc_v$(MAJOR)-$(MINOR).html +PDF_OUT:=$(OUTDIR)/ssvc.pdf +HTML_OUT:=$(OUTDIR)/ssvc.html EMOJI_REPLACEMENTS:=$(HOME)/emoji-replacements.sed diff --git a/doc/ssvc.html b/doc/ssvc.html index 3b510aa4..cb71ebce 100644 --- a/doc/ssvc.html +++ b/doc/ssvc.html @@ -12,7 +12,7 @@ - SSVC – Prioritizing Vulnerability Response: A Stakeholder-Specific Vulnerability Categorization (SSVC version 2.0.5) + SSVC – Prioritizing Vulnerability Response: A Stakeholder-Specific Vulnerability Categorization (SSVC version 2.1.5e43ed2)