Steve Kerns

More by Steve Kerns
Application Self Protection - A New Addition to the OWASP Top 10
June 6, 2017
                    [post_content] => OWASP has just released their release candidate of the Top 10 most critical web application security risks. While no major changes were included, i.e. Injection is still number one in the list, they added two new ones:
  1. A7 – Insufficient Attack Protection
  2. A10 - Under protected APIs
This blog discusses the first.

A7 – Insufficient Attack Protection

OWASP stated the reason for the addition as being: "For years, we've considered adding insufficient defenses against automated attacks. Based on the data call, we see that the majority of applications and APIs lack basic capabilities to detect, prevent, and respond to both manual and automated attacks. Application and API owners also need to be able to deploy patches quickly to protect against attacks." An application must protect itself against attacks not just by rejecting invalid input, but also by detecting and blocking attempts to exploit its security vulnerabilities. The application must try to detect and prevent these attempts, log them, and respond to them. What are some examples of attacks that should be handled?
  • Brute force attacks to guess user credentials
  • Flooding user’s email accounts using email forms in the application
  • Attempting to determine valid credit card numbers from stolen cards
  • Denial of service by flooding the application with many requests
  • XSS or SQL Injection attacks by automated tools
A more complete list can be found here, but the ways in which these attacks are handled are all very similar.

Prevention

The first step is to prevent these types of attacks. Consider building some of the following protections into the application:
  • Remove or limit the values of the data accessed using the application; can it be changed, masked or removed?
  • Create use (abuse) cases that simulate automated web attacks.
  • Identify and restrict automated attacks by recognizing automation techniques to determine whether requests are being made by a human or by an automated tool.
  • Make sure the user is authenticated and authorized to use the application.
  • Consider using CAPTCHA when high value functions are being performed.
  • Set limits on how many transactions can be performed over a specified time; consider doing this by user or groups of users, devices, or IP address (see the rate-limiting sketch after these lists).
  • Consider the use of web application firewalls that detect these types of attacks. Another alternative is using OWASP AppSensor or similar; it is built into the application to detect these types of attacks.
  • Build conditions into your terms and conditions; require the user not to use automated tools when using the application.
Other defenses to consider include:
  • Network firewalls
  • Load balancers
  • Anti-DDoS systems
  • Intrusion Detection System (IDS) and Intrusion Prevention System (IPS)
  • Data Loss Prevention
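To make the transaction-limit idea concrete, here is a minimal sketch of a sliding-window rate limiter in Python. The window size, the limit, and the in-memory store are illustrative assumptions, not part of the original post; a production version would need shared storage across processes.

import time
from collections import defaultdict, deque

# Illustrative limits: at most 20 high-value requests per rolling 60-second window.
WINDOW_SECONDS = 60
MAX_REQUESTS = 20

_history = defaultdict(deque)  # key (user id, device, or IP) -> recent request timestamps

def allow_request(key):
    """Return True if this user/device/IP is under the limit, False to throttle."""
    now = time.time()
    window = _history[key]
    while window and now - window[0] > WINDOW_SECONDS:
        window.popleft()  # drop timestamps that aged out of the window
    if len(window) >= MAX_REQUESTS:
        return False
    window.append(now)
    return True

Which key you pick matters: keying by IP address throttles everyone behind a shared proxy as a group, while keying by user or device mirrors the per-user limits suggested above.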

Detection

An application must determine if activity is an attack or just suspicious.  The response must be appropriate based on which of these is true.
  • Could the user have made a mistake when entering the data?
  • Is the process being followed for the application or is the user trying to jump past steps in the process?
  • Does the user need a special tool or knowledge?
If any two of the above indicators point to an attack, then it is most likely an attack and not just suspicious activity (a minimal sketch of this heuristic follows this paragraph). Also consider whether requests are coming in at a very high rate: a typical user may make one request every couple of seconds, whereas a tool such as Burp Suite Pro or WebInspect may make many requests per second. The application should detect these types of attacks, as well as attempts to find vulnerabilities in the application.
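As a sketch only, the two-of-three rule above can be written as a tiny classifier; the indicator names are hypothetical and would be populated from your own request context.

def classify_activity(indicators):
    """indicators maps each question above to True when it points at an attack,
    e.g. {"not_a_plausible_mistake": True, "skipped_process_steps": False,
          "needed_special_tooling": True}."""
    score = sum(1 for points_at_attack in indicators.values() if points_at_attack)
    return "attack" if score >= 2 else "suspicious"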

Response

The application can handle detected attacks, or even the suspicion of attacks, in a variety of ways. The first should be a warning to the user. This will remind a normal user that their activities are being monitored, and it will also warn a malicious user that certain events are being watched, though it will probably not deter the latter. Based on further activity after the warning, the application could either log the user out or lock the account. If a logout is performed, automated tools can be programmed to automatically re-authenticate; if lockout is chosen, all activity stops. In any of these cases, a legitimate user may end up calling the help desk, so the application must log this type of activity and notify the application's administrators, and the log must be reviewed to determine whether the response was appropriate. Choosing which action to perform depends on the sensitivity of the data within the application: a public website must be more forgiving to prevent overreaction to suspicious activity, whereas an application with highly sensitive data must respond quickly. A sketch of such an escalation ladder follows.
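This is a minimal sketch of the warn/logout/lockout escalation described above, assuming a per-user offense counter maintained elsewhere; all names are illustrative.

import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("app.security")

ACTIONS = ["warn", "logout", "lockout"]  # ordered from least to most severe

def respond(user_id, prior_offenses):
    """Pick the next response step and log it so administrators are notified."""
    action = ACTIONS[min(prior_offenses, len(ACTIONS) - 1)]
    log.warning("suspicious activity by %s -> %s (offense #%d)",
                user_id, action, prior_offenses + 1)
    return action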

Conclusion

The OWASP Top 10 2017 A7 – Insufficient Attack Protection requires the application to prevent, detect, and respond to attacks. This could affect other regulations, such as PCI, that base their standards on the OWASP Top 10.

Open Source Software – Is It the Death of Your Company?
March 28, 2016

Open source software (OSS) is software whose source code is available for modification or enhancement by anyone. Many companies use OSS to develop their applications, yet do not know what dangers lurk in it. There may be legal ramifications stemming from the licenses used by the OSS, or security vulnerabilities in the software itself. Do you even know what OSS you are using in your application? Have your developers pulled the source into your application instead of using the binaries? The first step is to find out what OSS you are using and what versions. The next step is to find out what license each piece of software uses.

Licenses

There are many different open source licenses, some of them good (permissive) and some not so good. A "permissive" license is simply a non-copyleft open source license: one that guarantees the freedoms to use, modify, and redistribute, but that permits proprietary derivative works. As of last count, the Open Source Initiative (OSI) recognizes 76 different licenses, some permissive and some not so permissive. There are also some that OSI does not recognize, such as the Beerware license, which says: "As long as you retain this notice you can do whatever you want with this stuff. If we meet some day, and you think this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp". This is considered a permissive license.

Copyleft is a copyright licensing scheme under which an author surrenders some, but not all, rights under copyright law, while imposing some restrictions on those who want to engage in activities that would more usually be reserved for the copyright holder. "Weak copyleft" licenses are generally used for software libraries, to allow other software to link to the library and then be redistributed without the legal requirement that the linking work be distributed under the library's copyleft license. Only changes to the weak-copylefted software itself become subject to the copyleft provisions of such a license, not changes to the software that links to it. This allows programs of any license to be compiled and linked against copylefted libraries such as glibc (the GNU project's implementation of the C standard library) and then redistributed without any re-licensing required.

Copyleft licenses (GPL, etc.) become an issue if the OSS source is actually pulled into your application. Developers can do this without anyone's knowledge, but doing so would require you to release your source code; all of your intellectual property then becomes open source under that license. The licenses on the OSS you use can place many or few restrictions on your software. Make sure you are aware of the license(s) that apply to each OSS component you use, and have a lawyer review all of them.

What if you do not comply with the license? I am not a lawyer, but I believe that if a company finds out you are using their software out of compliance with the license, you may end up with a lawsuit. In fact, the lawyer I worked with at a previous job was adamant that the company not use any copyleft software; he would not sign off on a software release unless it was free of copyleft software.

Security Vulnerabilities

As you are aware, all software has bugs, and from a security perspective the OSS you are using contains them as well. Over the last couple of weeks I was doing a web application penetration test and discovered that the software was using about 80 different open source libraries (JAR files). Among them were the Apache Commons Collections (ACC) and Apache Standard Taglibs (AST). Each of these has security vulnerabilities that are considered high risk (CVSS score of 7.5 or above). For example, ACC is vulnerable to insecure deserialization of data, which may result in arbitrary code execution. If the application is using OSS that is out of date by many months or years, it may have undiscovered or unreported vulnerabilities; older software tends to have security vulnerabilities that go undetected or unreported. What vulnerabilities you allow in your software is up to your company policy, so you need to decide whether you will allow the release of software that is old or contains known security vulnerabilities. A sketch of a simple JAR inventory follows.
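As a first pass at that inventory, a short script can list the JARs an application ships and flag names on a locally maintained watch list. The watch-list entry below is illustrative only; real data would come from your own research or a scanning product.

import pathlib
import re

KNOWN_BAD = {  # library name -> note (hypothetical entry)
    "commons-collections": "insecure deserialization, CVSS >= 7.5",
}

def audit_jars(app_dir):
    for jar in sorted(pathlib.Path(app_dir).rglob("*.jar")):
        # Typical naming convention: <artifact>-<version>.jar
        m = re.match(r"(?P<name>[A-Za-z0-9_.-]+?)-(?P<version>\d[\w.]*)\.jar$", jar.name)
        name = m.group("name") if m else jar.stem
        note = KNOWN_BAD.get(name, "not on local watch list")
        print(f"{jar.name}: {note}")

# audit_jars("/path/to/app/lib")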

Solutions

You can do research on each OSS component you use. This means visiting the website for the OSS or opening up each JAR file and reviewing the license information. Make sure you track this, because OSS can change licenses between releases: under one version it could be released using the aforementioned Beerware license, and the next one could be under a copyleft license. For security vulnerabilities, going to the vendor's web site might give you some information, but also review the public vulnerability databases, and consider using software to scan for these issues. I am most familiar with CLM, which we installed at a previous company that I worked for; it discovered many issues in the OSS we were using in our products. The software teams had to scramble to fix the issues that were discovered. As I mentioned before, the lawyer did not allow the release of any software with certain licenses; the teams ended up either upgrading the OSS or removing it completely from the software.

Dumping Memory on iOS 8
March 14, 2016

Back in January of 2015 NetSPI published a blog on extracting memory from an iOS device. Even though NetSPI provided a script to make it easy, it required iOS 7 (or earlier) and GDB, and GDB is no longer available on iOS 8. Fortunately, there are alternatives to GDB, and extracting memory from an Apple iPhone running iOS 8+ could not be easier. It requires the following pieces of software:
  • LLDB (https://lldb.llvm.org/)
  • Debugserver (part of Xcode)
  • Tcprelay.py (https://code.google.com/p/iphonetunnel-mac/source/browse/trunk/gui/tcprelay.py?r=5)
Of course you will need a jailbroken iPhone or iPad. I will not cover that part of the operation here. Start tcprelay so you can connect to the device over a USB connection:
$ ./tcprelay.py -t 22:2222 1234:1234
Forwarding local port 2222 to remote port 22
Forwarding local port 1234 to remote port 1234
Incoming connection to 2222
Waiting for devices...
Connecting to device <MuxDevice: ID 17 ProdID 0x12a8 Serial '0ea150b00ba3deeacb42f399492b7990416a0c87' Location 0x14120000>
Connection established, relaying data
Incoming connection to 1234
Waiting for devices...
Connecting to device <MuxDevice: ID 17 ProdID 0x12a8 Serial '0ea150b00ba3deeacb42f399492b7990416a0c87' Location 0x14120000>
Connection established, relaying data
The command "tcprelay.py -t 22:2222 1234:1234" redirects two local ports to the device. The first is used to SSH to the device over port 2222; the second is the port the debugserver will use. Then you will need to connect to the iOS device and start the debug server (I am assuming you have already copied the software to the device; if not, you can use scp to copy the binary).
$ ssh root@127.0.0.1 -p 2222
root@127.0.0.1's password:
Then, if the application is already running, verify its name using 'ps aux | grep <appname>' and connect to the application with debugserver (using the name of the application, not the PID):
root# ./debugserver *:1234 -a appname
debugserver-@(#)PROGRAM:debugserver  PROJECT:debugserver-320.2.89 for arm64.
Attaching to process appname...
Listening to port 1234 for a connection from *...
Waiting for debugger instructions for process 0.
The command './debugserver *:1234 -a appname' tells the software to start up on port 1234 and hook into the application named 'appname'. It will take a little time, so be patient. On the Mac, start up LLDB and connect to the debugserver software running on the iOS device. Remember, we have relayed port 1234, which the debugserver is listening on, to local port 1234.
$ lldb
(lldb) process connect connect://127.0.0.1:1234
Process 2017 stopped
* thread #1: tid = 0x517f9, 0x380f54f0 libsystem_kernel.dylib`mach_msg_trap + 20, queue = 'com.apple.main-thread', stop reason = signal SIGSTOP
    frame #0: 0x380f54f0 libsystem_kernel.dylib`mach_msg_trap + 20
libsystem_kernel.dylib`mach_msg_trap:
->  0x380f54f0 <+20>: pop    {r4, r5, r6, r8}
    0x380f54f4 <+24>: bx     lr
libsystem_kernel.dylib`mach_msg_overwrite_trap:
    0x380f54f8 <+0>:  mov    r12, sp
    0x380f54fc <+4>:  push   {r4, r5, r6, r8}
Now you can dump the information about the memory sections of the application.
(lldb) image dump sections appname
Sections for '/private/var/mobile/Containers/Bundle/Application/F3CFF345-71FC-47C4-B1FB-3DAC523C7627/appname.app/appname(0x0000000000047000)' (armv7):
  SectID     Type             Load Address                             File Off.  File Size  Flags      Section Name
  ---------- ---------------- ---------------------------------------  ---------- ---------- ---------- ----------------------------
  0x00000100 container        [0x0000000000000000-0x0000000000004000)* 0x00000000 0x00000000 0x00000000 appname.__PAGEZERO
  0x00000200 container        [0x0000000000047000-0x00000000001af000)  0x00000000 0x00168000 0x00000000 appname.__TEXT
  0x00000001 code             [0x000000000004e6e8-0x000000000016d794)  0x000076e8 0x0011f0ac 0x80000400 appname.__TEXT.__text
  0x00000002 code             [0x000000000016d794-0x000000000016e5e0)  0x00126794 0x00000e4c 0x80000400 appname.__TEXT.__stub_helper
  0x00000003 data-cstr        [0x000000000016e5e0-0x0000000000189067)  0x001275e0 0x0001aa87 0x00000002 appname.__TEXT.__cstring
  0x00000004 data-cstr        [0x0000000000189067-0x00000000001a5017)  0x00142067 0x0001bfb0 0x00000002 appname.__TEXT.__objc_methname
  0x00000005 data-cstr        [0x00000000001a5017-0x00000000001a767a)  0x0015e017 0x00002663 0x00000002 appname.__TEXT.__objc_classname
  0x00000006 data-cstr        [0x00000000001a767a-0x00000000001abe0c)  0x0016067a 0x00004792 0x00000002 appname.__TEXT.__objc_methtype
  0x00000007 regular          [0x00000000001abe10-0x00000000001ac1b8)  0x00164e10 0x000003a8 0x00000000 appname.__TEXT.__const
  0x00000008 regular          [0x00000000001ac1b8-0x00000000001aeb20)  0x001651b8 0x00002968 0x00000000 appname.__TEXT.__gcc_except_tab
  0x00000009 regular          [0x00000000001aeb20-0x00000000001aeb46)  0x00167b20 0x00000026 0x00000000 appname.__TEXT.__ustring
  0x0000000a code             [0x00000000001aeb48-0x00000000001af000)  0x00167b48 0x000004b8 0x80000408 appname.__TEXT.__symbolstub1
  0x00000300 container        [0x00000000001af000-0x00000000001ef000)  0x00168000 0x00040000 0x00000000 appname.__DATA
  0x0000000b data-ptrs        [0x00000000001af000-0x00000000001af4b8)  0x00168000 0x000004b8 0x00000007 appname.__DATA.__lazy_symbol
  0x0000000c data-ptrs        [0x00000000001af4b8-0x00000000001af810)  0x001684b8 0x00000358 0x00000006 appname.__DATA.__nl_symbol_ptr
  0x0000000d regular          [0x00000000001af810-0x00000000001b2918)  0x00168810 0x00003108 0x00000000 appname.__DATA.__const
  0x0000000e objc-cfstrings   [0x00000000001b2918-0x00000000001ba8d8)  0x0016b918 0x00007fc0 0x00000000 appname.__DATA.__cfstring
  0x0000000f data-ptrs        [0x00000000001ba8d8-0x00000000001baf1c)  0x001738d8 0x00000644 0x10000000 appname.__DATA.__objc_classlist
  0x00000010 regular          [0x00000000001baf1c-0x00000000001baf4c)  0x00173f1c 0x00000030 0x10000000 appname.__DATA.__objc_nlclslist
  0x00000011 regular          [0x00000000001baf4c-0x00000000001bafa0)  0x00173f4c 0x00000054 0x10000000 appname.__DATA.__objc_catlist
  0x00000012 regular          [0x00000000001bafa0-0x00000000001bafa4)  0x00173fa0 0x00000004 0x10000000 appname.__DATA.__objc_nlcatlist
  0x00000013 regular          [0x00000000001bafa4-0x00000000001bb078)  0x00173fa4 0x000000d4 0x00000000 appname.__DATA.__objc_protolist
  0x00000014 regular          [0x00000000001bb078-0x00000000001bb080)  0x00174078 0x00000008 0x00000000 appname.__DATA.__objc_imageinfo
  0x00000015 data-ptrs        [0x00000000001bb080-0x00000000001e0d40)  0x00174080 0x00025cc0 0x00000000 appname.__DATA.__objc_const
  0x00000016 data-cstr-ptr    [0x00000000001e0d40-0x00000000001e4420)  0x00199d40 0x000036e0 0x10000005 appname.__DATA.__objc_selrefs
  0x00000017 regular          [0x00000000001e4420-0x00000000001e442c)  0x0019d420 0x0000000c 0x00000000 appname.__DATA.__objc_protorefs
  0x00000018 data-ptrs        [0x00000000001e442c-0x00000000001e4ab8)  0x0019d42c 0x0000068c 0x10000000 appname.__DATA.__objc_classrefs
  0x00000019 data-ptrs        [0x00000000001e4ab8-0x00000000001e4e48)  0x0019dab8 0x00000390 0x10000000 appname.__DATA.__objc_superrefs
  0x0000001a regular          [0x00000000001e4e48-0x00000000001e6184)  0x0019de48 0x0000133c 0x00000000 appname.__DATA.__objc_ivar
  0x0000001b data-ptrs        [0x00000000001e6184-0x00000000001ea02c)  0x0019f184 0x00003ea8 0x00000000 appname.__DATA.__objc_data
  0x0000001c data             [0x00000000001ea030-0x00000000001ed978)  0x001a3030 0x00003948 0x00000000 appname.__DATA.__data
  0x0000001d zero-fill        [0x00000000001ed980-0x00000000001edce0)  0x00000000 0x00000000 0x00000001 appname.__DATA.__bss
  0x0000001e zero-fill        [0x00000000001edce0-0x00000000001edce8)  0x00000000 0x00000000 0x00000001 appname.__DATA.__common
  0x00000400 container        [0x00000000001ef000-0x0000000000207000)  0x001a8000 0x00015bf0 0x00000000 appname.__LINKEDIT
The next step is to convert that output into LLDB commands to actually dump the data in those memory sections. You can probably skip the sections typed zero-fill or code. For example, take the following output:
0x00000003 data-cstr        [0x000000000016e5e0-0x0000000000189067)  0x001275e0 0x0001aa87 0x00000002 appname.__TEXT.__cstring
Into the LLDB command:
memory read --outfile ~/0x00000003data-cstr 0x000000000016e5e0 0x0000000000189067 --force
This command is telling LLDB to dump the memory from address 0x000000000016e5e0 to 0x0000000000189067 and put it into the file 0x00000003data-cstr.
(lldb) memory read --outfile ~/0x00000003data-cstr 0x000000000016e5e0 0x0000000000189067 --force
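Typing one memory read command per section gets tedious. The helper below is a sketch that parses the image dump sections listing shown earlier and prints a command for each section, skipping the code, zero-fill, and container entries as suggested; the output file naming simply mimics the example above.

import re
import sys

ROW = re.compile(r"^(?P<sect>0x[0-9a-f]+)\s+(?P<type>[\w-]+)\s+"
                 r"\[(?P<start>0x[0-9a-f]+)-(?P<end>0x[0-9a-f]+)\)", re.IGNORECASE)

SKIP = {"code", "zero-fill", "container"}

def emit_commands(listing):
    for line in listing.splitlines():
        m = ROW.match(line.strip())
        if not m or m.group("type") in SKIP:
            continue
        outfile = f"~/{m.group('sect')}{m.group('type')}"
        print(f"memory read --outfile {outfile} {m.group('start')} {m.group('end')} --force")

# emit_commands(sys.stdin.read())  # e.g. paste the section listing on stdin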
You should not see any output from the memory read command other than the file being created. Once you have all of the files, search them using your favorite search tool or even a text editor. Search for sensitive data (credit card numbers, passwords, etc.). The files will contain information similar to the following:
0x0016e5e0: 3f 3d 26 2b 00 3a 2f 3d 2c 21 24 26 27 28 29 2a  ?=&+.:/=,!$&'()*
0x0016e5f0: 2b 3b 5b 5d 40 23 3f 00 00 62 72 61 6e 64 4c 6f  +;[]@#?..brandLo
0x0016e600: 67 6f 2e 70 6e 67 00 54 72 61 64 65 47 6f 74 68  go.png.TradeGoth
0x0016e610: 69 63 4c 54 2d 42 6f 6c 64 43 6f 6e 64 54 77 65  icLT-BoldCondTwe
0x0016e620: 6e 74 79 00 4c 6f 61 64 69 6e 67 2e 2e 2e 00 4c  nty.Loading....L
0x0016e630: 6f 61 64 69 6e 67 00 76 31 32 40 3f 30 40 22 4e  oading.v12@?0@"N
0x0016e640: 53 44 61 74 61 22 34 40 22 45 70 73 45 72 72 6f  SData"4@"EpsErro
0x0016e650: 72 22 38 00 6c 6f 61 64 69 6e 67 50 61 67 65 54  r"8.loadingPageT
0x0016e660: 79 70 65 00 54 69 2c 4e 2c 56 5f 6c 6f 61 64 69  ype.Ti,N,V_loadi
0x0016e670: 6e 67 50 61 67 65 54 79 70 65 00 6f 76 65 72 76  ngPageType.overv
0x0016e680: 69 65 77 52 65 71 52 65 73 48 61 6e 64 6c 65 72  iewReqResHandler
0x0016e690: 00 54 40 22 45 70 73 4f 76 65 72 76 69 65 77 52  .T@"EpsOverviewR
0x0016e6a0: 65 71 52 65 73 48 61 6e 64 6c 65 72 22 2c 26 2c  eqResHandler",&,
0x0016e6b0: 4e 2c 56 5f 6f 76 65 72 76 69 65 77 52 65 71 52  N,V_overviewReqR
0x0016e6c0: 65 73 48 61 6e 64 6c 65 72 00 41 50 49 43 61 6c  esHandler.APICal
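To automate that search, a sketch along these lines walks the dump files and reports the offsets of obvious secrets. Both patterns and the file-name glob are illustrative assumptions; extend them for whatever sensitive terms your application handles.

import pathlib
import re

PATTERNS = {
    "possible card number": re.compile(rb"\b\d{13,16}\b"),
    "password keyword": re.compile(rb"passw(or)?d", re.IGNORECASE),
}

def scan_dumps(dump_dir):
    # Assumes the dump files follow the "0x<sect><type>" naming used above.
    for path in sorted(pathlib.Path(dump_dir).glob("0x*")):
        data = path.read_bytes()
        for label, pattern in PATTERNS.items():
            for m in pattern.finditer(data):
                print(f"{path.name} @ {m.start():#x}: {label}")

# scan_dumps("/Users/you")  # wherever the memory read files were written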
Have fun looking at iOS application memory, and use this process only with good intentions. As stated in the previously mentioned blog: this technique can be used to determine whether the application is failing to remove sensitive information from memory once the instantiated classes are done with the data. All applications should de-allocate the memory used by classes and methods that handle sensitive information; otherwise you run the risk of the information sitting in memory for an attacker to see.

The Way Back Machine - Microsoft Word for Windows 1.1a
March 27, 2014

On March 25, 2014, Microsoft released the source code for Microsoft Word for Windows 1.1a. They said they released it "to help future generations of technologists better understand the roots of personal computing."

I thought it would be interesting to perform an automated code review on it using Checkmarx, to see how they did with respect to security. The source consisted mainly of C++ code (376,545 lines of code) as well as code written in assembler. The assembler code was not scanned because Checkmarx (like other automated code scanners) does not support assembler. What came out of the tool was interesting.

Checkmarx indicated that the risk in the code is:

[Image: Checkmarx overall risk rating]

The distribution of risk from Informational to High:

[Image: distribution of findings from Informational to High]

You have to remember that this code is from the 1980s. Many people did not have a concept of secure code and the development tools did not address security at all.

The top five vulnerabilities are as follows:

[Image: top five vulnerability types]

From the code that I looked at, most of the issues come from the use of unsafe functions. For example:

if (!strcmp(szClass, "BEGDATA"))
    strcpy(szNameSeg, "Data");
else
    strcpy(szNameSeg, szName);
nSegCur = nSeg;

The function strcpy has since been superseded by the safer strncpy, which combats buffer overflows by requiring you to pass a length; safer variants like this were not in common use when this code was written. The code also contains 123 instances of the goto statement. For example:

LError:
    cmdRet = cmdError;
    goto LRet;
}
pdod = PdodDoc(doc);

From the MSDN web site, Microsoft states, "It is good programming style to use the break, continue, and return statements instead of the goto statement whenever possible. However, because the break statement exits from only one level of a loop, you might have to use a goto statement to exit a deeply nested loop." I am not sure of the C++ syntax back in the 1980s, but maybe break, continue, and return statements did not exist.
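For a rough sense of how prevalent these patterns are in a legacy code base, a plain-text scan can tally the unsafe calls and goto statements before (or alongside) a commercial scanner. This is a sketch with obvious limits: a textual match cannot tell a real call from one inside a comment or a string literal.

import pathlib
import re
from collections import Counter

PATTERN = re.compile(r"\b(strcpy|strcat|sprintf|gets|goto)\b")

def tally(src_root):
    counts = Counter()
    for path in pathlib.Path(src_root).rglob("*.c"):
        counts.update(PATTERN.findall(path.read_text(errors="ignore")))
    return counts

# print(tally("/path/to/word-1.1a-source"))  # hypothetical checkout of the released code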

You can get a copy of the code for both MS Word and MS-DOS here: https://www.computerhistory.org/press/ms-source-code.html. Just remember that there are now better ways to write code.

Below is the complete list of issues found in the code:

Vulnerability Type                     Occurrences  Severity
Buffer Overflow unbounded              180          High
Buffer Overflow StrcpyStrcat           22           High
Format String Attack                   18           High
Buffer Overflow OutOfBound             12           High
Buffer Overflow cpycat                 3            High
Use of Uninitialized Pointer           135          Medium
Dangerous Functions                    58           Medium
Use of Uninitialized Variable          41           Medium
Char Overflow                          35           Medium
Stored Format String Attack            19           Medium
Stored Buffer Overflow cpycat          11           Medium
MemoryFree on StackVariable            4            Medium
Short Overflow                         2            Medium
Integer Overflow                       1            Medium
Memory Leak                            1            Medium
NULL Pointer Dereference               341          Low
Potential Path Traversal               24           Low
Unchecked Array Index                  18           Low
Unchecked Return Value                 11           Low
Potential Off by One Error in Loops    6            Low
Use of Insufficiently Random Values    3            Low
Potential Precision Problem            2            Low
Size of Pointer Argument               1            Low
Methods Without ReturnType             500          Information
Unused Variable                        310          Information
GOTO Statement                         132          Information
Empty Methods                          9            Information
Potential Off by One Error in Loops    6            Information

This code is a good example of what not to do.

Programming languages and tools have evolved to make your application much more secure, but only if you teach your developers the concepts of secure coding.

PA-DSS 3.0 – What to Expect
November 14, 2013

The PCI Council has just released PA-DSS version 3.0. They have added new requirements, removed one, and changed a few. How this affects your application really depends on how you implemented security.

What's Been Added

Req. 3.4 Payment application must limit access to required functions/resources and enforce least privilege for built-in accounts:

  • By default, all application/service accounts have access to only those functions/resources specifically needed for purpose of the application/service account.
  • By default, all application/service accounts have minimum level of privilege assigned for each function/resource as needed for the application/service account.
Your application setup needs to make sure that it uses or sets only the privileges needed to do the work and does not grant excessive permissions. This is intended for built-in accounts as well as service accounts. Make sure you have documented the permissions needed by any default or service accounts; the auditor will need to verify the documentation against what was implemented.

Req. 5.1.5 – Payment application developers to verify integrity of source code during the development process

You need to make sure that any source control tools (e.g., Visual SourceSafe) are configured so that only the people who do development can make changes to the code. This does not preclude giving other users read access, but you need to minimize who has write access.

Req. 5.1.6 – Payment applications to be developed according to industry best practices for secure coding techniques.

You must develop the application with least privilege to ensure insecure assumptions are not introduced into it, and prevent an attacker from obtaining sensitive information from an application failure (including fail-safe defaults) that could then be used to mount subsequent attacks. You must also ensure that security is applied to all accesses and inputs into the application, to avoid an input channel being left open to compromise. This includes how sensitive data and the PAN are handled in memory; the PCI Council is trying to prevent capture of this data by screen scraping. Try to encrypt this data in memory and keep it there only for a short period of time.

Req. 5.2.10 - Broken authentication and session management

You need to make sure, in your web application, that:
  • Any session cookies are marked as secure.
  • The session is never to be passed on the URL, which would allow it to be logged by the web server.
  • The web application must time out the session after a certain number of minutes. Once timed out, the user must re-authenticate to get access to the application.
  • The session id must change when there is a change in permissions. For example, a session id is set when an anonymous user accesses the login page and after successful authentication, the session id must change to a different value.
  • If a user logs out of the application, you need to delete the session on both the client and the server. (A minimal sketch of these session rules follows this list.)
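Here is a minimal, standard-library-only sketch of those session rules: unpredictable ids, rotation on any change in permissions, an idle timeout, and server-side deletion on logout. It is illustrative, not a drop-in implementation; in particular, the cookie carrying the id must still be set with the Secure flag and the id must never be placed on the URL.

import secrets
import time

IDLE_TIMEOUT = 15 * 60          # seconds; assumed policy value
_sessions = {}                  # session id -> {"user": ..., "last_seen": ...}

def create_session(user):
    sid = secrets.token_urlsafe(32)            # unpredictable session id
    _sessions[sid] = {"user": user, "last_seen": time.time()}
    return sid

def touch(sid):
    """Validate a session and enforce the idle timeout; returns the user or None."""
    s = _sessions.get(sid)
    if s is None or time.time() - s["last_seen"] > IDLE_TIMEOUT:
        _sessions.pop(sid, None)               # timed out: force re-authentication
        return None
    s["last_seen"] = time.time()
    return s["user"]

def rotate(sid):
    """Issue a new id on any change in permissions (e.g. after login)."""
    new_sid = secrets.token_urlsafe(32)
    _sessions[new_sid] = _sessions.pop(sid)
    return new_sid

def destroy(sid):
    _sessions.pop(sid, None)                   # server side of logout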

Req. 5.4 - Payment application vendors to incorporate versioning methodology for each payment application.

This is not a change in the way you write your code but a process change, and, like so many other requirements, it has to be documented both internally and in the Implementation Guide. Many companies already do this, but make sure you have a defined method of changing version numbers.
  • Details of how the elements of the version-numbering scheme are in accordance with requirements specified in the PA-DSS Program Guide.
  • The format of the version-numbering scheme is specified and includes details of number of elements, separators, character set, etc. (e.g., 1.1.1.N, consisting of alphabetic, numeric, and/or alphanumeric characters).
  • A definition of what each element represents in the version-numbering scheme (e.g., type of change, major, minor, or maintenance release, wildcard, etc.)
  • Definition of elements that indicate use of wildcards (if used). For example, a version number of 1.1.x would cover specific versions 1.1.2 and 1.1.3, etc.
  • If an internal version mapping to published versioning scheme is used, the versioning methodology must include mapping of internal versions to the external versions
  • You must have a process in place to review application updates for conformity with the versioning methodology prior to release.

Req. 5.5 - Risk assessment techniques (for example, application threat modeling) are used to identify potential application security design flaws and vulnerabilities during the software-development process. Risk assessment processes include the following:

  • Coverage of all functions of the payment application, including but not limited to, security-impacting features and features that cross trust-boundaries.
  • Assessment of application decision points, process flows, data flows, data storage, and trust boundaries.
  • Identification of all areas within the payment application that interact with PAN and/or SAD or the cardholder data environment (CDE), as well as any process-oriented outcomes that could lead to the exposure of cardholder data.
  • A list of potential threats and vulnerabilities resulting from cardholder data flow analyses and assign risk ratings (for example, high, medium, or low priority) to each.
  • Implementation of appropriate corrections and countermeasures during the development process.
  • Documentation of risk assessment results for management review and approval.
What the PCI Council wants is to make sure that risks in your application are assessed appropriately and that the process covers all aspects of your application, including any third-party components (DLLs, etc.). You will need to document the threat modeling you have done against your application. If you have never done one, or are unsure how, Microsoft has a good process and provides a free tool; check out this page for more information: https://msdn.microsoft.com/en-us/library/ff648644.aspx

Req. 5.6 Software vendor must implement a process to document and authorize the final release of the application and any application updates. Documentation includes:

  • Signature by an authorized party to formally approve release of the application or application update
  • Confirmation that secure development processes were followed by the vendor.
Make sure to record the approval of the release of the software and patches. This is to confirm that your secure development processes were followed.

Req. 7.3 - Include release notes for all application updates, including details and impact of the update, and how the version number was changed to reflect the application update.

Make sure you are publishing your release notes and that they include the customer impact.

Req. 10.2.2 - If vendors or integrators/resellers can access customers’ payment applications remotely, a unique authentication credential (such as a password/phrase) must be used for each customer environment.

This one is simple enough: your support people cannot use the same password to access different customers. Avoid the use of repeatable formulas to generate passwords, which are easily guessed; such credentials become known over time and can be used by unauthorized individuals to compromise the vendor's customers. (A sketch of per-customer credential generation follows.)
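One simple way to avoid formula-derived support passwords is to generate a random credential per customer environment and keep it only in a credential vault. The vault call below is a placeholder, not a real API.

import secrets

def new_support_credential(customer_id):
    """Return a unique, non-derivable credential for one customer environment."""
    return customer_id, secrets.token_urlsafe(24)

# cid, pw = new_support_credential("customer-42")
# vault.store(cid, pw)  # hypothetical vault integration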

Req. 13.1.1 - Provides relevant information specific to the application for customers, resellers, and integrators to use.

The Implementation Guide must state the name and version of the software it is intended for. It must also include the application dependencies (e.g., SQL Server, PCCharge).

Req. 14.1 – Provide information security and PA-DSS training for vendor personnel with PA-DSS responsibility at least annually

These training materials are for your personnel involved in the development and support of your application. The materials must be about PA-DSS and information security.

Req. 14.2 - Assign roles and responsibilities to vendor personnel including the following:

  • Overall accountability for meeting all the requirements in PA-DSS
  • Keeping up to date with any changes in the PCI SSC PA-DSS Program Guide
  • Ensuring secure coding practices are followed
  • Ensuring integrators/resellers receive training and supporting materials
  • Ensuring all vendor personnel with PA-DSS responsibilities, including developers, receive training
You must assign one or more people to be responsible for meeting the requirements in PA-DSS. These people must keep up to date on changes in PA-DSS and make sure the requirements are met. Ensure that each person's responsibilities are documented.

What's Been Removed

Req. 2.4 - If disk encryption is used (rather than file- or column-level database encryption), logical access must be managed independently of native operating system access control mechanisms (for example, by not using local user account databases). Decryption keys must not be tied to user accounts.

None of the PA vendors I worked with used disk encryption; they use file- or table/column-level database encryption, so it makes sense to remove this requirement. In addition, disk encryption is only effective at preventing data loss due to physical theft, which is not a significant concern in the majority of datacenters.

What's Been Significantly Changed

Req. 3.3.2 - Use a strong, one-way cryptographic algorithm, based on approved standards to render all payment application passwords unreadable during storage. Each password must have a unique input variable that is concatenated with the password before the cryptographic algorithm is applied.

It appears that encrypting the password is no longer acceptable: in your application, you must use a strong, one-way cryptographic algorithm (a hash) together with a salt value. Review your application's storage to make sure you are using a hashing algorithm with a salt. They do note that the salt does not have to be unpredictable or a secret. (A sketch using a standard key-derivation function follows.)
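A sketch of such storage with the Python standard library: PBKDF2 (a strong one-way function) with a unique random salt concatenated into the computation and stored alongside the digest. The iteration count is an assumption; tune it to your hardware.

import hashlib
import hmac
import os

ITERATIONS = 200_000  # assumed work factor

def hash_password(password):
    salt = os.urandom(16)  # unique per password; need not be secret or unpredictable
    digest = hashlib.pbkdf2_hmac("sha256", password.encode(), salt, ITERATIONS)
    return salt, digest

def verify_password(password, salt, digest):
    candidate = hashlib.pbkdf2_hmac("sha256", password.encode(), salt, ITERATIONS)
    return hmac.compare_digest(candidate, digest)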

Req. 4.2.5 - Use of, and changes to the application’s identification and authentication mechanisms (including but not limited to creation of new accounts, elevation of privileges, etc.), and all changes, additions, deletions to application accounts with root or administrative privileges.

Any changes to accounts in the application must be audited.

Outsourcing application development – what is missing?
October 3, 2013

I have been reading a few articles on outsourcing application development. Many of them have good information on what to look for and how to work with the companies doing the development. However, I have yet to see any of these articles talk about security and how to handle it in the outsourcing process.

In any development process, in-sourced or out-sourced, security needs to be considered. The developers need to be trained in secure coding techniques, and the architects must have training and experience in implementing security throughout the software development life cycle. These should never be excluded or glossed over. The development company you are considering needs to prove this, and these requirements must be written into the contract. Have them show you the classes these people have taken, and make sure they are up to date on the latest security vulnerabilities. You cannot have a developer take a course five years ago and consider their skills current.

What about after the code is completed? Do your contracts specify a security review? Both an application penetration test and a code review must be done before the release of the application. The contract must also specify that any security vulnerabilities be mitigated, and that testing will repeat until all high- and medium-level vulnerabilities are mitigated. Also, have the security testing done by your own company or a third party. It does not matter where the developers are from, whether India, China, or the United States: there are bad or malicious programmers out there, so trust them but verify their work.

Why does one QSA pass me and another would not?
April 11, 2013

A question came up about a PCI audit that was performed for one of our customers. They had just finished their PCI audit and passed. I am now working with them on a new software application, and there is a vulnerability in their application that was ranked as high. It was discovered in an application penetration test back in 2011 but was accepted by the company as a business risk, resulting in the vulnerability being marked closed because of this acceptance.
The client wanted to include this same functionality in a new application, resulting in the new application containing the vulnerability. The QSA who performed their last PCI audit should not have passed them, because this vulnerability violates Requirement 6.5.6, which states: "Prevent common coding vulnerabilities in software development processes, to include all 'High' vulnerabilities identified in the vulnerability identification process (as defined in PCI DSS Requirement 6.2)." Note that, according to PCI Requirement 6.2, a CVSS score of 4 or above is considered a "high" risk vulnerability. Because the company has not fixed this vulnerability, they could be fined by their bank. Furthermore, the vulnerability could pose financial liability and reputation risk for the company: if customers find out about it, they may question the company's ability as a trusted vendor.

So why did the previous QSA pass them? Without discussing this with the QSA, one can assume that since the issue was closed, it was presumed fixed. Remember that when the auditor is performing the audit, they are presented with a lot of information; it is a lot like trying to drink from a fire hose. Things like this vulnerability can be missed (it was one finding out of many), or the auditor may have assumed that since the finding was closed, it had been remediated. Another reason may be the way an auditor interprets the PCI requirements: one auditor's interpretation may differ from another's. It does not really matter now why the company passed their audit even though they did not fix the vulnerability; the issue now is that they need to fix it before moving forward.

Code Review – is automated testing enough?
February 26, 2013

We have worked with many companies that are following the letter of the law, the law being the PCI Council's requirement (6.3.2) that all code must be reviewed prior to release. It states: "6.3.2 Review of custom code prior to release to production or customers in order to identify any potential coding vulnerability. Note: This requirement for code reviews applies to all custom code (both internal and public-facing), as part of the system development life cycle. Code reviews can be conducted by knowledgeable internal personnel or third parties. Web applications are also subject to additional controls, if they are public facing, to address ongoing threats and vulnerabilities after implementation, as defined at PCI DSS Requirement 6.6." NetSPI has reviewed and used a number of automated scanning tools, including HP's Fortify SCA, Ounce Labs (now part of IBM's AppScan toolset), Veracode, and Checkmarx.
These tools do a fine job at what they were built for: performing an automated scan of the source code. All of these tools meet the 6.3.2 requirement, but they simply are not enough. The tools miss many of the problems that a manual review finds, such as authentication and authorization vulnerabilities, among others. In addition, many companies provide software-as-a-service (SaaS) solutions for code reviews, and by using such a service a company meets the requirement. These services make it easy to do code reviews: you upload the binaries, and in a few days you get a report with many findings. So now what do you do with this report? Many organizations throw the report back at the developers and say "fix it." The developers look at the overwhelming number of findings and start applying resources to fix them. What many companies have experienced is that these reports contain so many false positives that the developers just give up. Are you really meeting the requirement? Of course you are, but how many vulnerabilities are you missing? Based on what NetSPI has experienced, maybe half of them.

Mobile Application Testing - Where is it?
January 23, 2013

I was reading a few articles about how mobile devices, because of their popularity, are now the focus of malicious hackers. I thought this was interesting because many companies are developing applications for the mobile platforms and, based on what I have heard, really do not have a formal process for testing these applications for security. Back in March, NetSPI put on a webinar on how to test for security issues in a mobile application, and gave the same presentation at Secure360 and OWASP NY. I was hoping to see other companies putting out information on this kind of testing, and I have seen a few, but there has not been enough emphasis on mobile application testing. Maybe I am not on the right mailing lists, but many lists contain articles on defending the device itself, with much of the emphasis on MDMs. This is good, but it does not prevent the application itself from doing a poor job of protecting sensitive data. A couple of questions to ask yourself about securing a mobile application:

  • Do you know if the developers, either internal or third party, have put a back door in the application?
  • Do you know if your application is storing passwords or keys on its file system in the clear?
  • What if someone puts a malicious application on the Google or Apple store and it starts collecting this information?
  • How would your company's reputation change because of this, once it gets out to the press?

At a minimum, have the application tested by someone not involved in its development; this can be internal personnel or an external company. At best, have both the application and the code reviewed for security flaws. What are your reasons for not doing this? "We do not know how." "We do not have the manpower." "There is not enough time." These are just excuses. Learn the processes, or call a company (such as NetSPI) to do the testing for you, but get it done and get it secured.

Oracle's stealth password cracking vulnerability
January 21, 2013

I just read an article about how Oracle Database suffers from a "stealth password cracking vulnerability." This means someone exploiting it can brute-force your passwords and you would never know about it. Oracle fixed this vulnerability in the new version of the authentication protocol but decided not to patch the previous version, so everyone running Oracle 11g will need to upgrade. Upgrading is going to be an issue for many companies running Oracle 11g, since they either cannot or will not upgrade for many reasons. Maybe it is time to rethink this policy in your organization. A paper has been published about the problems in the Oracle authentication protocol, so your databases may be being attacked right now. Because many companies do not upgrade, this vulnerability is going to be around for a long time.

Happy New Year – Have you made your application testing resolution yet?
January 1, 2013

Now that we have come upon the new year, it is time to resolve to test your applications both statically (code review) and dynamically (penetration test). You may be telling yourself that you do not need one or both of these tests, but why? Applications are being attacked with a passion from all sides, including from the inside of your company. Individually, neither type of test can find all of the vulnerabilities in your applications, so if you skip one, there will be vulnerabilities you have missed. If you do have these tests done (one or both), make sure to fix the problems that are discovered. Do not assume that they will not be taken advantage of in the future. We have often heard "Oh, this application is only available internally, nothing will happen," or "No one can take advantage of that vulnerability," or even better, "We will just wait to fix it when we have time." How can you be sure that no one will find the vulnerability? NetSPI has some smart people, but the bad guys also have smart people. If we can find a vulnerability, given enough time, someone else will also find it. And when they do, what will they do with it? Steal your information, steal some money, or even worse, ruin your reputation.
Will you ever have time to fix these vulnerabilities? They may be put on your list of fixes, but priorities change, and marketing may put something on the list that just absolutely has to be added to the application; there goes your time to fix the problems. Now repeat after me, "I will have my applications code reviewed and pen tested this year." [post_title] => Happy New Year – Have you made your application testing resolution yet? [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => happy-new-year-have-you-made-your-application-testing-resolution-yet [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:06:05 [post_modified_gmt] => 2021-04-13 00:06:05 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1135 [menu_order] => 776 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [11] => WP_Post Object ( [ID] => 1183 [post_author] => 26 [post_date] => 2012-11-19 07:00:18 [post_date_gmt] => 2012-11-19 07:00:18 [post_content] => Physical artifacts are amazing little (okay, sometimes big) things that give us insight into how earlier civilizations lived, worked, and played. These rediscovered relics provide useful information that we wouldn't otherwise have about those time frames and people. Virtual artifacts are rather similar, just less tangible. Virtual artifacts run the gamut from computer-generated artwork and family photographs to other critical files denoting and cataloging our (virtual) lives. However, they also include forgotten or discarded files that were never deleted (of course, the true digital archaeologist knows how to dig even deeper to get files not securely deleted). As such, virtual artifacts provide keen insight into a system and the system’s owner, including files that we probably would have preferred never see the light of day again. So why should we concern ourselves with these little remnants in our organization’s computer systems? The obvious concern is that of hackers (both internal and external), but virtual artifacts can affect your compliance efforts even without hackers as part of the equation. Depending upon the quantity of information stored in the files (such as data dumps from databases, debug logs, etc.) you may face some potential breach notification issues with significant consequences. These may also undermine all the scoping efforts performed to date, specifically relating to PCI: if those files remain on a file server that is discovered during an assessment, your cardholder data environment just ballooned beyond the comfort level. During ISO reviews, these artifacts may be as helpful as a hostile witness to your (re)certification case. Alongside these are internal policy violations which may compromise sensitive internal information (employee information such as payroll, etc.). So how do we combat these virtual artifacts within our organization? In essence, where do we start to dig within our virtual landscape? As unfavorable as it may seem, you start at the system most likely to contain such files and just keep going. There are tools that can help automate this process. First, think like an attacker; NetSPI’s Assessment Team does just that during penetration tests. They look for unprotected and residual data (the files that are just “left out there”), including sensitive data (PII, PHI, cardholder data, passwords, etc.), through generic file system searches.
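A minimal sketch of what such a generic file system search can look like follows; the patterns, the file handling, and the share path are illustrative assumptions rather than NetSPI's actual tooling, so treat it as a starting point, not a finished scanner.

import os
import re

# Hypothetical patterns for sensitive-looking content; tune these for your environment.
PATTERNS = {
    "SSN": re.compile(r"\b\d{3}-\d{2}-\d{4}\b"),
    "card number": re.compile(r"\b(?:\d[ -]?){13,16}\b"),
    "password": re.compile(r"password\s*[:=]", re.IGNORECASE),
}

def scan_tree(root):
    """Walk a directory tree and report files containing sensitive-looking strings."""
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            path = os.path.join(dirpath, name)
            try:
                with open(path, "r", errors="ignore") as handle:
                    text = handle.read()
            except OSError:
                continue  # unreadable file; skip it
            for label, pattern in PATTERNS.items():
                if pattern.search(text):
                    print(f"{path}: possible {label}")

scan_tree("/srv/shared")  # assumed share root; point this at your own file servers

Even a crude scan like this tends to turn up the debug logs and database dumps that nobody remembered were there.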
While not overly glamorous, sometimes the simplest method is the best. Then they scour multiple systems at once through spider or crawler tools, and even look at databases and their output. Speaking of which, Scott Sutherland has a new blog post that includes finding potentially sensitive information within SQL databases. They find where programmers are leaving their specific output files, debug logs, etc. Sometimes the most nondescript system can have that file you don’t want to see the light of day. So how often should you be performing these internal reviews? It partly depends on your organization’s propensity to leave virtual golden idols lying around and how effective your defenses/controls are. If movies have taught us anything, it is that the truly daring individual can overcome most controls if the gains are substantial enough.   The best defense is to have guidelines for employees (especially those in positions that generate, or even have the ability to generate, such files) to securely delete files no longer needed (i.e., don’t store the golden idols on pedestals where the sunlight gleams off them like a beacon). For a more realistic example, an application owner or custodian should ensure that their application’s logs that include sensitive information are properly secured behind active access controls, temporary logs are immediately deleted when no longer needed, the passwords to the system are secured (encrypted), etc. Some may respond and say that the Data Loss Prevention (DLP) tool will catch these, so we are good to go. However, some organizations implement a DLP tool focusing on one aspect only (Network, Storage, or End-Point). Each of these components can be overcome through various means. Blowguns (Storage controls), weight-monitoring pedestals (End-Point controls), and giant boulders closing the opening (Network controls) can all be bypassed by careful and skilled virtual archaeologists. It’s not uncommon for a found stray file to compromise an organization’s compliance efforts. By reviewing your environment proactively you also help make the case that your organization has performed the necessary due diligence should an incident occur. But then the point is to find those files first, leaving nothing for the tomb raiders. [post_title] => Compliance Impact of Virtual Artifacts [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => compliance-impact-of-virtual-artifacts [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:06:03 [post_modified_gmt] => 2021-04-13 00:06:03 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1183 [menu_order] => 781 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [12] => WP_Post Object ( [ID] => 1195 [post_author] => 26 [post_date] => 2012-08-09 07:00:41 [post_date_gmt] => 2012-08-09 07:00:41 [post_content] => During PA-DSS audits, NetSPI is often asked about what training options payment application vendors have for developers. These questions are in reference to PA-DSS requirement 5.2.a. This requirement states: Obtain and review software development processes for payment applications (internal and external, and including web-administrative access to product). Verify the process includes training in secure coding techniques for developers, based on industry best practices and guidance. The PCI Council is working with SANS on a set of courses that PA-DSS vendors can use. 
These courses include fundamental courses for developers and security staff as well as development-language-specific courses. There are also courses for senior-level developers, testers, and managers.   An example of one of the courses is Secure Coding for PCI Compliance. This is a two-day course on the OWASP top ten issues and is for developers with experience in one of the following languages: Perl, PHP, C, C++, Java, or Ruby. If you are a payment application vendor needing to start or enhance your training, look at the SANS web site - https://www.sans.org/visatop10/. These courses should help you get through requirement 5.2.a. Please note, NetSPI is not associated with SANS in any way. [post_title] => PA-DSS vendors now have training options [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => pa-dss-vendors-now-have-training-options [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:06:10 [post_modified_gmt] => 2021-04-13 00:06:10 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1195 [menu_order] => 793 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [13] => WP_Post Object ( [ID] => 1196 [post_author] => 26 [post_date] => 2012-08-02 07:00:18 [post_date_gmt] => 2012-08-02 07:00:18 [post_content] => The PCI Council recently announced a new certification program called the Qualified Integrators and Resellers (QIR) Program. In my opinion this fills a gap that has existed for specific environments and typically reflected negatively on merchants or service providers that purchase off-the-shelf payment application solutions. Using a PA-DSS validated payment application is a requirement for merchants, as is using it in a PCI-DSS-compliant manner. However, the issue appears when resellers or integrators are not fully aware of how their implementation plan and methods impact the merchant, the entity ultimately responsible for compliance. The issue then manifests during a QSA-led assessment when it is discovered that the system was not implemented properly per the Implementation Guide (segmentation efforts were negated, etc.). As a QSA this is a hard conversation to have with my clients, especially since this usually means a non-compliant assessment and the merchant has to spend additional time or resources to resolve the issue. Now I understand that this certification program is not going to solve everything, but having integrators and resellers that are trained similarly to PA-QSAs and QSAs just helps everyone involved in the process to be on the same playing field. This results in the merchants and service providers reaping the largest slice of Benefit Pie. Questions will come up as to whether this program will be worth it or if it is going to last, since all indications lean towards this program being voluntary. While I get that the PCI Council’s official list of certified integrators and resellers may not be the first place the merchant or service providers go when selecting their next Point of Sale (POS) system (application features versus QIR certified reseller), they can insist that the POS vendor use QIR certified integrators, since in the end it is the merchant or service provider’s compliance status on the line. 
While details are still a little scarce since the program has not been rolled out just yet, more information on the QIR Program can be found on the PCI Council’s QIR program site at https://www.pcisecuritystandards.org/training/qir_training.php. The Council will also be hosting a webinar on August 16 and again on August 29. Additional information can be found at the PCI Council’s Training Webinar page. [post_title] => Filling the Void - QIR Program [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => filling-the-void-qir-program [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:06:05 [post_modified_gmt] => 2021-04-13 00:06:05 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1196 [menu_order] => 794 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [14] => WP_Post Object ( [ID] => 1197 [post_author] => 26 [post_date] => 2012-07-30 07:00:18 [post_date_gmt] => 2012-07-30 07:00:18 [post_content] => Many organizations have Incident Response plans. They go through the testing and send people through training, but when that incident happens and the alarm klaxons begin sounding up and down the hallways, the response isn’t what the organization expected.  This strikes a discordant tone since all audits (be they HIPAA, PCI, etc.) always come back clean as they pertain to Incident Response processes. I’d like to take a brief moment to point out a few pitfalls from “on paper” to application, but I’ll leave fixing them to you.

Training

Training is a serious investment for both the organization and the individual. Unless you employ a full-time Incident Response Team, this level of training is akin to insurance: until you need it, it doesn’t seem to really be needed, yet that doesn’t decrease its importance to your organization. Think of the team as similar to a volunteer fire department: training is needed even though the volunteers have day jobs. And continued training is critical.  Sending people to quality training versus having them read a book from Amazon is like sending that volunteer fire fighter to CPR class versus having them watch a YouTube video.

Testing

Tabletop testing is fine at first, but you should strive to get past the tabletop as quickly as possible since it doesn’t simulate the climate of an actual incident. You want the individuals that went to training to use the tools they trained on, assuming they aren’t using them daily. When using just tabletop exercises you won’t know if everyone is really cut out for Incident Response. Most individuals will agree to participate, but when the rubber meets the road at 3am and the pressure is on to properly contain the incident, are they still willing and able to perform? It’s difficult to admit, but not everyone is cut out for Incident Response. I’m not saying you should build a full simulated environment, but the individuals that went to training should be testing those tools and their skills. Attaching a test scenario to a disaster recovery test is often the most effective and easiest way to minimize additional downtime to production systems. Some tests can be performed that won’t have any impact on the networked environment, such as chain-of-custody process testing.

Lessons Learned

While this step often happens, it is sometimes performed more as an accusation arena with pointed fingers and tempers flaring. If needed, have a moderator, but keep to the task at hand: determining what happened, what the full impact was, what will prevent the event from happening again, and whether other systems are vulnerable. Write up an agenda of questions and keep people on task. But what’s really important is to follow up on the remediation work. One great way may be to plug any remediation efforts into your Risk Management program, the only caveat here being that you should keep everyone at the Lessons Learned event informed. If the Incident Response team is not involved in the remediation efforts (even if just informed) then they may not be aware of certain configuration changes that may be relevant for future Incident Responses.  It’s important that you practice the Lessons Learned process by performing it against your tests as well. As you consider these suggestions, ask yourself: if that event were to occur today, how ready will you really be? [post_title] => Incident Response – When Expectations Go Astray [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => incident-response-when-expectations-go-astray [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:06:07 [post_modified_gmt] => 2021-04-13 00:06:07 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1197 [menu_order] => 795 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [15] => WP_Post Object ( [ID] => 1202 [post_author] => 26 [post_date] => 2012-06-28 07:00:18 [post_date_gmt] => 2012-06-28 07:00:18 [post_content] => Certain events remind us of the important things; holidays may remind us of loved ones or perhaps how dysfunctional families can be. When our favorite (I use this term loosely) word processor crashes, forever losing the most insightful blog document ever written, we realize we should have saved that document with greater frequency. When such an event happens to others we can use it as a safe reminder for ourselves. Just like when Word crashes on the neighbor’s system, we can mumble, “should have saved more often…” whilst we hit the save button on the document that’s still titled Document1 on our own computer. In this same vein, let’s take a look at the recent events that befell LinkedIn and eHarmony. In light of the password breach at those two organizations, has your organization done anything? It may seem odd to ask what your organization did in regard to another’s incident, but this is a great opportunity for some security (re)awareness. Even if you don’t allow access to LinkedIn or eHarmony within your environment, this can be an excuse to engage your company employees because odds are there are many who have an account on at least one of those two sites. The focus of the message shouldn’t be on strong passwords (complexity, maximum age, etc.) – although those are still good topics. However, password strength and associated requirements are most likely covered already in your annual training programs and via policies (if they aren’t, they should be). Instead, discuss that which allows you to reach the audience on a personal level, and one that will hopefully have positive benefits within the work place. For this security awareness notice, center on the usage of passwords across multiple locations/sites. 
The incidents at LinkedIn and eHarmony involve the compromise of the password hashes (the hashes were copied outside of the respective sites). This doesn’t mean that the hashes for all affected users have been compromised (yet), but they can be cracked using brute force methods given enough time. Some have made light of the consequences of what can be done to their compromised LinkedIn accounts, but the true threat to users is if they use the same credentials on multiple sites. To cross the boundary from personal use to the workplace: what if the credentials match those within your organization? This is where we hope to raise awareness across the company to minimize this potential risk. This message should offer suggestions for using unique credentials on different sites/systems. While this may seem to suggest creating weaker passwords, or passwords that can be guessed easily enough ("eHarmony-password" versus "LinkedIn-password"), there are tools that can make it easier for individuals to track their personal passwords while keeping them strong. Tools like KeePass and PasswordSafe are local apps (they can also be put on USB flash drives – but only mention USB flash drives if they are allowed within your environment). There is also a "cloud" service in LastPass. If you decide to mention such tools, it is critical to include the notice to remember that master password! It’s often difficult to get people to actually pay attention to security alerts, but using an event that has personal associations across departmental lines, roles, and levels is hard to pass up. Take advantage of this one while it’s still hot. Hopefully, getting individuals to use unique passwords on different sites will carry over to the passwords used within your organization as well! [post_title] => Passwords: Strength and Longevity vs. Uniqueness [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => passwords-strength-and-longevity-vs-uniqueness [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:05:52 [post_modified_gmt] => 2021-04-13 00:05:52 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1202 [menu_order] => 799 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [16] => WP_Post Object ( [ID] => 1208 [post_author] => 26 [post_date] => 2012-05-04 07:00:18 [post_date_gmt] => 2012-05-04 07:00:18 [post_content] => For those that aren’t keeping track, June 30, 2012 is a day to mark on your calendar.  Not because of any special anniversaries or birthdays (although if yours does fall on that day then Congratulations!).  June 30 is the day that we can add one more validation point to our compliance lists from the PCI Data Security Standard.  The testing procedure for requirement 6.2 will transition the assignment of risk rankings to new vulnerabilities from optional to mandatory.  And yes, this does impact those filling out a Self-Assessment Questionnaire (SAQ) as well, but only the SAQ D. Specifically, the requirement’s reporting detail reads: If risk ranking is assigned to new vulnerabilities, briefly describe the observed process for assigning a risk ranking, including how critical, highest risk vulnerabilities are ranked as "High"* (Note: the ranking of vulnerabilities is considered a best practice until June 30, 2012, after which it becomes a requirement.) 
* The reporting detail for "Observe process, action state" is not required until June 30, 2012. Personally, I think this is a good idea as it actually gets you thinking about the impacts of the vulnerabilities specific to your organization.  It also allows you to downgrade the vendor-supplied criticality should you have existing controls in place to lessen the chance of the vulnerability being realized.  A common example is having to apply a patch to a web server on a very restricted network (full Access Control Lists, etc.) because the vendor rated it critical (the patch fixed an exploit for remote code execution).  The critical rating is perfectly valid for public-facing websites but not as severe for servers that don’t interact with the Internet. For those that don’t currently have an established risk assessment process in place (or those that could use some tweaking), the following blog posts might be helpful: "The Annual Struggle with Assess Risk" and "Measuring Security Risks Consistently."  Seems like we planned those other blogs, doesn’t it? [post_title] => The Choice is No Longer Yours - Changes to PCI [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => the-choice-is-no-longer-yours-changes-to-pci [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:06:15 [post_modified_gmt] => 2021-04-13 00:06:15 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1208 [menu_order] => 805 [post_type] => post [post_mime_type] => [comment_count] => 1 [filter] => raw ) [17] => WP_Post Object ( [ID] => 1212 [post_author] => 26 [post_date] => 2012-03-26 07:00:18 [post_date_gmt] => 2012-03-26 07:00:18 [post_content] =>

Let’s start with a little exercise. Rate the risk for the following events.

  1. Going 15 mph over the speed limit.
  2. Using a public wireless internet connection at the airport.
  3. Using a third party for payment services.

If you were to ask your neighbor how they would rate them, would it be the same?  Go ahead and ask them, I’ll wait.  For those not asking, do you think they would be the same?  Probably not.  Assigning a risk label to an event is too subjective.  It’s based upon the person’s experience, profession, and situational awareness.  How one person labels risk most likely will not be the same as how someone else does.  This is mostly due to the lack of comparable impacts. Assigning impact consistently is manageable with guidance.  Such guidance may include factors such as:

  • Fiscal costs to replace/fix
  • Employee hours needed (will you have to outsource?)
  • Damage to reputation (usually more for service providers)
  • Harm to individuals (employees and/or patients)

Each of these factors, and the threshold from one tier to the next, is organization-specific.  $10,000 in replacement systems for one company may be fairly significant while for another it may be the budget for the annual holiday party.  Establishing the different thresholds for each of your risk layers will make this a repeatable process.  It’s an easier process than most think; just go through the possibilities for each.  If this would cost our organization $__________ it would be bad, $____________ is really bad, and $_______________ is "I’m packing up my office right now."  Just keep doing that for all your impact decision factors. Creating a matrix will help quickly assign such risk impacts and also ensure that the right people are involved in the process.  That’s correct: assigning risks, the impact, and the likelihood shouldn’t be a one-person job; there are too many factors for one person to know.  Healthcare is a great example.  IT can determine how much it would cost to replace/fix a server, but IT most likely will not be able to properly gauge organizational reputation damage and the potential harm to patients. Having more people with different roles also brings more situational awareness (i.e., threat likelihood) to the risk assignment process.  They may be aware of additional controls which could lessen the chance of the risk being realized. Raising situational awareness allows your company to assess risks with greater understanding and accuracy. For example, would the risks you assigned to the examples above change with the following?

  1. Going 15 mph over the speed limit in a school zone.
  2. Using a public wireless internet connection at the airport after Defcon.
  3. Using a third party for payment services that continues to suffer data breaches.

All of the aspects above increase the maturity level of risk assignments used in Risk Management programs, audits, and everyday operations. This helps everyone within the organization speak the same language and ensures that we compare apples to apples.  When everyone is on the same page and knows how the risks are being assigned, there tends to be less resistance to risk-reducing initiatives. This level of organizational "buy-in" is crucial for those projects that have a large impact radius and cross many departmental boundaries. So how does this all start?  The easiest way is to integrate this process into your Risk Management program and during each Risk Assessment. Use the same processes for your internal audits, and have external companies either use your process or provide enough information to allow your group to re-rate findings internally. Document the process and the various factors and make sure all involved know what they are. This will lead to some interesting conversations, but stick to it! Having an established and consistent process turns the arbitrary into the meaningful.
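To make the threshold exercise concrete, here is a minimal sketch of a fiscal-impact table and a tiny risk matrix; every dollar figure, label, and likelihood level below is a placeholder assumption that each organization must set for itself.

# Minimal sketch of organization-defined impact thresholds; all figures are placeholders.
THRESHOLDS = [
    (10_000, "Low"),      # below this: absorb it and move on
    (100_000, "Medium"),  # "it would be bad"
    (1_000_000, "High"),  # "really bad"
]                         # above the last tier: "I'm packing up my office right now"

def fiscal_impact(cost):
    """Map a replacement/fix cost to the organization's impact label."""
    for ceiling, label in THRESHOLDS:
        if cost < ceiling:
            return label
    return "Critical"

# The matrix then combines impact with likelihood, both on org-defined scales.
RISK_MATRIX = {
    ("Low", "Unlikely"): "Low",
    ("Low", "Likely"): "Medium",
    ("Medium", "Unlikely"): "Medium",
    ("Medium", "Likely"): "High",
    ("High", "Unlikely"): "Medium",
    ("High", "Likely"): "High",
    ("Critical", "Unlikely"): "High",
    ("Critical", "Likely"): "Critical",
}

print(fiscal_impact(50_000))            # -> Medium
print(RISK_MATRIX[("High", "Likely")])  # -> High

Writing the table down like this, even in a spreadsheet rather than code, is what makes the assignment repeatable across departments.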

[post_title] => Measuring Security Risks Consistently [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => measuring-security-risks-consistently [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:06:08 [post_modified_gmt] => 2021-04-13 00:06:08 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1212 [menu_order] => 810 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [18] => WP_Post Object ( [ID] => 1214 [post_author] => 26 [post_date] => 2012-03-13 07:00:54 [post_date_gmt] => 2012-03-13 07:00:54 [post_content] =>

Gettin’ Your Internal Security Assessor on…

Friendly reminder: after June 30 of this year, all Level 2 MasterCard merchants performing their annual self assessment must ensure that their internal resource has attended ISA (Internal Security Assessor) training.  Alternatively, Level 2 merchants can hire a Qualified Security Assessor to perform the assessment and sign off on their Level 2 self assessment Attestation of Compliance.  This is a change from the current requirements, which allow for any internal staff to perform the Level 2 assessment. The ISA program is maintained by the PCI Security Standards Council; training consists of four one-hour online courses followed by two days of onsite instructor-led training.  At the end of the course you even get a certificate that you can use to win friends and influence people! Based on feedback received from current ISAs working for my clients, it sounds like the training is valuable even to those with a deep PCI background.  As ISAs receive (essentially) the same training as a Qualified Security Assessor, there are multiple benefits to keeping an ISA on staff:
  • By attending SSC-approved training, the ISA is getting the most current and relevant interpretations of the DSS.
  • An ISA is an “internal QSA” and also an employee; therefore the ISA may have the advantage of a deeper familiarity with the organization’s people, environment, and processes compared to an external consultant/auditor.
  • For a variety of reasons, most organizations still choose to use an external QSA firm for audits; however, ISAs tend to be an excellent interface to an external QSA, and can be useful as a second opinion if the QSA firm sends Cousin Eddie to do your audit.
  • An ISA can provide an enhanced understanding of the Data Security Standard (DSS) requirements as they relate specifically to your organization, and can keep you apprised of current and emerging trends in the payment card sphere.
  • Having an ISA on staff is the modern version of having a Royal Wizard in your court.  Though I am not supposed to speak of this, part of the advanced QSA/ISA training involves learning all manner of arcane magic.  The ISA may be able to teach you some tricks or perform at your company holiday party.
If the changes to the MasterCard Level 2 merchant requirements affect your organization there is still time to sign up for training (ISA training schedule is here).  You’ll want to become an ISA yourself when you see the locations - London in April, anyone? [post_title] => New MasterCard Level 2 Validation Requirements Effective June 30, 2012 [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => new-mastercard-level-2-validation-requirements-effective-june-30-2012 [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:06:09 [post_modified_gmt] => 2021-04-13 00:06:09 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1214 [menu_order] => 812 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [19] => WP_Post Object ( [ID] => 1215 [post_author] => 26 [post_date] => 2012-02-21 07:00:18 [post_date_gmt] => 2012-02-21 07:00:18 [post_content] => You may have seen some of the recent articles regarding a research paper that documented a discovered flaw in some commonly used encryption schemes, including those used for online transactions.  I think it’s important to point out that the sky isn’t falling.  That said, this may be a good time to check your encryption processes and determine if this really applies to you.  Within the paper the researchers determined that using 1024-bit RSA provides "99.8% security at best."  This isn’t systemic for all processes; the researchers did not find the same problem after looking at 5 million OpenPGP keys (which is the source of the paper’s title). Without getting too far into the technical aspects of the paper, the researchers found that the numbers used in the creation of the keys weren’t so random after all.  Because of that, some keys ended up sharing a prime factor with other keys, and two RSA moduli that share a prime can both be factored by anyone who computes their greatest common divisor.  Thus, for practical purposes, the keys were the same. What does this mean for you and your organization?  Time to check your encryption settings and certificates.  If you outsource this as part of your e-commerce solution, have the vendor validate their settings.  If you use RSA keys you might consider changing them; of course, this isn’t something that most organizations can/will do with minimal impact.  One of the big questions I foresee is whether this will affect your PCI compliance.  At this time, no.  Since the risk posed by the redundant keys found by the researchers is significantly less in practice than it might first appear, you most likely will be safe.  However, this is something to keep tabs on.  If further research continues to find issues with how the prime numbers are generated within these methods, it may be time to start the switch. Overall, it’s important to remember that if you use RSA keys, the sky isn’t falling all around you, just 0.2% of it is. 
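To see why shared factors are so fatal, here is a minimal sketch with toy numbers; the primes below are made up for illustration, and real moduli are 1024 bits or more, but the math is identical.

from math import gcd

# Two toy RSA moduli that accidentally share the prime 11:
# n1 = 11 * 17 and n2 = 11 * 23. Euclid's algorithm makes the GCD
# cheap to compute even on numbers thousands of bits long.
n1 = 11 * 17
n2 = 11 * 23

shared = gcd(n1, n2)
if shared > 1:
    # Knowing one prime factor of a modulus breaks that RSA key entirely.
    print("shared prime:", shared)            # 11
    print("n1 =", shared, "x", n1 // shared)  # 11 x 17
    print("n2 =", shared, "x", n2 // shared)  # 11 x 23

This is why poor randomness at key-generation time, rather than any flaw in RSA itself, was the root of the problem.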
[post_title] => Unique Encryption Keys Not So Unique [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => unique-encryption-keys-not-so-unique [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:05:56 [post_modified_gmt] => 2021-04-13 00:05:56 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1215 [menu_order] => 813 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [20] => WP_Post Object ( [ID] => 1216 [post_author] => 26 [post_date] => 2012-02-17 07:00:18 [post_date_gmt] => 2012-02-17 07:00:18 [post_content] => Social media has both helped and hurt organizations, and healthcare is certainly no exception. Many entities are getting on, or have been on for some time, the social media band wagon. In fact, this is not the first time we’ve mentioned it here on our own blog. Some organizations have seen a great boon when it comes to using the many varied venues of social media, with probably the exception of anyone still left on MySpace. However, social media can also hurt organizations, and while the cases tend to be somewhat cut-and-dried ("you posted a patient’s personal information on Facebook, so you are fired"), it’s the organizational response which I find most interesting. Searches on the internet can find many organizations’ social media policies posted online (I don’t understand this, but that’s for another day). Perusing these policies you get the gamut from ‘gentle guidance’ to Orwellian 1984-esque policies. So why such a spectrum? Organizational culture aside, they are mostly indicative of where breaches have occurred. While I understand that healthcare breaches are (starting to be) a big thing, I believe the heavy-handed policies go too far and will never make the changes they strive for. Some of these policies read like they are taking away an employee’s right to express themselves via any social media outlet without the oversight and approval of management, even if it’s their own personal account written during non-business hours. This is also usually followed up with web filtering to remove the ability to gain access to Facebook, Twitter, or other popular social media sites (sorry again MySpace). Ironically enough, I’ve seen this happen and then the company emails all employees saying to “like” the company’s Facebook page and/or follow their Twitter feed. This tactic will never work for a few main reasons. Humans are social, and companies can’t filter all channels to social media, even during business hours (i.e., smartphones). Remember when Egypt attempted to block Twitter during the protests? Short of having the ‘Thought Police’ and ‘Ministry of Love’, people will always share their thoughts, some more than others. With the many technological advances it’s become easier and easier; now people can take a photo and upload it to their medium of choice in seconds. This can lead to some fairly significant issues for organizations, especially healthcare. So how does an entity prevent these breaches? By setting expectations with reasonable limitations. What I mean by this is educate everyone on what is acceptable and what is not. Telling employees that they can’t say anything bad about their job isn’t going to work. Telling them that they can’t use copyrighted materials (logos) or act as a company agent on a personal blog is acceptable. 
Informing them of libel and how far is too far is key for when employees become disgruntled (hopefully this never happens to you). Understand that filtering social media sites is not a control that prevents material from getting online; it is a time management control at best (assuming smartphones aren’t prevalent). The successful policy both defines the acceptable boundaries of personal social media as it relates to the organization and educates employees on what to self-scrutinize before posting; pictures from work with a patient walking in the background, posts that may read like an organization-sanctioned post, etc. This ensures that the "what" comes across but also the "why." This balanced approach is at least easier for organizations that don’t yet have their own Thought Police. [post_title] => Social Media and Healthcare: Bane and Gain [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => social-media-and-healthcare-bane-and-gain [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:06:15 [post_modified_gmt] => 2021-04-13 00:06:15 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1216 [menu_order] => 814 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [21] => WP_Post Object ( [ID] => 1217 [post_author] => 26 [post_date] => 2012-02-09 07:00:54 [post_date_gmt] => 2012-02-09 07:00:54 [post_content] => While getting compliant and passing your yearly Report on Compliance audit or filling out a Self Assessment Questionnaire is important to your organization and your customers (and a requirement for merchants and service providers), the PCI Data Security Standard (DSS) is intended to be the foundation of an ongoing program, ensuring you follow best practices throughout the year.  I continue to work with clients who overlook the maintenance aspect of the DSS, and few things are worse than scrambling to update everything at once while you are in the middle of an audit.  In this past year, I have come across several instances of companies who overlooked a key time-based DSS requirement and were forced to use compensating controls or simply could not meet compliance because of the oversight. The DSS does little to protect your cardholder data and systems if you think of it as something that you only have to do once a year.  Maintaining your program should be like maintaining your house: don’t wait to fix that leaky pipe, repair the broken window, fix the lock on the door, and take out all of the trash right before your mother-in-law shows up - you don’t want to deal with it all at once, and neglect can lead to increased effort, expense, security gaps, and non-compliance.  Similarly, following a scheduled maintenance routine can help you purge unnecessary accounts and data, provide visibility into your processes, train personnel, and ensure that different business units are aware of and performing their expected duties. The cheat sheet in the following whitepaper was developed to help you prioritize, schedule, and assign responsibility for the tasks that must be performed on a periodic basis to meet DSS 2.0 requirements.  Throw this in a spreadsheet, update your group calendar, or transfer this to your GRC tool, and then off to the beach for a Mai-Tai! 
Care and Feeding of your PCI DSS Compliance Program [post_title] => Care and Feeding of your PCI DSS Compliance Program [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => care-and-feeding-of-your-pci-dss-compliance-program [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:06:02 [post_modified_gmt] => 2021-04-13 00:06:02 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1217 [menu_order] => 815 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [22] => WP_Post Object ( [ID] => 1220 [post_author] => 26 [post_date] => 2011-12-29 07:00:18 [post_date_gmt] => 2011-12-29 07:00:18 [post_content] => While most healthcare organizations work on securing PHI, there is usually one element that I’ve found isn’t secured with the same rigor as most other physical PHI: X-rays. X-rays waiting for disposal companies to come and haul them away are usually left unsecured and not monitored. The problem is that individuals have found that they can recover the silver found within the film. While it isn’t a lot of silver (roughly 2% of the film’s weight), a few hundred pounds of film could make it a lucrative venture. That’s why it’s not surprising that thieves have begun stealing them. Let’s be honest here, when was the last time you checked the credentials of the crew coming to take away what you would consider to be garbage? The issue here isn’t that these films will be used for identity theft purposes; it’s that you are now forced to go through breach notification procedures at your cost… for what is technically considered refuse! Three organizations in Pennsylvania already had to go through this as they’d fallen victim to thieves stealing the films from unsecured areas, and in one instance posing as a radiological film destruction company. What can you do? Start securing X-rays and make sure they aren’t accessible to unauthorized parties, regardless of whether the film is useful or scheduled for destruction. Many organizations store the X-rays near the equipment in semi-open rooms. If the rooms aren’t used 24x7 then you should secure the room when not in use with your normal physical security system (keys, badges, dragons, etc.) and monitoring equipment. If you don’t want to go to such extreme measures (I hear dragons eat a lot) then you may consider digitizing your X-rays and then securely disposing of the physical copies. Otherwise you may want to start recovering the silver yourself to help pay for the breach notification efforts you might find yourself facing. [post_title] => Secure the Silver [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => secure-the-silver [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:05:54 [post_modified_gmt] => 2021-04-13 00:05:54 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1220 [menu_order] => 818 [post_type] => post [post_mime_type] => [comment_count] => 1 [filter] => raw ) [23] => WP_Post Object ( [ID] => 1221 [post_author] => 26 [post_date] => 2011-12-12 07:00:18 [post_date_gmt] => 2011-12-12 07:00:18 [post_content] =>

Theodore J. Kobus III published his A to Z of Healthcare Data Breaches, which he presented at the annual American Society for Healthcare Risk Management conference. This list may be ideal to use, or to model your own internal training after, for more than just data breaches. Initially I thought of trying to showcase some of them in a silly reference, but I thought it might be too OPAQUE.

O – Overreacting is not going to get you through the event

P – Preparedness is key

A – Accept that it will happen to you

Q – Quit keeping old data

U – Understand the laws that impact your organization

E – Empathize with your customers/patients/employees – how are they going to react to your response?

In all seriousness, Q and A (no pun intended here) are both important and I wanted to point those two out. If you don’t need the data, as an organization you need to ask yourself, "what are we gaining by keeping this data?"  The liability is attached to every piece of information you retain, regardless of whether you use it or not.  Having (and following) data retention policies will limit such a liability. Accepting that it is going to happen, now that’s a hard pill to swallow; but it is similar to the Emergency Preparedness techniques that many organizations routinely practice.  As they say, practice makes perfect even if you never have to use those techniques.  Organizations that routinely train for various circumstances are the ones best prepared to handle them.  If you accept that a data breach is going to happen, you’ll find yourself equipping and (more importantly) training for how to respond.  Whether you attach this to existing emergency practices or not is not as important as actually having a response.  Many organizations have suffered both from a Public Relations perspective and financially (fines) from their seeming lack of response. In the end, training staff how to deal with data breaches because you accept that they will happen will yield positive results from a negative situation.  It’s amazing how people remember what to do during emergency situations; I still remember to get under my desk during an earthquake.

[post_title] => Data Breach Alphabet Soup [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => data-breach-alphabet-soup [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:06:04 [post_modified_gmt] => 2021-04-13 00:06:04 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1221 [menu_order] => 819 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [24] => WP_Post Object ( [ID] => 1224 [post_author] => 26 [post_date] => 2011-11-11 07:00:18 [post_date_gmt] => 2011-11-11 07:00:18 [post_content] => Many companies have been in this dilemma before, "if I update and publish this new policy our organization is immediately out of compliance, but no one will make any changes without the policy."  Pondering this, "Yossarian was moved very deeply by the absolute simplicity of this clause of Catch-22 and let out a respectful whistle. (p. 46, ch. 5)[1]" For those that suffer through this during your Policy Update sessions, there a few ways to break out of this cycle: 1. Establish a Grace Period when policies are updated. This is usually established within a policy about policies (feel like the definition of recursion?). Some organizations will issue policies with a Published Date and next to it an Effective Date. This reminds readers about the Grace Period while reinforcing the expectation that compliance is required in the near future.

a. Pros: Staff can work towards compliance by the established deadline without the label of 'Non-Compliant.' Project plans, budgets, and resources can be lined up to tackle the changes.

b. Cons: Effective dates may be too soon for some large changes, but having different effective dates for some projects but not everything leads to confusion. If the timeframes don't run in parallel with budget cycles then there may not be enough available funds for changes that require fiscal resources. The other concern is that during the Grace Period, there may be the perception of having two active policies which may lead to some confusion.

2. Establish, or merge with an existing, Exception Process for non-compliant areas when the policies are published. If there are areas of non-compliance when the policies are updated then an exception must be immediately requested for a temporary acceptance. Part of this exception process will be to establish a plan of attack for reaching compliance.

a. Pros: The exceptions help to prioritize the identified non-compliant areas which may make it easier to see the total cost of compliance; this method is easier for organizations that have strong Project Management departments.

b. Cons: It may be overwhelming for the team reviewing all the exception requests, especially for those that can’t assess all associated risks (such as business versus IT risks). There will also be overhead to track all the exceptions and the deadlines. Continual exception requests will have to be managed appropriately.

3. Establish a Hybrid Approach. This method takes a little from each above with tweaks to meet the needs of your organization. For example, establishing a short Grace Period for new / updated policies and anything that will need longer must be identified immediately and go through the Exception Process.

a. Pros: An earlier effective date will meet regulatory requirements more quickly. There may be a smaller Exception handling team, yet the organization still receives the benefit of using Project Management to handle the outliers.

b. Cons: It is easy for this method to slide more into the Exception Process without constant enforcement of the effective dates. A shorter Grace Period may result in an unexpected number of Exception requests depending upon the policy.

Regardless of the method, the most successful implementations negate the Cons listed above with two major factors: (1) Management’s full support (which includes enforcement) and (2) communication.  Lack of those two elements often will leave you with a feeling that the wheels are spinning, but you aren’t moving.  Of course funding, or the lack thereof, is like a car with no gas - it’s only great if you want to go where you already are.  The corporate culture may also dictate which approach is more likely to succeed.  Proactive organizations usually try for the Grace Period method while reactive organizations are better suited for the Exception Method.  This isn’t a slight against one or another, but in those instances the culture has established tools and workflows designed for one or the other.  For example, reactive cultures are usually found in healthcare, especially hospitals, since that’s the name of the game: reacting to the events around them.  Financial institutions tend to be more proactive due to many of the existing regulations (SOX, GLBA, etc.).   It’s not to say that you won’t find proactive healthcare institutions (which some are trying to be) or reactive financial organizations.  Hopefully adoption of one of the above methods helps during your next Policy Update cycle so you can make changes happen, as behaviors, controls, and other requirements usually won’t change just because they can.  "Catch-22 says they have a right to do anything we can't stop them from doing."
[1] Heller, Joseph.  Catch-22. Simon & Schuster, 1961. [post_title] => The Catch-22 of Policy Updates [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => the-catch-22-of-policy-updates [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:06:15 [post_modified_gmt] => 2021-04-13 00:06:15 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1224 [menu_order] => 823 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [25] => WP_Post Object ( [ID] => 1227 [post_author] => 26 [post_date] => 2011-10-05 07:00:18 [post_date_gmt] => 2011-10-05 07:00:18 [post_content] => At the 2011 Black Hat Conference, Security Researcher Jay Radcliffe demonstrated what many healthcare security professionals have been concerned with: hacking a medical device.  Medical devices have developed from isolated islands into systems with embedded operating systems that communicate with other applications.  As such, a new threat window has opened.  Apart from the obvious benefits that such advancements have brought to healthcare, they also bring some responsibilities.  Since Mr. Radcliffe's presentation there has been a lot of discussion about the security of insulin pumps and what the manufacturer should do.  However, I'd like to discuss the broader topic, and maybe from a slightly different angle.  Speaking generally about medical device security, there is a lot of confusion about what can be done to ensure that privacy and security are maintained on what, for all intents and purposes, we'll call "smart" devices.  Many individuals will say that FDA-regulated devices cannot be altered in any way. However, the FDA itself has published articles going back a couple of years now indicating that this is incorrect.  Aware of such misinterpretation, a November 2009 post clearly reminds readers that "cybersecurity for medical devices and their associated communication networks is a shared responsibility between medical device manufacturers and medical device user facilities."  That's a powerful statement, and one that some may think, upon first read, unfair.  This doesn't say it is solely the responsibility of the device manufacturer; the responsibility also extends to the organization that uses, distributes, and maintains the devices.  If a pump or other medical device that transmits information and/or receives instructions remotely (such as heart pumps) fails, the patient will most likely go back to the covered entity for answers.  It doesn't matter if it's because the pump was damaged, altered maliciously, or just had a design flaw; both organizations will take a public relations hit.  So what does this mean for covered entities?  Devices used and distributed by covered entities should have had security as part of the design process and allow for updates if necessary.  For example, if the device uses a Windows operating system, how will it receive updates and what department will be responsible for that?   If you'd like to get more involved in this type of discussion, check out the HIMSS Medical Device Security Work Group or the FDA Draft Guidance, which is out for comments now. 
[post_title] => Medical Device Security [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => medical-device-security [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:06:08 [post_modified_gmt] => 2021-04-13 00:06:08 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1227 [menu_order] => 825 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [26] => WP_Post Object ( [ID] => 1231 [post_author] => 26 [post_date] => 2011-09-27 07:00:54 [post_date_gmt] => 2011-09-27 07:00:54 [post_content] => The PCI Security Standards Council (SSC) has recently released the latest version of the 2.0 Report on Compliance (ROC) Reporting Instructions (formerly called the "scorecard").  This document had previously been for use by QSAs only; it is the secret sauce used to perform a Level 1 PCI audit. For those of you lucky enough to have gone through an L1 audit, the "scorecard" is the super secret document that the QSA kept stored on the triple-encrypted drive in the TEMPEST-approved tamperproof tungsten-lined briefcase handcuffed to her wrist.  QSAs were not allowed to share the criteria on which the company was being audited (scored); the reporting instructions require the QSA to perform one or more of the following validation steps for every requirement:
  • Observation of system settings, configurations
  • Documentation review
  • Interview with personnel
  • Observation of process, action, state
  • Identify sample
Well, good news everyone!  The document is now available to the general public. Hopefully, this will eliminate some of those awkward moments that seem to always come up during an audit: QSA: "You need a documented policy that says you use network address translation. That's not written down anywhere." Customer: "Can you show me where it says I need to do that in the DSS?" QSA: "You won't find it there, but I promise it says it somewhere.  I'm not allowed to show you, just trust me, you need it". Customer: "Can you just let me peek over your shoulder?" QSA: "If you saw it, I would have to have your memory wiped.  Have you ever seen "Men in Black"?" Customer: "I'm calling Security". It's pretty hard to follow the rules when you're not allowed to know what they are.  With this document's public release a company can actually evaluate their controls and compliance program against the same standards that a QSA will use; no more guessing how to meet a requirement, no more conversations where the auditor gives a seemingly arbitrary failing finding, with a "because I said so" for the explanation.  This should also allow organizations to get a much better picture of the intent and expected implementation of a requirement by understanding how the controls will be assessed.  Well done, SSC. [post_title] => PCI 2.0 scoring matrix released to the public (now your kids can play “PCI Auditor” at home!) [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => pci-2-0-scoring-matrix-released-to-the-public-now-your-kids-can-play-pci-auditor-at-home [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:06:10 [post_modified_gmt] => 2021-04-13 00:06:10 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1231 [menu_order] => 830 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [27] => WP_Post Object ( [ID] => 1236 [post_author] => 26 [post_date] => 2011-08-08 07:00:54 [post_date_gmt] => 2011-08-08 07:00:54 [post_content] => From the "never been asked that question before" files, I recently had a client who wanted to know about wireless keyboards and whether they are in scope for PCI. There are no PCI requirements that address keyboards or other wireless peripherals (though you could make a case that some keyboards transmit unencrypted cardholder data over 'open, public networks'). Just to double-check, I reread the Security Standards Council's Wireless Special Interest Group publication on wireless best practices and PCI; the guidelines are geared towards 802.11 WLANs and specifically exclude Bluetooth. Wireless keyboards are ubiquitous; there is a reasonable chance your organization is using them as the interface to a POS application or virtual terminal. The input could include customer name, expiration, PAN, and CVV. As we typically wouldn't pay much attention to the peripherals that we type this data on, the question got me thinking about how much we take technology (and its security through obscurity) for granted. I did some exhaustive research on the subject (at least 5 minutes searching Google) and easily found some real world examples of wireless keyboard sniffing techniques; though not currently a prevalent attack, it is quite feasible to intercept the output from a wireless keyboard without leaving fingerprints behind. 
Unlike traditional keystroke loggers and screen scrapers, which can often be detected by antimalware applications, wireless attacks are transparent and do not require physical or logical access to target machines. One of the more advanced tools out there, hosted on Remote Exploit's site, is called KeyKeriki. This is a combination of hardware and software that targets the wireless signals from 27 MHz keyboards (there's a 2.4 GHz version on the way, too) and can capture or output the keystrokes. The hardware looks simple to build and includes an SD card for logging; additionally, the software can decrypt some weak XOR-based encryption on the fly (it takes about 40 keystrokes to gather enough data to decipher the stream in real time; a toy illustration of this weakness follows the list below). I don't want to go too far down the rabbit hole here, as you can't defend against every attack vector (PCI doesn't address TEMPEST or Van Eck phreaking either), but there are some simple steps that can be taken to reduce the risk of compromise:
  • Include standards for input devices in your list of approved hardware; pick keyboards that use strong cryptography to transmit data.
  • It looks like many of the exploits are written to take advantage of certain vendors' keyboards (I'm looking at you, Logitech and Microsoft...). Do some research when purchasing wireless keyboards to see if their communications security has already been compromised.
  • If you do have a need for wireless input devices, consider using Bluetooth, which offers some protection through the use of a PIN and a custom SAFER+ block cipher implementation. Check the footnote for a good publication on Bluetooth and security from NIST.
  • Drink plenty of coffee and/or adult beverages of your choice before typing credit card numbers. The resultant twitching/lack of coordination will make it more difficult for a malicious user to extract useful information from your typing. Bonus: it's fun.
  • Consider using wired keyboards for virtual terminals and POS workstations. Remember those things?
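To see why weak XOR "encryption" offers so little protection, here is a toy Python sketch. It is not the KeyKeriki implementation; the single-byte key and the space-frequency trick are simplifying assumptions used only to illustrate the class of weakness:

# Toy demonstration of why a short repeating XOR key offers little protection.
from collections import Counter

def xor_bytes(data: bytes, key: int) -> bytes:
    """'Encrypt' data by XORing every byte with a single-byte key."""
    return bytes(b ^ key for b in data)

def recover_key(ciphertext: bytes) -> int:
    """Guess the key by assuming the most frequent plaintext byte is a space.

    Typed English is dominated by the space character (0x20), so after a few
    dozen keystrokes the most common ciphertext byte is almost certainly
    'space XOR key' -- which is why ~40 keystrokes suffice in practice.
    """
    most_common_byte, _ = Counter(ciphertext).most_common(1)[0]
    return most_common_byte ^ 0x20

keystrokes = b"the quick brown fox jumps over the lazy dog 4111 1111 1111 1111"
sniffed = xor_bytes(keystrokes, key=0x5A)   # what an attacker captures over the air
key = recover_key(sniffed)                  # no brute force required
print(f"recovered key: {key:#x}")
print(xor_bytes(sniffed, key).decode())     # the plaintext, PAN included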

References:

[post_title] => PCI and the "other wireless" [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => pci-and-the-other-wireless [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:06:10 [post_modified_gmt] => 2021-04-13 00:06:10 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1236 [menu_order] => 837 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [28] => WP_Post Object ( [ID] => 1238 [post_author] => 26 [post_date] => 2011-07-28 07:00:18 [post_date_gmt] => 2011-07-28 07:00:18 [post_content] => When you think back to some of the big technical evolutions that changed our lives in positive ways, email certainly stands among them. While the basic tenets of email haven’t really changed, how we use it has. For many, email has morphed from a pure messaging system into an instant messenger, file transfer system, data storage location, and more. While email’s functions, abilities, and uses have increased exponentially, so have the liabilities. I’m not advocating that we get rid of our email servers (sorry, Postal Service). I am advocating rethinking how email is viewed in our corporate environments. Both regulatory (HIPAA[i]) and non-regulatory bodies (PCI[ii]) have requirements regarding storage and transmittal of sensitive information. When email servers are used to process, store, and/or transmit such data, these elements fall under the scrutiny of these governing bodies (and I won’t get into e-discovery issues here either). So what’s my point? We need to educate users on how to use email appropriately when it comes to sensitive information. Email is fantastic, but users need to be aware of what can be sent in the clear versus encrypted. Do users know when and how to encrypt data before they send sensitive information outside your organization? And, often forgotten: do they know what to do when they receive such information? Don’t incur the penalties of breach notifications because someone else sent you sensitive materials that remain on your server in some inbox. Beyond training there is always more that can be done; however, it needs to align with your organization’s security posture and culture. From experience, some have gone as far as disallowing or limiting attachments, deleting all emails older than three months, using spam filters to also search incoming emails for sensitive elements (beyond just spam and other malware), deploying a Data Loss Prevention (DLP) solution, and performing periodic scans of the email server to ensure that sensitive information is not stored there, just to name a few. With proper controls and training, we can still use email and all it has become, but in a more secure and compliant manner.
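As a rough illustration of the kind of content inspection a DLP solution or periodic mailbox scan might perform, here is a minimal Python sketch: a candidate card-number regex plus the standard Luhn checksum that most tools use to reduce false positives. The pattern and length limits are illustrative, not any particular product's logic:

import re

# 13-16 digits, optionally separated by spaces or dashes
CANDIDATE = re.compile(r"\b(?:\d[ -]?){13,16}\b")

def luhn_ok(digits: str) -> bool:
    """Return True if the digit string passes the Luhn checksum."""
    total = 0
    for i, ch in enumerate(reversed(digits)):
        d = int(ch)
        if i % 2 == 1:        # double every second digit from the right
            d *= 2
            if d > 9:
                d -= 9
        total += d
    return total % 10 == 0

def find_pans(text: str) -> list:
    """Return candidate PANs in text that pass the Luhn check."""
    hits = []
    for match in CANDIDATE.finditer(text):
        digits = re.sub(r"[ -]", "", match.group())
        if 13 <= len(digits) <= 16 and luhn_ok(digits):
            hits.append(digits)
    return hits

print(find_pans("Order notes: card 4111-1111-1111-1111, ref 1234567890123"))
# ['4111111111111111'] -- the reference number fails the Luhn check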

[i] HIPAA §164.312(a)(2)(iv) Encryption and decryption (Addressable). Implement a mechanism to encrypt and decrypt electronic protected health information.
[ii] PCI-DSS Requirement 4.2: Never send unprotected PANs [Primary Account Numbers] by end-user technologies (for example, email, instant messaging, chat, etc.)
[post_title] => Email Uses and Security Liabilities [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => email-uses-and-security-liabilities [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:06:04 [post_modified_gmt] => 2021-04-13 00:06:04 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1238 [menu_order] => 840 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [29] => WP_Post Object ( [ID] => 1243 [post_author] => 26 [post_date] => 2011-06-13 07:00:18 [post_date_gmt] => 2011-06-13 07:00:18 [post_content] => Many IT folks know that, regardless of their respective fields, the "unofficial" eighth and ninth layers of the OSI model are budget and politics. Healthcare is no different, and some may argue that healthcare has more stringent competition within the "budget" layer. With limited funds and many demands, organizations are faced with balancing all needs stemming from internal and external pressures. As a result, some sought-after security products get delayed or outright shelved until the next fiscal year, when they can compete again. Short of a divining rod or a scrying pool, it's difficult to know what the top pressures or concerns may be. Luckily, groups like the Managed Care Executive Group (MCEG) publish their Top 10 issues collected from healthcare leaders across the country. Not surprisingly, many elements on the list discuss points of fiscal sustainability as they relate to funding from sources such as Medicare and Medicaid, and why wouldn't they? If an organization isn't able to make money, then its security posture won't matter soon enough. From a security perspective, some interesting elements are found within number 7 - Health Information Exchanges. It briefly hits on security where, "HIE's, in many cases, are being launched under time pressures by relatively inexperienced and under-resourced groups, exposing a lot of data to misuse and/or errors." At number seven in a list of ten, we finally get to potential PHI breach concerns. Even so, it doesn't outright mention HIPAA, HITECH, or the Health and Human Services (HHS) Office for Civil Rights (OCR). With the OCR increasing enforcement of HIPAA and HITECH regulations and recent fines and penalties this year totaling over $5 million ($4.3 million and $1 million, respectively), this is a little surprising. Many agree that the OCR is finding its footing in enforcement, and its momentum is only going to increase. I don't know a lot of organizations that can pay such fines and the corresponding costs of immediate internal corrective actions (let alone the public relations costs) without too much concern. How does this help the resource-strapped healthcare organization? The actions that precipitated these fines weren't ground-breaking hacks. They were procedural issues that could have been addressed early and are all part of an environment that secures and protects patient privacy: the goal of HIPAA/HITECH and of other requirements found in PCI. Looking at the details of the OCR issues and knowing those top concerns may help reprioritize security. Even those in a resource-strained company can benefit by using the recent OCR actions and by focusing initially on non-product-based solutions that are no- to low-cost (such as policy and procedural changes, staff training, etc.), which are the foundational elements of a sound security posture.
Once those are solidified, it becomes easier for those shelved security products to get dusted off and receive the green light. Resources: Managed Care Executive Group - https://www.mceg.net HHS Office for Civil Rights - https://www.hhs.gov/ocr/privacy/hipaa/news/index.html [post_title] => Prioritization for Healthcare Executives [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => prioritization-for-healthcare-executives [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:06:12 [post_modified_gmt] => 2021-04-13 00:06:12 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1243 [menu_order] => 844 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [30] => WP_Post Object ( [ID] => 1245 [post_author] => 26 [post_date] => 2011-06-02 07:00:18 [post_date_gmt] => 2011-06-02 07:00:18 [post_content] => With ambiguity over the definition of ‘creditor’ as it relates to the healthcare environment, the American Medical Association (AMA), along with others, cried “foul” and threw their challenge flag regarding the FTC’s Red Flag Rule. While the AMA is not against protecting patients’ privacy, details within the regulations caused some turmoil as to how this would disrupt physician practices. Delayed implementation and one lawsuit later, the deadlines continued to get pushed back. Then in December 2010 Congress passed, and President Obama signed into law, the “Red Flag Program Clarification Act of 2010.” By clarifying what ‘creditor’ means, it essentially removed physicians from under the Red Flag Program. Even so, it doesn’t mean healthcare can just ignore identity theft issues. Even the AMA agrees. While it doesn’t think most physicians will fall under the redefined categories of ‘creditor’, it does provide Red Flag Rule guidance, a sample policy, and an FAQ on its website (AMA membership required). Every organization can benefit from an identity theft prevention program, and healthcare is no exception. In fact, the majority of privacy breach violations are prosecuted under HIPAA anyway. With the loss of regulatory deadlines, the urgency to implement programs “formerly known as Red Flag” seems to be faltering in some healthcare institutions. However, the benefits of a successfully implemented identity theft program may limit losses and even gain consumer/patient confidence. With losses occurring due to bad debt and denials of payment under false pretenses of identity theft (otherwise known as “I don’t want people to know it was me who was sent to the ED passed out”), a program can help successfully defend revenue recapture efforts. It also helps to curtail medical errors when individuals attempt to use another person’s medical records/insurance to obtain treatment or are merely drug seeking. Any way it’s sliced, an identity theft program will aid any organization, and many healthcare organizations are continuing forward with their programs regardless of where they are in their implementation. While the FTC continues to offer guidance, HITRUST may interest healthcare organizations directly with its Common Security Framework (CSF), which has continued to gain momentum as a validation tool for not just Red Flag but also HIPAA and other requirements. Healthcare environments that have quantified the costs of resolving identity theft claims (both legitimate and not) realize a little preventative medicine is worth it.
I don’t need to remind those in healthcare that while that annual influenza shot may sting a little when you get it, it’s worth it in the long run. [post_title] => Healthcare and Identity Theft Programs [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => healthcare-and-identity-theft-programs [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:06:05 [post_modified_gmt] => 2021-04-13 00:06:05 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1245 [menu_order] => 846 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [31] => WP_Post Object ( [ID] => 1250 [post_author] => 26 [post_date] => 2011-03-03 07:00:41 [post_date_gmt] => 2011-03-03 07:00:41 [post_content] => Maybe, maybe not. If you are a payment application vendor, are you worried about the changes that have happened with the new release of the Payment Application Data Security Standard (PA-DSS)? For the most part, the requirements have not changed but there are a couple of items that may require some changes in the application, the documentation, or even the processes around the application.

Storing sensitive authentication data

In PA-DSS version 1.2, it was not acceptable to store sensitive authentication data (e.g., track 1 data, CVV). PA-DSS version 2.0 now allows sensitive authentication data (track 1, track 2, CVV) to be stored, but only if there is sufficient business justification and the data is stored securely. This applies only to card issuers and companies that support issuing services; it has never been permissible for merchants to store this information, even encrypted. During the testing portion of the audit, the auditor will test for sensitive authentication data using forensic methods. The auditor will also verify that the application is intended for card issuers and/or companies that support issuing services.
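As a hedged sketch of what "stored securely" can look like in code, the snippet below uses authenticated encryption (AES-GCM) from the Python cryptography package. The record identifier and sample data are illustrative assumptions, and real key management (generation, storage, rotation, split knowledge) is the hard part that an assessor will scrutinize and that this sketch deliberately omits:

import os
from cryptography.hazmat.primitives.ciphers.aead import AESGCM

key = AESGCM.generate_key(bit_length=256)   # in production: from an HSM or KMS
aead = AESGCM(key)

def protect(sensitive: bytes, record_id: bytes) -> bytes:
    nonce = os.urandom(12)                  # must be unique per encryption
    return nonce + aead.encrypt(nonce, sensitive, record_id)

def unprotect(blob: bytes, record_id: bytes) -> bytes:
    nonce, ciphertext = blob[:12], blob[12:]
    return aead.decrypt(nonce, ciphertext, record_id)  # raises if tampered with

blob = protect(b"track-2-equivalent-data", record_id=b"issuer-rec-42")
assert unprotect(blob, b"issuer-rec-42") == b"track-2-equivalent-data"

Using the record ID as associated data binds each ciphertext to its row, so a swapped or spliced record fails authentication on decryption.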

Auditing

One of the changes in PA-DSS 2.0 is that the application needs to support centralized auditing. This means it must be possible to move the audit data to a centralized log server (e.g., syslog-ng or Windows event logs). During the testing portion of the audit, the auditor will need to see that the lab has a centralized log server configured and that the application logs are moving to this server. The PA-DSS Implementation Guide also has to provide instructions and procedures for incorporating the logs into a centralized logging environment.
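As a minimal sketch of what "supports centralized auditing" can mean in practice, the snippet below forwards application audit events to a remote syslog server using Python's standard SysLogHandler; the host name, port, and facility are illustrative assumptions:

import logging
from logging.handlers import SysLogHandler

audit = logging.getLogger("payment.audit")
audit.setLevel(logging.INFO)

# Ship events over UDP/514 to the lab's central log server (hypothetical host)
handler = SysLogHandler(address=("loghost.example.com", 514),
                        facility=SysLogHandler.LOG_LOCAL0)
handler.setFormatter(logging.Formatter("payapp: %(asctime)s %(message)s"))
audit.addHandler(handler)

# Log the event, never the full PAN -- mask before logging
audit.info("user=%s action=settlement result=ok pan=%s", "clerk01", "411111******1111")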

One less requirement

As a final note, there is one less requirement: Requirements 10 and 11 have been merged. Instead of two separate requirements, one for the merchant and one for the payment application vendor, there is now a single requirement covering remote access to the payment application.

Conclusion

The PA-DSS version 2.0 requirements are, in most cases, clearer, which makes it easier for payment application vendors to understand the requirements and pass the audit. [post_title] => Big Changes in PA-DSS v2.0 [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => big-changes-in-pa-dss-v2-0 [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:06:00 [post_modified_gmt] => 2021-04-13 00:06:00 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1250 [menu_order] => 851 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [32] => WP_Post Object ( [ID] => 1254 [post_author] => 26 [post_date] => 2010-12-30 07:00:41 [post_date_gmt] => 2010-12-30 07:00:41 [post_content] => One typical question NetSPI receives from IT managers is "What does PA-DSS entail?" Hopefully, this will give you some answers.

PA-DSS

PA-DSS is a set of security practices and requirements developed by the PCI Security Standards Council to "...enhance payment account data security by driving education and awareness of the PCI Security Standards.[1]" The goal of PA-DSS is to help software vendors and others develop secure payment applications that do not store prohibited data, such as full magnetic stripe, CVV2 or PIN data, and ensure their payment applications support compliance with the PCI DSS. Payment applications that are sold, distributed or licensed to third parties are subject to the PA-DSS requirements.[2] By ensuring the compliance of your application with PA-DSS requirements, your company helps facilitate its customers' PCI DSS compliance.

NetSPI's Answer

NetSPI has developed a program guide to assist your company in getting payment applications validated. This guide prepares a company for the audit and helps it better understand the requirements of the audit's different pieces. These include the documentation requirements for the implementation guide, troubleshooting procedures, SDLC documentation (including change control), vulnerability and software patching procedures, and the required training materials. It also covers the interviews that will occur, as well as the testing of the application. What the program guide does not do is tell the different people in the company what is expected of them before, during, and after the audit. This validation process can be simple and easy, or it can be long and tedious. Work with your auditor; they have the experience to get you through the process.

Before the Audit

As a manager, you have processes that must be planned for and started before the auditors come into your office to begin the audit. The application has to meet the PA-DSS requirements, which include:
  • Do not retain full magnetic stripe, card validation code or value (CAV2, CID, CVC2, CVV2), or PIN block data
  • Protect stored cardholder data
  • Provide secure authentication features
  • Log payment application activity
  • Develop secure payment applications
  • Protect wireless transmissions
  • Test payment applications to address vulnerabilities
  • Facilitate secure network implementation
  • Cardholder data must never be stored on a server connected to the Internet
  • Facilitate secure remote software updates
  • Facilitate secure remote access to payment application
  • Encrypt sensitive traffic over public networks
  • Encrypt all non-console administrative access
  • Maintain instructional documentation and training programs for customers, resellers, and integrators
In addition to the application requirements, the documentation also has to be ready. The list of documentation includes:
  • Implementation guide - The most important document without which testing cannot start
  • Typical network deployment diagram and data flow diagram
  • SDLC documentation (coding standards, code review process, software testing procedures)
  • Development and test job descriptions
  • Change control procedures
  • Test/QA procedures
  • List of all custom application accounts (if applicable)
  • Web application testing procedures (if web-based application or web-based components)
  • Wireless configuration guidelines (if applicable)
  • Remote access documentation (if applicable)
  • Encryption methodology (key management, generation, storage, distribution)
  • Update process documentation
  • Documentation of remote transmission of cardholder data, such as IPSec, TLS, SSL
  • New security vulnerabilities identification process/policy documentation
In many instances, use of specific language within policies is required. For example, the implementation guide requirements include required language, such as "Historical data (magnetic stripe data, card validation codes, PINs, or PIN blocks) MUST be removed for PCI compliance." This wording is required by the PCI Council and, if not included, can provide sufficient grounds for rejection of the Report on Validation (ROV). NetSPI's PA-DSS Program Guide has been developed expressly with the intent of identifying such wording requirements. As shown in the list above, documentation requirements are not limited to the implementation guide and need to be completed before an ROV can be filed. It's not enough to have processes in place, such as secure coding standards; they need to be formally documented. Make sure to review the documentation requirements and confirm that your documents are up to date. The last, but far from least, important part of the pre-audit process is to educate your employees on the PCI Council's requirements for a payment application. They need to know that these requirements are not an optional part of the application and that they may be interviewed during the course of the audit. All team members should be familiar with established standards, such as the SDLC documentation, and be aware of the troubleshooting requirements as described in the process documentation.

Next Steps

The next blog entry will talk about what to expect during and after the audit.
[1] https://www.pcisecuritystandards.org/index.shtml [2] https://www.pcisecuritystandards.org/security_standards/pa_dss.shtml [post_title] => IT Manager's guide to passing the PA-DSS Audit [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => it-managers-guide-to-passing-the-pa-dss-audit [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:06:08 [post_modified_gmt] => 2021-04-13 00:06:08 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1254 [menu_order] => 854 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [33] => WP_Post Object ( [ID] => 1257 [post_author] => 26 [post_date] => 2010-11-23 07:00:54 [post_date_gmt] => 2010-11-23 07:00:54 [post_content] => In a previous post, I mentioned that the Security Standards Council would be releasing a new version of the Self Assessment Questionnaire (SAQ) for merchants using virtual terminal environments for processing cardholder data. Well, say hello to the SSC’s new bundle of joy; called the SAQ C-VT, this version is applicable to “merchants who process cardholder data only via isolated virtual terminals on computers connected to the Internet”. This should come as a welcome addition to merchants who developed blank stares and involuntary tics as they tried to figure out which SAQ was applicable to their browser-based terminals. Full disclosure: I am guilty of getting into heated (and very boring to anyone else) discussions about the applicability of certain SAQs in various browser-based situations; this should simplify the discourse in the future, and leave time to discuss more important matters (such as why the 2010 Vikings should be traded to the Mexican Football League). I also think that the clarifications will have some positive implications for securing virtual terminal workstations. This is important, as two typical arguments that I have run into when doing an assessment involving virtual terminals go something like:
  1. “The terminal just has a web browser, and our payment gateway uses SSL. This workstation is out of scope and we don’t have to secure it. Antivirus is too expensive. Hold on while I install this kitten screensaver someone sent me through my AOL account on this terminal.”
  2. “We don’t know how to deal with this PC, so we are cutting our company’s Christmas bonuses to pay for locking it down. We hired a team of InfoSec PhDs to harden the workstation and we have moved it into its own datacenter. To access it you have to go through a screening process that was rejected by the TSA for being too intrusive.”
To address the lines of thinking above, the SSC created the SAQ C-VT, which makes it pretty clear that virtual terminal workstations must be segmented and secured; the SAQ C-VT also provides streamlined requirements that are much more aligned with a virtual terminal’s function and typical configuration. For SAQ C-VT eligibility, a merchant must meet the following criteria:
  • Merchant’s only payment processing is via a virtual terminal accessed by an Internet-connected web browser;
  • Merchant accesses the virtual terminal via a computer that is isolated in a single location, and is not connected to other locations or systems within your environment;
  • Merchant’s virtual terminal solution is provided and hosted by a PCI DSS validated third party service provider;
  • Merchant’s computer does not have software installed that causes cardholder data to be stored (for example, there is no software for batch processing or store-and-forward);
  • Merchant’s computer does not have any attached hardware devices that are used to capture or store cardholder data (for example, there are no card readers attached);
  • Merchant does not otherwise receive or transmit cardholder data electronically through any channels (for example, via an internal network or the Internet);
  • Merchant does not store cardholder data in electronic format (for example, cardholder data is not stored in sales and marketing tools such as CRM); and
  • If merchant does store cardholder data, such data is only in paper reports or copies of paper receipts and is not received electronically.
The full SAQ C (v 2.0) contains 80 requirements; the SAQ C-VT has a trimmed down 51. The changes are summarized below:
  • In SAQ C-VT, a personal firewall is required
  • The requirements to encrypt non-console access have been removed
  • There are no longer 3.x requirements for storing authentication data, as you wouldn’t be doing this in a VT situation
  • Encryption strength and security protocol configuration requirements are removed (4.1(c) and (d))
  • There are no longer requirements for encryption of wireless transmissions of cardholder data
  • Antivirus and patching requirements stay the same (as they should)
  • Remote access for vendors and two-factor authentication requirements are gone
  • Quarterly wireless scans/NAC for detection of rogue access points are gone, as well as the associated requirement for inclusion of wireless access point detection in the incident response plan
  • This is a BIG ONE: Internal and external quarterly scan requirements are gone. This in itself should make a very compelling argument to ditch the card swipe and start typing out those card numbers
  • Several requirements for ‘critical technologies’ are gone, including authentication for use of remote access, acceptable network locations for use of the technology, and automatic disconnection and activation of remote access technologies
Also, props to Branden Williams (truly a gentleman and a scholar) for being the first to make me aware of the release of the new 2.0 SAQs on his excellent blog. [post_title] => Virtual Terminals and PCI 2.0 - Introducing the SAQ C-VT [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => virtual-terminals-and-pci-2-0-introducing-the-saq-c-vt [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:06:16 [post_modified_gmt] => 2021-04-13 00:06:16 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1257 [menu_order] => 857 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [34] => WP_Post Object ( [ID] => 1261 [post_author] => 26 [post_date] => 2010-09-24 07:00:54 [post_date_gmt] => 2010-09-24 07:00:54 [post_content] => Some of the team from NetSPI spent the week in sunny Orlando at the 2010 PCI North American Community Meeting.  As most are aware, this year's meeting was particularly significant as a new version of the Data Security Standard, 2.0, which has now been released and effective as of 1/2011. The new standard is so advanced that it went from 1.2.1 to 2.0, incorporating a 0.7.9's worth of changes in a single revision(!). The last few days' sessions were a great opportunity to review the changes with the SSC and card brand representatives, catch up with others in the industry, and dispel rumors about the new DSS version (there will be no Requirement 13 mandating the use of ninjas to protect cardholder data, and Ouija boards cannot be used for wireless access point discovery).   It should also be noted (if my wife is reading this), there was absolutely no beer consumed at Kook's Sports Bar and all discussions were reasoned, civil discourses that ended promptly at 9:00 PM to allow for a full night's sleep. As far as the changes to the DSS, it should come as no surprise that there were not many surprises to be found.  As was pointed out several times throughout the course of our sessions, the DSS is a mature framework with a rising adoption rate throughout the world; major changes could have serious financial and operational repercussions on merchants and service providers who have already incorporated the DSS into their environments.  Keeping that in mind, the intent of v2.0 is to provide additional guidance and clarification based on the (apparently) thousands of communications that the SSC received in response to their request for feedback, and my first impression is that it succeeds in that respect.  Below are some of the highlights I picked up on from the meeting and SSC-supplied docs, in no particular order: 
  • Clarification that PAN is the primary element for applicability of the DSS to a merchant/service provider environment and is the definition of 'cardholder data'
  • Sampling requirements will be more detailed, and will require more justification as to why the sampling methodology used for an assessment is considered sufficient
  • There are clarifications for issuers that have a business need to store sensitive authentication data (SAD), which should provide more specific guidelines for retention and protection of SAD
  • Additional requirements to secure PAN if hashing and truncation are used in the same environment, to reduce the possibility of malicious users reconstructing full account numbers
  • At this point, an automated or DLP-type solution is NOT required for data discovery and scoping of the cardholder data environment, though tools of this nature can/should be used where appropriate
  • "System components" now includes virtual systems in the definition
  • Requirement 6 has been overhauled to merge internal and web applications, and "industry best practices" no longer means just "OWASP"; it now includes SANS, CERT, CWE, etc.
  • News flash- two passwords are not considered "2-factor". Glad we got that one clarified.
  • Requirement 11 allows for physical site inspection for rogue AP discovery if appropriate. I can't see this working well in a large physical environment, but may work for mom-and-pop retailers who can see every wire on their router. I can't wait for my first opportunity to write a comment for 11.1 that includes "Bob, the IT guy, climbs through air ducts and drop ceilings on a quarterly basis to identify potential rogue APs"
  • IDS/IPS must be included at the perimeter of the cardholder data environment and 'key points', as opposed to monitoring all traffic in an environment
  • There was some discussion around a new SAQ C that would be applicable to 'virtual terminal' environments. This is a work in progress, and I didn't hear an official release date
There are many other tweaks not included above, but no real game-changers in my opinion. I know not everyone will be happy with all of the revisions, but the DSS is by its nature a compromise between global applicability for all types of environments and nuts-and-bolts implementation.  There will still be requirements that have QSAs and clients scratching their heads, but my impressions are that many of the clarifications are long overdue and should make many of the requirements easier to interpret, test, and enforce.  Ninjas will just have to wait for version 3; be sure to get your feedback in early. [post_title] => What’s New in PCI DSS 2.0 – No Surprise That There Are No Surprises [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => whats-new-in-pci-dss-2-0-no-surprise-that-there-are-no-surprises [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:06:17 [post_modified_gmt] => 2021-04-13 00:06:17 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1261 [menu_order] => 861 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [35] => WP_Post Object ( [ID] => 1263 [post_author] => 26 [post_date] => 2010-09-09 07:00:41 [post_date_gmt] => 2010-09-09 07:00:41 [post_content] => We were asked by a customer about performing code review based on the PCI requirements. The questions they asked were:
  • Is there a checklist that exists that covers all of the PCI requirements?
  • Are there requirements such as not storing PAN un-encrypted?
  • What about not storing full track data or other restricted data?
  • Are there considerations outside of OWASP?
  • Can you recommend a simple resource for all PCI-related requirements?
At this point in time, there does not seem to be a single source of reference that answers all of these questions. Since there is no definitive source, this document covers some of the PCI requirements in relation to code reviews. A code review includes reviewing all of the code for the OWASP Top 10 Web Application Security Risks for 2010. The OWASP Top 10 covers the relevant PCI requirements and answers most, if not all, of the above questions. The OWASP Top 10 Web Application Security Risks for 2010:
  1. Injection flaws - such as SQL, OS and LDAP
  2. Cross-Site Scripting (XSS)
  3. Broken Authentication and Session Management
  4. Insecure Direct Object References - exposing a file, directory or database key
  5. Cross-Site Request Forgery (CSRF)
  6. Security Misconfiguration
  7. Insecure Cryptographic Storage
  8. Failure to Restrict URL Access
  9. Insufficient Transport Layer Protection
  10. Unvalidated Redirects and Forwards
Risk number 7 specifically covers the question about storing PANs unencrypted. As for storing track data, this is partially covered by risk number 7. Track data is sensitive and needs to be encrypted while stored, but it can only be stored pre-authorization. Once the transaction has been authorized, the data must be securely deleted. This requirement is not covered by the OWASP Top 10. Here is a complete list of the PCI requirements as they relate to the OWASP Top 10 (see the list above):
Requirement 1: Install and maintain a firewall configuration to protect cardholder data - this requirement is not typically covered in a code review
Requirement 2: Do not use vendor-supplied defaults for system passwords and other security parameters - this requirement is covered under risk number 6
Requirement 3: Protect stored cardholder data - this requirement is covered under risk number 7
Requirement 4: Encrypt transmission of cardholder data across open, public networks - this requirement is covered under risk number 9
Requirement 5: Use and regularly update anti-virus software or programs - this requirement is not typically covered in a code review
Requirement 6: Develop and maintain secure systems and applications - this is fully encompassed in the OWASP Top 10 (both 2007 and 2010 versions)
Requirement 7: Restrict access to cardholder data by business need to know - this requirement is partially covered by risk number 4
Requirement 8: Assign a unique ID to each person with computer access - this requirement is not typically covered in a code review
Requirement 9: Restrict physical access to cardholder data - this requirement is not typically covered in a code review
Requirement 10: Track and monitor all access to network resources and cardholder data - this requirement is not covered in the OWASP Top 10
Requirement 11: Regularly test security systems and processes - this requirement is not typically covered in a code review
Requirement 12: Maintain a policy that addresses information security for employees and contractors - this requirement is not typically covered in a code review
Start your code review checklist with the OWASP Code Review Guide and add to it for those requirements that are not covered by this guide. This includes securely deleting sensitive data (PANs, track data, keys, etc.) and application logging. Another place to start or append to your checklist, if you develop .NET applications, would be Microsoft's Index of Checklists. [post_title] => Performing Code Reviews to PCI Requirements [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => performing-code-reviews-to-pci-requirements [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:06:12 [post_modified_gmt] => 2021-04-13 00:06:12 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1263 [menu_order] => 862 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [36] => WP_Post Object ( [ID] => 1280 [post_author] => 26 [post_date] => 2010-05-05 07:00:41 [post_date_gmt] => 2010-05-05 07:00:41 [post_content] => As an organization that performs a large volume of code reviews and penetration tests, NetSPI is frequently asked which type of application assessment is the best option. Your primary options are a code review or a web application penetration test.
Both are recommended and both find many of the vulnerabilities commonly found in web applications as defined by the Open Web Application Security Project (OWASP) Top 10 (https://www.owasp.org). By itself, neither a code review nor a web application penetration test finds all of the vulnerabilities that threaten the application.

Why perform them?

Many regulations either require them or highly recommend them. For example, the Payment Card Industry Data Security Standard (PCI-DSS) requires that either a code review or a web application vulnerability assessment (web application penetration test) be performed annually on any web application that stores, processes, or transmits credit cards (PCI Requirement 6.6). In addition, for payment application vendors (e.g., point-of-sale applications), the PCI Payment Application Data Security Standard (PA-DSS) requires a code review and a penetration test targeting the OWASP Top 10.

Picking one over the other

Even though code reviews and web application penetration tests can find most of the same vulnerabilities, they look at the application differently, and as a result their findings can differ. Typically, both approaches find OWASP Top 10 issues such as SQL injection, cross-site scripting (XSS), etc. However, the efficiency and effectiveness with which each method finds these vulnerabilities can differ. For example, code reviews are better at finding most instances of input validation issues (e.g., XSS or SQL injection). All of the automated code scanning tools NetSPI uses trace the data within the application from its entry point to its exit point. A web application penetration test can find these instances, but it could take days or weeks to prove they exist in the application.
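To give a feel for that entry-point-to-exit-point tracing (often called taint analysis), here is a toy Python model. Real static analyzers work on the parsed source of the entire application rather than at runtime, so treat this only as an illustration of the concept:

class Tainted(str):
    """A string that remembers it came from an untrusted source."""
    def __add__(self, other):
        return Tainted(str.__add__(self, other))   # taint survives concatenation
    def __radd__(self, other):
        return Tainted(str.__add__(other, self))

def source(request_param: str) -> Tainted:
    return Tainted(request_param)                  # entry point: user input

def sanitize(value: str) -> str:
    return value.replace("'", "''")                # returns plain str: taint cleared

def sql_sink(query: str) -> None:
    if isinstance(query, Tainted):
        raise RuntimeError("tainted data reached a SQL sink unsanitized")
    print("executing:", query)

user = source("o'brien")
sql_sink("SELECT * FROM t WHERE name = '" + sanitize(user) + "'")  # passes
try:
    sql_sink("SELECT * FROM t WHERE name = '" + user + "'")        # flagged
except RuntimeError as finding:
    print("analyzer finding:", finding)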

Web application penetration testing

A web application penetration test can uncover vulnerabilities from the outside looking in. These vulnerabilities can be related to configuration or versions. An example might be an older version of Apache with a chunked encoding overflow vulnerability (https://osvdb.org/838). Web application penetration tests can also uncover vulnerabilities that are related to the operation of the application, such as default or easily guessed credentials. Other types of vulnerabilities found by a web application penetration test include:
  • Access control (forced browsing, etc.)
  • Session hijacking
  • Vulnerabilities related to business logic
In addition, web application penetration testing can find these instances more easily than a code review can. That said, I am talking about a third-party code review, not a code review done by a person or persons familiar with the code or the company’s development processes. Automated source code analysis tools do not find these types of vulnerabilities. Manual review can be done, but it could greatly inflate the cost of the code review.

Code Reviews

A code review looks at the application from the inside out. Vulnerabilities commonly found in a code review that cannot be easily found in an application penetration test include logging of sensitive data or application backdoors, as they are not exposed to the outside.  Other types of vulnerabilities found by a code review include:
  • Denial of services caused by not releasing resources
  • Buffer overflows
  • Missing or poor error handling
  • Dangerous functions
  • Hardcoded passwords or keys in the source code (a minimal scanning sketch follows this list)
  • Code implementation problems
  • Missing or poor logging
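As a minimal sketch of one such check (the hardcoded credentials item above), the snippet below greps a source tree for likely hardcoded secrets. The regex and the .py file filter are illustrative assumptions; a real review pairs tooling like this with manual reading:

import re
from pathlib import Path

SUSPECT = re.compile(
    r"""(password|passwd|secret|api_?key)\s*=\s*["'][^"']{4,}["']""",
    re.IGNORECASE,
)

def scan_tree(root: str) -> None:
    for path in Path(root).rglob("*.py"):   # extend to .java, .cs, config files...
        for lineno, line in enumerate(path.read_text(errors="ignore").splitlines(), 1):
            if SUSPECT.search(line):
                print(f"{path}:{lineno}: possible hardcoded credential: {line.strip()}")

scan_tree(".")   # scan the current source tree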

Final Thoughts

The most comprehensive approach to finding security vulnerabilities in web applications is performing both a code review and a web application penetration test. For critical applications, performing only one of these services can result in many vulnerabilities remaining within the application and unacceptable risk to the organization. [post_title] => Are You Testing Your Web Application for Vulnerabilities? [post_excerpt] => [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => are-you-testing-your-web-application-for-vulnerabilities [to_ping] => [pinged] => [post_modified] => 2021-04-13 00:06:22 [post_modified_gmt] => 2021-04-13 00:06:22 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=1280 [menu_order] => 876 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) ) [post_count] => 37 [current_post] => -1 [before_loop] => 1 [in_the_loop] => [post] => WP_Post Object ( [ID] => 7654 [post_author] => 26 [post_date] => 2017-06-06 07:00:19 [post_date_gmt] => 2017-06-06 07:00:19 [post_content] => OWASP has just released their release candidate of the Top 10 most critical web application security risks. While no major changes were included, i.e. Injection is still number one in the list, they added two new ones:
  1. A7 – Insufficient Attack Protection
  2. A10 – Underprotected APIs
This blog discusses the first.

A7 – Insufficient Attack Protection

OWASP stated the reason for the addition as being: For years, we’ve considered adding insufficient defenses against automated attacks. Based on the data call, we see that the majority of applications and APIs lack basic capabilities to detect, prevent, and respond to both manual and automated attacks. Application and API owners also need to be able to deploy patches quickly to protect against attacks. An application must protect itself against attacks not just by validating input, but also by detecting and blocking attempts to exploit security vulnerabilities. The application must try to detect and prevent these attempts, log them, and respond to them. What are some examples of attacks that should be handled?
  • Brute force attacks to guess user credentials
  • Flooding user’s email accounts using email forms in the application
  • Attempting to determine valid credit card numbers from stolen cards
  • Denial of service by flooding the application with many requests
  • XSS or SQL Injection attacks by automated tools
A more complete list can be found here, but the ways in which they are handled are all very similar.

Prevention

The first step is to prevent these types of attacks. Consider building some protections into the application itself. These include:
  • Remove or limit the value of the data accessible through the application; can it be changed, masked, or removed?
  • Create use (abuse) cases that simulate automated web attacks.
  • Identify and restrict automated attacks by recognizing automation techniques to determine whether the requests are being made by a human or by an automated tool.
  • Make sure the user is authenticated and authorized to use the application.
  • Consider using CAPTCHA when high value functions are being performed.
  • Set limits on how many transactions can be performed over a specified time; consider doing this by user or group of users, device, or IP address (see the rate-limiting sketch after this list).
  • Consider using web application firewalls that detect these types of attacks. An alternative is OWASP AppSensor or similar, which is built into the application to detect these types of attacks.
  • Build restrictions into your terms and conditions; require users not to use automated tools when using the application.
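As promised above, here is a minimal sliding-window rate limiter in Python. The limits, the choice of key (user, device, or IP), and the in-memory storage are illustrative assumptions; a production version would typically use a shared store such as Redis:

import time
from collections import defaultdict, deque

class RateLimiter:
    def __init__(self, max_requests: int, window_seconds: float):
        self.max_requests = max_requests
        self.window = window_seconds
        self.history = defaultdict(deque)   # key -> timestamps of recent requests

    def allow(self, key: str) -> bool:
        now = time.monotonic()
        q = self.history[key]
        while q and now - q[0] > self.window:   # drop entries outside the window
            q.popleft()
        if len(q) >= self.max_requests:
            return False                        # over the limit: block, CAPTCHA, or alert
        q.append(now)
        return True

limiter = RateLimiter(max_requests=5, window_seconds=60)
for attempt in range(7):
    print(attempt + 1, "allowed" if limiter.allow("203.0.113.7") else "blocked")
# attempts 6 and 7 print "blocked"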
Other items to consider using include:
  • Network firewalls
  • Load balancers
  • Anti-DDoS systems
  • Intrusion Detection System (IDS) and Intrusion Prevention System (IPS)
  • Data Loss Prevention

Detection

An application must determine whether activity is an attack or just suspicious. The response must be appropriate based on which of these is true.
  • Could the user have made a mistake when entering the data?
  • Is the process being followed for the application or is the user trying to jump past steps in the process?
  • Does the user need a special tool or knowledge?
If any two of the above items are true, then it is most likely an attack rather than merely suspicious activity. Is it possible that the requests are coming in at a very high rate? A typical user may make one request every couple of seconds, whereas a tool such as Burp Suite Pro or WebInspect may make many requests per second. The application should also detect these types of attacks, as well as attempts to find vulnerabilities in the application.
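As a toy illustration of that timing heuristic, the sketch below classifies a session as automated when its average gap between requests is far below human reading-and-clicking speed; the half-second threshold and five-request minimum are assumptions to tune per application:

HUMAN_MIN_INTERVAL = 0.5   # seconds; illustrative threshold

def looks_automated(request_times: list) -> bool:
    """request_times: monotonic timestamps of one session's recent requests."""
    if len(request_times) < 5:             # too little data to judge
        return False
    gaps = [b - a for a, b in zip(request_times, request_times[1:])]
    return sum(gaps) / len(gaps) < HUMAN_MIN_INTERVAL

print(looks_automated([0.0, 2.1, 4.0, 6.3, 8.2, 10.4]))        # False: human-paced
print(looks_automated([0.00, 0.05, 0.09, 0.15, 0.21, 0.26]))   # True: scanner-like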

Response

The application can handle detected attacks, or even the suspicion of attacks, in a variety of ways. The first should be a warning to the user, which deters a normal user by making clear that their activities are being monitored. It also warns a malicious user that certain events are being monitored, though it will probably not deter them. The application could, based on further activity after the warning, either log the user out or lock the user out. If a logout is performed, automated tools can be programmed to automatically re-authenticate the user. If lockout is chosen, then all activity will stop. In any of the above cases, a legitimate user may end up calling the help desk, so the application must log this type of activity and notify the application’s administrators. The log must be reviewed to determine whether the response was appropriate. Choosing which action to perform depends on the sensitivity of the data within the application. A public website must be more forgiving to prevent overreaction to suspicious activities, whereas an application with highly sensitive data must respond quickly to suspicious activity.
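A minimal AppSensor-style sketch of this warn/logout/lockout escalation might look like the following; the event weights and thresholds are illustrative assumptions and should depend on the sensitivity of the application's data:

from collections import defaultdict

WARN, LOGOUT, LOCKOUT = 3, 6, 10        # escalation thresholds (illustrative)
scores = defaultdict(int)

def record_suspicious_event(user: str, weight: int, log: list) -> str:
    """Accumulate suspicion for a user and return the response to take."""
    scores[user] += weight
    score = scores[user]
    if score >= LOCKOUT:
        action = "lockout"   # all activity stops; help desk must re-enable
    elif score >= LOGOUT:
        action = "logout"    # tools may simply re-authenticate, so keep logging
    elif score >= WARN:
        action = "warn"
    else:
        action = "none"
    log.append(f"user={user} score={score} action={action}")   # review + notify admins
    return action

events = []
for weight in (1, 2, 3, 5):   # e.g. bad input, skipped step, scanner user-agent
    record_suspicious_event("mallory", weight, events)
print("\n".join(events))      # the final event escalates to "lockout"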

Conclusion

The OWASP Top 10 2017 A7 – Insufficient Attack Protection requires the application to prevent, detect, and respond to attacks. This could affect other regulations and standards, such as PCI, which bases its requirements on the OWASP Top 10.

References

[post_title] => Application Self Protection - A New Addition to the OWASP Top 10 [post_excerpt] => OWASP has just released their release candidate of the Top 10 most critical web application security risks. While no major changes were included, they added two new ones. This blog discusses the first one in the list: A7 – Insufficient Attack Protection [post_status] => publish [comment_status] => closed [ping_status] => closed [post_password] => [post_name] => application-self-protection [to_ping] => [pinged] => [post_modified] => 2023-03-16 09:30:50 [post_modified_gmt] => 2023-03-16 14:30:50 [post_content_filtered] => [post_parent] => 0 [guid] => https://netspiblogdev.wpengine.com/?p=7654 [menu_order] => 628 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [comment_count] => 0 [current_comment] => -1 [found_posts] => 37 [max_num_pages] => 0 [max_num_comment_pages] => 0 [is_single] => [is_preview] => [is_page] => [is_archive] => [is_date] => [is_year] => [is_month] => [is_day] => [is_time] => [is_author] => [is_category] => [is_tag] => [is_tax] => [is_search] => [is_feed] => [is_comment_feed] => [is_trackback] => [is_home] => 1 [is_privacy_policy] => [is_404] => [is_embed] => [is_paged] => [is_admin] => [is_attachment] => [is_singular] => [is_robots] => [is_favicon] => [is_posts_page] => [is_post_type_archive] => [query_vars_hash:WP_Query:private] => 1d01ddc60905d18a7866d85ebd70a221 [query_vars_changed:WP_Query:private] => [thumbnails_cached] => [allow_query_attachment_by_filename:protected] => [stopwords:WP_Query:private] => [compat_fields:WP_Query:private] => Array ( [0] => query_vars_hash [1] => query_vars_changed ) [compat_methods:WP_Query:private] => Array ( [0] => init_query_flags [1] => parse_tax_query ) )
