# Statistics.py
# Aidmar
from operator import itemgetter
from math import sqrt, ceil, log
import os
import time

import ID2TLib.libpcapreader as pr
import matplotlib

matplotlib.use('Agg')  # select the non-interactive Agg backend before pyplot is imported
import matplotlib.pyplot as plt

from ID2TLib.PcapFile import PcapFile
from ID2TLib.StatsDatabase import StatsDatabase


class Statistics:
    def __init__(self, pcap_file: PcapFile):
        """
        Creates a new Statistics object.

        :param pcap_file: A reference to the PcapFile object
        """
        # Fields
        self.pcap_filepath = pcap_file.pcap_file_path
        self.pcap_proc = None
        # Aidmar
        self.do_tests = False

        # Create folder for statistics database if required
        self.path_db = pcap_file.get_db_path()
        path_dir = os.path.dirname(self.path_db)
        if not os.path.isdir(path_dir):
            os.makedirs(path_dir)

        # Class instances
        self.stats_db = StatsDatabase(self.path_db)

    def load_pcap_statistics(self, flag_write_file: bool, flag_recalculate_stats: bool, flag_print_statistics: bool):
        """
        Loads the PCAP statistics for the file specified by pcap_filepath. If the database does not exist yet, the
        statistics are calculated by the PCAP file processor and saved into the newly created database. Otherwise the
        statistics are loaded directly from the existing database.

        :param flag_write_file: Indicates whether the statistics should additionally be written into a text file
        (True) or not (False)
        :param flag_recalculate_stats: Indicates whether potentially existing statistics should be recalculated
        :param flag_print_statistics: Indicates whether the gathered basic statistics should be printed to the terminal
        """
        # Load pcap and measure the loading time
        time_start = time.perf_counter()

        # Inform the user about the recalculation of the statistics and its reason
        if flag_recalculate_stats:
            print("Flag -r/--recalculate found. Recalculating statistics.")

        # Recalculate statistics if the database does not exist OR param -r/--recalculate was provided
        if (not self.stats_db.get_db_exists()) or flag_recalculate_stats:
            self.pcap_proc = pr.pcap_processor(self.pcap_filepath, str(self.do_tests))  # Aidmar - do_tests
            self.pcap_proc.collect_statistics()
            self.pcap_proc.write_to_database(self.path_db)
            outstring_datasource = "by PCAP file processor."
        else:
            outstring_datasource = "from statistics database."

        # Load statistics from database
        self.file_info = self.stats_db.get_file_info()

        time_end = time.perf_counter()
        print("Loaded file statistics in " + str(time_end - time_start)[:4] + " sec " + outstring_datasource)

        # Write statistics if param -e/--export provided
        if flag_write_file:
            self.write_statistics_to_file()

        # Print statistics if param -s/--statistics provided
        if flag_print_statistics:
            self.print_statistics()

    def get_file_information(self):
        """
        Returns a list of tuples, each containing a piece of information about the file.

        :return: a list of tuples, each consisting of (description, value, unit), where unit is optional.
        """
        return [("Pcap file", self.pcap_filepath),
                ("#Packets", self.get_packet_count(), "packets"),
                ("Capture length", self.get_capture_duration(), "seconds"),
                ("Capture start", self.get_pcap_timestamp_start()),
                ("Capture end", self.get_pcap_timestamp_end())]

    def get_general_file_statistics(self):
        """
        Returns a list of tuples, each containing a file statistic.

        :return: a list of tuples, each consisting of (description, value, unit).
        """
        return [("Avg. packet rate", self.file_info['avgPacketRate'], "packets/sec"),
                ("Avg. packet size", self.file_info['avgPacketSize'], "kbytes"),
                ("Avg. packets sent", self.file_info['avgPacketsSentPerHost'], "packets"),
                ("Avg. bandwidth in", self.file_info['avgBandwidthIn'], "kbit/s"),
                ("Avg. bandwidth out", self.file_info['avgBandwidthOut'], "kbit/s")]

    @staticmethod
    def write_list(desc_val_unit_list, func, line_ending="\n"):
        """
        Takes a list of tuples (statistic name, statistic value, unit) as input, generates a string from these three
        values and applies the function func to this string.

        Before generating the string, it identifies text containing a float number, casts the string to a
        float and rounds the value to four decimal digits.

        :param desc_val_unit_list: The list of tuples consisting of (description, value, unit)
        :param func: The function to be applied to each generated string
        :param line_ending: The formatting string to be applied at the end of each string
        """
        for entry in desc_val_unit_list:
            # Convert text containing a float into a float
            (description, value) = entry[0:2]
            if isinstance(value, str) and "." in value:
                try:
                    value = float(value)
                except ValueError:
                    pass  # do nothing -> value was not a float
            # Round floats
            if isinstance(value, float):
                value = round(value, 4)
            # Write into file
            if len(entry) == 3:
                unit = entry[2]
                func(description + ":\t" + str(value) + " " + unit + line_ending)
            else:
                func(description + ":\t" + str(value) + line_ending)
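
    # Usage sketch (illustrative, not part of the original code): a float-like
    # string value is parsed and rounded to four decimal digits before formatting, e.g.
    #   Statistics.write_list([("Avg. packet rate", "12.34567", "packets/sec")], print, "")
    # prints "Avg. packet rate:\t12.3457 packets/sec".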

    def print_statistics(self):
        """
        Prints the basic file statistics to the terminal.
        """
        print("\nPCAP FILE INFORMATION ------------------------------")
        Statistics.write_list(self.get_file_information(), print, "")
        print("\nGENERAL FILE STATISTICS ----------------------------")
        Statistics.write_list(self.get_general_file_statistics(), print, "")
        print("\n")

    # Aidmar
    def calculate_entropy(self, data, frequency):
        """
        Calculates the Shannon entropy (in bits) of the given value/frequency distribution.

        :param data: The list of observed values
        :param frequency: The list of occurrence counts, aligned with data
        :return: The entropy of the distribution
        """
        entropy = 0
        sumFreq = sum(frequency)
        for i, x in enumerate(data):
            p_x = float(frequency[i] / sumFreq)
            if p_x > 0:
                entropy += - p_x * log(p_x, 2)
        return entropy
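
    # Worked example (illustrative, not part of the original code): a uniform
    # distribution over n values yields log2(n) bits, so four TTL values observed
    # ten times each carry two bits of entropy:
    #   self.calculate_entropy([32, 64, 128, 255], [10, 10, 10, 10])  # ~2.0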

    # Aidmar
    def get_tests_statistics(self):
        """
        Returns the calculated basic defects tests statistics.

        :return: a list of tuples, each consisting of (description, value, unit).
        """
        # self.stats_db._process_user_defined_query returns a list of tuples, thus we need [0][0] to access the data
        sumPayloadCount = self.stats_db._process_user_defined_query("SELECT sum(payloadCount) FROM interval_statistics")
        pktCount = self.stats_db._process_user_defined_query("SELECT packetCount FROM file_statistics")
        payloadRatio = 0
        if pktCount[0][0] != 0:
            payloadRatio = float(sumPayloadCount[0][0] / pktCount[0][0] * 100)

        incorrectChecksumCount = self.stats_db._process_user_defined_query(
            "SELECT sum(incorrectTCPChecksumCount) FROM interval_statistics")
        correctChecksumCount = self.stats_db._process_user_defined_query(
            "SELECT avg(correctTCPChecksumCount) FROM interval_statistics")
        incorrectChecksumRatio = 0
        if (incorrectChecksumCount[0][0] + correctChecksumCount[0][0]) != 0:
            incorrectChecksumRatio = float(
                incorrectChecksumCount[0][0] / (incorrectChecksumCount[0][0] + correctChecksumCount[0][0]) * 100)

        def calc_normalized_avg(valuesList):
            # NOTE: the normalization by sumValues is currently disabled,
            # so this returns the plain average of the values.
            sumValues = 0
            for x in valuesList:
                sumValues += x[0]
            normalizedValues = []
            for row in valuesList:
                normalizedValues.append(float(row[0]))  # / sumValues))
            return float(sum(normalizedValues) / len(normalizedValues))

        newIPCount = self.stats_db._process_user_defined_query("SELECT newIPCount FROM interval_statistics")
        avgNewIPCount = calc_normalized_avg(newIPCount)

        result = self.stats_db._process_user_defined_query("SELECT ipSrcCumEntropy FROM interval_statistics")
        ipSrcEntropy = result[-1][0]
        result = self.stats_db._process_user_defined_query("SELECT ipDstCumEntropy FROM interval_statistics")
        ipDstEntropy = result[-1][0]

        newTTLCount = self.stats_db._process_user_defined_query("SELECT newTTLCount FROM interval_statistics")
        avgNewTTLCount = calc_normalized_avg(newTTLCount)
        result = self.stats_db._process_user_defined_query(
            "SELECT ttlValue,SUM(ttlCount) FROM ip_ttl GROUP BY ttlValue")
        data, frequency = [], []
        for row in result:
            data.append(row[0])
            frequency.append(row[1])
        ttlEntropy = self.calculate_entropy(data, frequency)

        newWinSizeCount = self.stats_db._process_user_defined_query("SELECT newWinSizeCount FROM interval_statistics")
        avgNewWinCount = calc_normalized_avg(newWinSizeCount)
        result = self.stats_db._process_user_defined_query(
            "SELECT winSize,SUM(winCount) FROM tcp_syn_win GROUP BY winSize")
        data, frequency = [], []
        for row in result:
            data.append(row[0])
            frequency.append(row[1])
        winEntropy = self.calculate_entropy(data, frequency)

        newToSCount = self.stats_db._process_user_defined_query("SELECT newToSCount FROM interval_statistics")
        avgNewToSCount = calc_normalized_avg(newToSCount)
        result = self.stats_db._process_user_defined_query(
            "SELECT tosValue,SUM(tosCount) FROM ip_tos GROUP BY tosValue")
        data, frequency = [], []
        for row in result:
            data.append(row[0])
            frequency.append(row[1])
        tosEntropy = self.calculate_entropy(data, frequency)

        newMSSCount = self.stats_db._process_user_defined_query("SELECT newMSSCount FROM interval_statistics")
        avgNewMSSCount = calc_normalized_avg(newMSSCount)
        result = self.stats_db._process_user_defined_query(
            "SELECT mssValue,SUM(mssCount) FROM tcp_mss_dist GROUP BY mssValue")
        data, frequency = [], []
        for row in result:
            data.append(row[0])
            frequency.append(row[1])
        mssEntropy = self.calculate_entropy(data, frequency)

        return [("Payload ratio", payloadRatio, "%"),
                ("Incorrect TCP checksum ratio", incorrectChecksumRatio, "%"),
                ("Avg. new IP", avgNewIPCount, ""),
                ("IP Src Entropy", ipSrcEntropy, ""),
                ("IP Dst Entropy", ipDstEntropy, ""),
                ("Avg. new TTL", avgNewTTLCount, ""),
                ("TTL Entropy", ttlEntropy, ""),
                ("Avg. new WinSize", avgNewWinCount, ""),
                ("WinSize Entropy", winEntropy, ""),
                ("Avg. new ToS", avgNewToSCount, ""),
                ("ToS Entropy", tosEntropy, ""),
                ("Avg. new MSS", avgNewMSSCount, ""),
                ("MSS Entropy", mssEntropy, "")]

    def write_statistics_to_file(self):
        """
        Writes the calculated basic statistics into a file.
        """

        def _write_header(title: str):
            """
            Writes the section header into the open file.

            :param title: The section title
            """
            target.write("====================== \n")
            target.write(title + " \n")
            target.write("====================== \n")

        target = open(self.pcap_filepath + ".stat", 'w')
        target.truncate()

        _write_header("PCAP file information")
        Statistics.write_list(self.get_file_information(), target.write)

        _write_header("General statistics")
        Statistics.write_list(self.get_general_file_statistics(), target.write)

        _write_header("Tests statistics")
        Statistics.write_list(self.get_tests_statistics(), target.write)

        target.close()

    def get_capture_duration(self):
        """
        :return: The duration of the capture in seconds
        """
        return self.file_info['captureDuration']

    def get_pcap_timestamp_start(self):
        """
        :return: The timestamp of the first packet in the PCAP file
        """
        return self.file_info['timestampFirstPacket']

    def get_pcap_timestamp_end(self):
        """
        :return: The timestamp of the last packet in the PCAP file
        """
        return self.file_info['timestampLastPacket']

    def get_pps_sent(self, ip_address: str):
        """
        Calculates the packets per second sent by a given IP address.

        :param ip_address: The IP address whose packets per second should be calculated
        :return: The packets per second sent by the given IP address
        """
        packets_sent = self.stats_db.process_db_query("SELECT pktsSent from ip_statistics WHERE ipAddress=?", False,
                                                      (ip_address,))
        capture_duration = float(self.get_capture_duration())
        return int(float(packets_sent) / capture_duration)

    def get_pps_received(self, ip_address: str):
        """
        Calculates the packets per second received by a given IP address.

        :param ip_address: The IP address used for the calculation
        :return: The number of packets per second received
        """
        packets_received = self.stats_db.process_db_query("SELECT pktsReceived FROM ip_statistics WHERE ipAddress=?",
                                                          False,
                                                          (ip_address,))
        capture_duration = float(self.get_capture_duration())
        return int(float(packets_received) / capture_duration)

    def get_packet_count(self):
        """
        :return: The number of packets in the loaded PCAP file
        """
        return self.file_info['packetCount']

    def get_most_used_ip_address(self):
        """
        :return: The IP address/addresses with the highest sum of packets sent and received
        """
        return self.process_db_query("most_used(ipAddress)")

    def get_ttl_distribution(self, ipAddress: str):
        """
        :param ipAddress: The IP address whose TTL distribution should be determined
        :return: A dictionary mapping each TTL value used by the IP address to its packet count
        """
        result = self.process_db_query('SELECT ttlValue, ttlCount from ip_ttl WHERE ipAddress="' + ipAddress + '"')
        result_dict = {key: value for (key, value) in result}
        return result_dict

    def get_random_ip_address(self, count: int = 1):
        """
        :param count: The number of IP addresses to return
        :return: A randomly chosen IP address from the dataset or, if param count is greater than one, a list of
        randomly chosen IP addresses
        """
        if count == 1:
            return self.process_db_query("random(all(ipAddress))")
        else:
            ip_address_list = []
            for i in range(0, count):
                ip_address_list.append(self.process_db_query("random(all(ipAddress))"))
            return ip_address_list

    def get_mac_address(self, ipAddress: str):
        """
        :return: The MAC address used in the dataset for the given IP address.
        """
        return self.process_db_query('macAddress(ipAddress=' + ipAddress + ")")

    # Aidmar - comment out
    # def get_mss(self, ipAddress: str):
    #     """
    #     :param ipAddress: The IP address whose used MSS should be determined
    #     :return: The TCP MSS value used by the IP address, or None if the IP address never specified an MSS
    #     """
    #     mss_value = self.process_db_query('SELECT mss from tcp_mss WHERE ipAddress="' + ipAddress + '"')
    #     if isinstance(mss_value, int):
    #         return mss_value
    #     else:
    #         return None

    # Aidmar
    def get_most_used_mss(self, ipAddress: str):
        """
        :param ipAddress: The IP address whose most used MSS should be determined
        :return: The most frequent TCP MSS value used by the IP address, or None if the IP address never specified
        an MSS
        """
        mss_value = self.process_db_query('SELECT mssValue from tcp_mss_dist WHERE ipAddress="' + ipAddress +
                                          '" ORDER BY mssCount DESC LIMIT 1')
        if isinstance(mss_value, int):
            return mss_value
        else:
            return None

    def get_statistics_database(self):
        """
        :return: A reference to the statistics database object
        """
        return self.stats_db

    def process_db_query(self, query_string_in: str, print_results: bool = False):
        """
        Executes a string identified previously as a query. This can be a standard SQL SELECT/INSERT query or a named
        query.

        :param query_string_in: The query to be processed
        :param print_results: Indicates whether the results should be printed to the terminal
        :return: The result of the query
        """
        return self.stats_db.process_db_query(query_string_in, print_results)

    def is_query(self, value: str):
        """
        Checks whether the given string is a standard SQL query (SELECT, INSERT) or a named query.

        :param value: The string to be checked
        :return: True if the string is recognized as a query, otherwise False.
        """
        if not isinstance(value, str):
            return False
        else:
            return (any(x in value.lower().strip() for x in self.stats_db.get_all_named_query_keywords()) or
                    any(x in value.lower().strip() for x in self.stats_db.get_all_sql_query_keywords()))
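
    # Example (illustrative, not part of the original code; assumes "most_used" is
    # among the named query keywords reported by the StatsDatabase, as its use in
    # get_most_used_ip_address suggests):
    #   self.is_query("most_used(ipAddress)")          # True (named query keyword)
    #   self.is_query("SELECT * FROM ip_statistics")   # True (SQL keyword)
    #   self.is_query(42)                              # False (not a string)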

    # Aidmar
    def calculate_standard_deviation(self, lst):
        """Calculates the population standard deviation for a list of numbers."""
        num_items = len(lst)
        mean = sum(lst) / num_items
        differences = [x - mean for x in lst]
        sq_differences = [d ** 2 for d in differences]
        ssd = sum(sq_differences)
        variance = ssd / num_items
        sd = sqrt(variance)
        # print('The mean of {} is {}.'.format(lst, mean))
        # print('The differences are {}.'.format(differences))
        # print('The sum of squared differences is {}.'.format(ssd))
        # print('The variance is {}.'.format(variance))
        print('The standard deviation is {}.'.format(sd))
        print('--------------------------')
        return sd
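
    # Worked example (illustrative, not part of the original code): for
    # [2, 4, 4, 4, 5, 5, 7, 9] the mean is 5, the sum of squared differences is 32,
    # the variance is 32 / 8 = 4, and the returned standard deviation is sqrt(4) = 2.0.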

    def plot_statistics(self, format: str = 'pdf'):  # alternatively e.g. 'png'
        """
        Plots the statistics associated with the dataset prior to attack injection.

        :param format: The file format to be used to save the statistics diagrams.
        """

        def plot_ttl(file_ending: str):
            plt.gcf().clear()
            result = self.stats_db._process_user_defined_query(
                "SELECT ttlValue, SUM(ttlCount) FROM ip_ttl GROUP BY ttlValue")
            graphx, graphy = [], []
            for row in result:
                graphx.append(row[0])
                graphy.append(row[1])
            plt.autoscale(enable=True, axis='both')
            plt.title("TTL Distribution")
            plt.xlabel('TTL Value')
            plt.ylabel('Number of Packets')
            width = 0.1
            plt.xlim([0, max(graphx)])
            plt.grid(True)
            plt.bar(graphx, graphy, width, align='center', linewidth=1, color='red', edgecolor='red')
            out = self.pcap_filepath.replace('.pcap', '_plot-ttl' + file_ending)
            plt.savefig(out, dpi=500)
            return out

        # Aidmar
        def plot_mss(file_ending: str):
            plt.gcf().clear()
            result = self.stats_db._process_user_defined_query(
                "SELECT mssValue, SUM(mssCount) FROM tcp_mss_dist GROUP BY mssValue")
            if result:
                graphx, graphy = [], []
                for row in result:
                    graphx.append(row[0])
                    graphy.append(row[1])
                plt.autoscale(enable=True, axis='both')
                plt.title("MSS Distribution")
                plt.xlabel('MSS Value')
                plt.ylabel('Number of Packets')
                width = 0.1
                plt.xlim([0, max(graphx)])
                plt.grid(True)
                plt.bar(graphx, graphy, width, align='center', linewidth=1, color='red', edgecolor='red')
                out = self.pcap_filepath.replace('.pcap', '_plot-mss' + file_ending)
                plt.savefig(out, dpi=500)
                return out
            else:
                print("Error plot MSS: No MSS values found!")

        # Aidmar
        def plot_win(file_ending: str):
            plt.gcf().clear()
            result = self.stats_db._process_user_defined_query(
                "SELECT winSize, SUM(winCount) FROM tcp_syn_win GROUP BY winSize")
            if result:
                graphx, graphy = [], []
                for row in result:
                    graphx.append(row[0])
                    graphy.append(row[1])
                plt.autoscale(enable=True, axis='both')
                plt.title("Window Size Distribution")
                plt.xlabel('Window Size')
                plt.ylabel('Number of Packets')
                width = 0.1
                plt.xlim([0, max(graphx)])
                plt.grid(True)
                plt.bar(graphx, graphy, width, align='center', linewidth=1, color='red', edgecolor='red')
                out = self.pcap_filepath.replace('.pcap', '_plot-win' + file_ending)
                plt.savefig(out, dpi=500)
                return out
            else:
                print("Error plot WinSize: No WinSize values found!")

        # Aidmar
        def plot_protocol(file_ending: str):
            plt.gcf().clear()
            result = self.stats_db._process_user_defined_query(
                "SELECT protocolName, SUM(protocolCount) FROM ip_protocols GROUP BY protocolName")
            if result:
                graphx, graphy = [], []
                for row in result:
                    graphx.append(row[0])
                    graphy.append(row[1])
                plt.autoscale(enable=True, axis='both')
                plt.title("Protocol Distribution")
                plt.xlabel('Protocol')
                plt.ylabel('Number of Packets')
                width = 0.1
                plt.xlim([0, len(graphx)])
                plt.grid(True)
                # Protocol names on the x-axis
                x = range(0, len(graphx))
                my_xticks = graphx
                plt.xticks(x, my_xticks)
                plt.bar(x, graphy, width, align='center', linewidth=1, color='red', edgecolor='red')
                out = self.pcap_filepath.replace('.pcap', '_plot-protocol' + file_ending)
                plt.savefig(out, dpi=500)
                return out
            else:
                print("Error plot protocol: No protocol values found!")

        # Aidmar
        def plot_port(file_ending: str):
            plt.gcf().clear()
            result = self.stats_db._process_user_defined_query(
                "SELECT portNumber, SUM(portCount) FROM ip_ports GROUP BY portNumber")
            graphx, graphy = [], []
            for row in result:
                graphx.append(row[0])
                graphy.append(row[1])
            plt.autoscale(enable=True, axis='both')
            plt.title("Port Distribution")
            plt.xlabel('Port Number')
            plt.ylabel('Number of Packets')
            width = 0.1
            plt.xlim([0, max(graphx)])
            plt.grid(True)
            plt.bar(graphx, graphy, width, align='center', linewidth=1, color='red', edgecolor='red')
            out = self.pcap_filepath.replace('.pcap', '_plot-port' + file_ending)
            plt.savefig(out, dpi=500)
            return out

        # Aidmar - This distribution is not drawable for big datasets
        def plot_ip_src(file_ending: str):
            plt.gcf().clear()
            result = self.stats_db._process_user_defined_query(
                "SELECT ipAddress, pktsSent FROM ip_statistics")
            graphx, graphy = [], []
            for row in result:
                graphx.append(row[0])
                graphy.append(row[1])
            plt.autoscale(enable=True, axis='both')
            plt.title("Source IP Distribution")
            plt.xlabel('Source IP')
            plt.ylabel('Number of Packets')
            width = 0.1
            plt.xlim([0, len(graphx)])
            plt.grid(True)
            # IPs on the x-axis
            x = range(0, len(graphx))
            my_xticks = graphx
            plt.xticks(x, my_xticks, rotation='vertical', fontsize=5)
            plt.tight_layout()
            # Limit the number of xticks
            plt.locator_params(axis='x', nbins=20)
            plt.bar(x, graphy, width, align='center', linewidth=1, color='red', edgecolor='red')
            out = self.pcap_filepath.replace('.pcap', '_plot-ip-src' + file_ending)
            plt.savefig(out, dpi=500)
            return out

        # Aidmar - This distribution is not drawable for big datasets
        def plot_ip_dst(file_ending: str):
            plt.gcf().clear()
            result = self.stats_db._process_user_defined_query(
                "SELECT ipAddress, pktsReceived FROM ip_statistics")
            graphx, graphy = [], []
            for row in result:
                graphx.append(row[0])
                graphy.append(row[1])
            plt.autoscale(enable=True, axis='both')
            plt.title("Destination IP Distribution")
            plt.xlabel('Destination IP')
            plt.ylabel('Number of Packets')
            width = 0.1
            plt.xlim([0, len(graphx)])
            plt.grid(True)
            # IPs on the x-axis
            x = range(0, len(graphx))
            my_xticks = graphx
            plt.xticks(x, my_xticks, rotation='vertical', fontsize=5)
            plt.tight_layout()
            # Limit the number of xticks
            plt.locator_params(axis='x', nbins=20)
            plt.bar(x, graphy, width, align='center', linewidth=1, color='red', edgecolor='red')
            out = self.pcap_filepath.replace('.pcap', '_plot-ip-dst' + file_ending)
            plt.savefig(out, dpi=500)
            return out

        # Aidmar
        def plot_interval_pktCount(file_ending: str):
            plt.gcf().clear()
            result = self.stats_db._process_user_defined_query(
                "SELECT lastPktTimestamp, pktsCount FROM interval_statistics ORDER BY lastPktTimestamp")
            graphx, graphy = [], []
            for row in result:
                graphx.append(row[0])
                graphy.append(row[1])
            plt.autoscale(enable=True, axis='both')
            plt.title("Packet Rate")
            plt.xlabel('Timestamp')
            plt.ylabel('Number of Packets')
            width = 0.1
            plt.xlim([0, len(graphx)])
            plt.grid(True)
            # Timestamps on the x-axis
            x = range(0, len(graphx))
            my_xticks = graphx
            plt.xticks(x, my_xticks, rotation='vertical', fontsize=5)
            plt.tight_layout()
            # Limit the number of xticks
            plt.locator_params(axis='x', nbins=20)
            plt.bar(x, graphy, width, align='center', linewidth=1, color='red', edgecolor='red')
            out = self.pcap_filepath.replace('.pcap', '_plot-interval-pkt-count' + file_ending)
            plt.savefig(out, dpi=500)
            return out

        # Aidmar
        def plot_interval_ip_src_ent(file_ending: str):
            plt.gcf().clear()
            result = self.stats_db._process_user_defined_query(
                "SELECT lastPktTimestamp, ipSrcEntropy FROM interval_statistics ORDER BY lastPktTimestamp")
            graphx, graphy = [], []
            for row in result:
                graphx.append(row[0])
                graphy.append(row[1])
            plt.autoscale(enable=True, axis='both')
            plt.title("Source IP Entropy")
            plt.xlabel('Timestamp')
            plt.ylabel('Entropy')
            width = 0.1
            plt.xlim([0, len(graphx)])
            plt.grid(True)
            # Timestamps on the x-axis
            x = range(0, len(graphx))
            my_xticks = graphx
            plt.xticks(x, my_xticks, rotation='vertical', fontsize=5)
            plt.tight_layout()
            # Limit the number of xticks
            plt.locator_params(axis='x', nbins=20)
            plt.bar(x, graphy, width, align='center', linewidth=1, color='red', edgecolor='red')
            out = self.pcap_filepath.replace('.pcap', '_plot-interval-ip-src-ent' + file_ending)
            plt.savefig(out, dpi=500)
            return out

        # Aidmar
        def plot_interval_ip_dst_ent(file_ending: str):
            plt.gcf().clear()
            result = self.stats_db._process_user_defined_query(
                "SELECT lastPktTimestamp, ipDstEntropy FROM interval_statistics ORDER BY lastPktTimestamp")
            graphx, graphy = [], []
            for row in result:
                graphx.append(row[0])
                graphy.append(row[1])
            plt.autoscale(enable=True, axis='both')
            plt.title("Destination IP Entropy")
            plt.xlabel('Timestamp')
            plt.ylabel('Entropy')
            width = 0.1
            plt.xlim([0, len(graphx)])
            plt.grid(True)
            # Timestamps on the x-axis
            x = range(0, len(graphx))
            my_xticks = graphx
            plt.xticks(x, my_xticks, rotation='vertical', fontsize=5)
            plt.tight_layout()
            # Limit the number of xticks
            plt.locator_params(axis='x', nbins=20)
            plt.bar(x, graphy, width, align='center', linewidth=1, color='red', edgecolor='red')
            out = self.pcap_filepath.replace('.pcap', '_plot-interval-ip-dst-ent' + file_ending)
            plt.savefig(out, dpi=500)
            return out

        # Aidmar
        def plot_interval_ip_dst_cum_ent(file_ending: str):
            plt.gcf().clear()
            result = self.stats_db._process_user_defined_query(
                "SELECT lastPktTimestamp, ipDstCumEntropy FROM interval_statistics ORDER BY lastPktTimestamp")
            graphx, graphy = [], []
            for row in result:
                graphx.append(row[0])
                graphy.append(row[1])
            plt.autoscale(enable=True, axis='both')
            plt.title("Destination IP Cumulative Entropy")
            plt.xlabel('Timestamp')
            plt.ylabel('Entropy')
            plt.xlim([0, len(graphx)])
            plt.grid(True)
            # Timestamps on the x-axis
            x = range(0, len(graphx))
            my_xticks = graphx
            plt.xticks(x, my_xticks, rotation='vertical', fontsize=5)
            plt.tight_layout()
            # Limit the number of xticks
            plt.locator_params(axis='x', nbins=20)
            plt.plot(x, graphy, 'r')
            out = self.pcap_filepath.replace('.pcap', '_plot-interval-ip-dst-cum-ent' + file_ending)
            plt.savefig(out, dpi=500)
            return out

        # Aidmar
        def plot_interval_ip_src_cum_ent(file_ending: str):
            plt.gcf().clear()
            result = self.stats_db._process_user_defined_query(
                "SELECT lastPktTimestamp, ipSrcCumEntropy FROM interval_statistics ORDER BY lastPktTimestamp")
            graphx, graphy = [], []
            for row in result:
                graphx.append(row[0])
                graphy.append(row[1])
            plt.autoscale(enable=True, axis='both')
            plt.title("Source IP Cumulative Entropy")
            plt.xlabel('Timestamp')
            plt.ylabel('Entropy')
            plt.xlim([0, len(graphx)])
            plt.grid(True)
            # Timestamps on the x-axis
            x = range(0, len(graphx))
            my_xticks = graphx
            plt.xticks(x, my_xticks, rotation='vertical', fontsize=5)
            plt.tight_layout()
            # Limit the number of xticks
            plt.locator_params(axis='x', nbins=20)
            plt.plot(x, graphy, 'r')
            out = self.pcap_filepath.replace('.pcap', '_plot-interval-ip-src-cum-ent' + file_ending)
            plt.savefig(out, dpi=500)
            return out

        # Aidmar
        def plot_interval_new_ip(file_ending: str):
            plt.gcf().clear()
            result = self.stats_db._process_user_defined_query(
                "SELECT lastPktTimestamp, newIPCount FROM interval_statistics ORDER BY lastPktTimestamp")
            graphx, graphy = [], []
            for row in result:
                graphx.append(row[0])
                graphy.append(row[1])
            plt.autoscale(enable=True, axis='both')
            plt.title("IP Novelty Distribution")
            plt.xlabel('Timestamp')
            plt.ylabel('Novel values count')
            plt.xlim([0, len(graphx)])
            plt.grid(True)
            width = 0.1
            # Timestamps on the x-axis
            x = range(0, len(graphx))
            my_xticks = graphx
            plt.xticks(x, my_xticks, rotation='vertical', fontsize=5)
            plt.tight_layout()
            # Limit the number of xticks
            plt.locator_params(axis='x', nbins=20)
            plt.bar(x, graphy, width, align='center', linewidth=1, color='red', edgecolor='red')
            out = self.pcap_filepath.replace('.pcap', '_plot-interval-novel-ip-dist' + file_ending)
            plt.savefig(out, dpi=500)
            print("IP Standard Deviation:")
            self.calculate_standard_deviation(graphy)
            return out

        # Aidmar
        def plot_interval_new_ttl(file_ending: str):
            plt.gcf().clear()
            result = self.stats_db._process_user_defined_query(
                "SELECT lastPktTimestamp, newTTLCount FROM interval_statistics ORDER BY lastPktTimestamp")
            if result:
                graphx, graphy = [], []
                for row in result:
                    graphx.append(row[0])
                    graphy.append(row[1])
                plt.autoscale(enable=True, axis='both')
                plt.title("TTL Novelty Distribution")
                plt.xlabel('Timestamp')
                plt.ylabel('Novel values count')
                plt.xlim([0, len(graphx)])
                plt.grid(True)
                width = 0.1
                # Timestamps on the x-axis
                x = range(0, len(graphx))
                my_xticks = graphx
                plt.xticks(x, my_xticks, rotation='vertical', fontsize=5)
                plt.tight_layout()
                # Limit the number of xticks
                plt.locator_params(axis='x', nbins=20)
                plt.bar(x, graphy, width, align='center', linewidth=1, color='red', edgecolor='red')
                out = self.pcap_filepath.replace('.pcap', '_plot-interval-novel-ttl-dist' + file_ending)
                plt.savefig(out, dpi=500)
                print("TTL Standard Deviation:")
                self.calculate_standard_deviation(graphy)
                return out
            else:
                print("Error plot TTL: No TTL values found!")

        # Aidmar
        def plot_interval_new_tos(file_ending: str):
            plt.gcf().clear()
            result = self.stats_db._process_user_defined_query(
                "SELECT lastPktTimestamp, newToSCount FROM interval_statistics ORDER BY lastPktTimestamp")
            graphx, graphy = [], []
            for row in result:
                graphx.append(row[0])
                graphy.append(row[1])
            plt.autoscale(enable=True, axis='both')
            plt.title("ToS Novelty Distribution")
            plt.xlabel('Timestamp')
            plt.ylabel('Novel values count')
            plt.xlim([0, len(graphx)])
            plt.grid(True)
            width = 0.1
            # Timestamps on the x-axis
            x = range(0, len(graphx))
            my_xticks = graphx
            plt.xticks(x, my_xticks, rotation='vertical', fontsize=5)
            plt.tight_layout()
            # Limit the number of xticks
            plt.locator_params(axis='x', nbins=20)
            plt.bar(x, graphy, width, align='center', linewidth=1, color='red', edgecolor='red')
            out = self.pcap_filepath.replace('.pcap', '_plot-interval-novel-tos-dist' + file_ending)
            plt.savefig(out, dpi=500)
            print("ToS Standard Deviation:")
            self.calculate_standard_deviation(graphy)
            return out

        # Aidmar
        def plot_interval_new_win_size(file_ending: str):
            plt.gcf().clear()
            result = self.stats_db._process_user_defined_query(
                "SELECT lastPktTimestamp, newWinSizeCount FROM interval_statistics ORDER BY lastPktTimestamp")
            if result:
                graphx, graphy = [], []
                for row in result:
                    graphx.append(row[0])
                    graphy.append(row[1])
                plt.autoscale(enable=True, axis='both')
                plt.title("Window Size Novelty Distribution")
                plt.xlabel('Timestamp')
                plt.ylabel('Novel values count')
                plt.xlim([0, len(graphx)])
                plt.grid(True)
                width = 0.1
                # Timestamps on the x-axis
                x = range(0, len(graphx))
                my_xticks = graphx
                plt.xticks(x, my_xticks, rotation='vertical', fontsize=5)
                plt.tight_layout()
                # Limit the number of xticks
                plt.locator_params(axis='x', nbins=20)
                plt.bar(x, graphy, width, align='center', linewidth=1, color='red', edgecolor='red')
                out = self.pcap_filepath.replace('.pcap', '_plot-interval-novel-win-size-dist' + file_ending)
                plt.savefig(out, dpi=500)
                # Calculate the standard deviation
                print("Window Size Standard Deviation:")
                self.calculate_standard_deviation(graphy)
                return out
            else:
                print("Error plot new values WinSize: No WinSize values found!")

        # Aidmar
        def plot_interval_new_mss(file_ending: str):
            plt.gcf().clear()
            result = self.stats_db._process_user_defined_query(
                "SELECT lastPktTimestamp, newMSSCount FROM interval_statistics ORDER BY lastPktTimestamp")
            if result:
                graphx, graphy = [], []
                for row in result:
                    graphx.append(row[0])
                    graphy.append(row[1])
                plt.autoscale(enable=True, axis='both')
                plt.title("MSS Novelty Distribution")
                plt.xlabel('Timestamp')
                plt.ylabel('Novel values count')
                plt.xlim([0, len(graphx)])
                plt.grid(True)
                width = 0.1
                # Timestamps on the x-axis
                x = range(0, len(graphx))
                my_xticks = graphx
                plt.xticks(x, my_xticks, rotation='vertical', fontsize=5)
                plt.tight_layout()
                # Limit the number of xticks
                plt.locator_params(axis='x', nbins=20)
                plt.bar(x, graphy, width, align='center', linewidth=1, color='red', edgecolor='red')
                out = self.pcap_filepath.replace('.pcap', '_plot-interval-novel-mss-dist' + file_ending)
                plt.savefig(out, dpi=500)
                # Calculate the standard deviation
                print("MSS Standard Deviation:")
                self.calculate_standard_deviation(graphy)
                return out
            else:
                print("Error plot new values MSS: No MSS values found!")

        ttl_out_path = plot_ttl('.' + format)
        mss_out_path = plot_mss('.' + format)
        win_out_path = plot_win('.' + format)
        protocol_out_path = plot_protocol('.' + format)
        port_out_path = plot_port('.' + format)
        # ip_src_out_path = plot_ip_src('.' + format)
        # ip_dst_out_path = plot_ip_dst('.' + format)
        interval_pkt_count_out_path = plot_interval_pktCount('.' + format)
        interval_ip_src_ent_out_path = plot_interval_ip_src_ent('.' + format)
        interval_ip_dst_ent_out_path = plot_interval_ip_dst_ent('.' + format)
        interval_ip_src_cum_ent_out_path = plot_interval_ip_src_cum_ent('.' + format)
        interval_ip_dst_cum_ent_out_path = plot_interval_ip_dst_cum_ent('.' + format)
        interval_new_ip_out_path = plot_interval_new_ip('.' + format)
        interval_new_ttl_out_path = plot_interval_new_ttl('.' + format)
        interval_new_tos_out_path = plot_interval_new_tos('.' + format)
        interval_new_win_size_out_path = plot_interval_new_win_size('.' + format)
        interval_new_mss_out_path = plot_interval_new_mss('.' + format)
        # print("Saved distribution plots at: %s, %s, %s, %s, %s, %s, %s, %s" % (ttl_out_path, mss_out_path,
        #       win_out_path, protocol_out_path, port_out_path, ip_src_out_path, ip_dst_out_path,
        #       interval_pkt_count_out_path))

    # Aidmar
    def calculate_complement_packet_rates(self, pps):
        """
        Calculates the complement packet rates of the background traffic packet rates for each interval and
        normalizes them to the maximum boundary, which is the input parameter pps.

        :return: normalized packet rates for each time interval.
        """
        result = self.process_db_query(
            "SELECT lastPktTimestamp,pktsCount FROM interval_statistics ORDER BY lastPktTimestamp")
        # print(result)
        bg_interval_pps = []
        complement_interval_pps = []
        intervalsSum = 0
        if result:
            # Get the interval length in seconds
            for i, row in enumerate(result):
                if i < len(result) - 1:
                    intervalsSum += ceil((int(result[i + 1][0]) * 10 ** -6) - (int(row[0]) * 10 ** -6))
            interval = intervalsSum / (len(result) - 1)
            # Convert timestamps from microseconds to seconds, convert packet rate "per interval" to "per second"
            for row in result:
                bg_interval_pps.append((int(row[0]) * 10 ** -6, int(row[1] / interval)))
            # Find the maximum PPS
            maxPPS = max(bg_interval_pps, key=itemgetter(1))[1]
            for row in bg_interval_pps:
                complement_interval_pps.append((row[0], int(pps * (maxPPS - row[1]) / maxPPS)))
        return complement_interval_pps
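
    # Worked example (illustrative, not part of the original code): with a target
    # rate pps=50 and a background maximum maxPPS=100, an interval running at
    # 40 pps is complemented to int(50 * (100 - 40) / 100) = 30 pps, i.e. the
    # complement rate is highest where the background traffic is lightest.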
  866. """
  867. # Aidmar
  868. # bhattacharyya test
  869. import math
  870. def mean(hist):
  871. mean = 0.0;
  872. for i in hist:
  873. mean += i;
  874. mean /= len(hist);
  875. return mean;
  876. def bhatta(hist1, hist2):
  877. # calculate mean of hist1
  878. h1_ = mean(hist1);
  879. # calculate mean of hist2
  880. h2_ = mean(hist2);
  881. # calculate score
  882. score = 0;
  883. for i in range(len(hist1)):
  884. score += math.sqrt(hist1[i] * hist2[i]);
  885. # print h1_,h2_,score;
  886. score = math.sqrt(1 - (1 / math.sqrt(h1_ * h2_ * len(hist1) * len(hist1))) * score);
  887. return score;
  888. print("\nbhatta distance: " + str(bhatta(graphy, graphy_aftr)))
  889. """