PcapFile.py

import hashlib
import os.path

import ID2TLib.libpcapreader as pr


class PcapFile(object):
    def __init__(self, pcap_file_path: str):
        """
        Creates a new PcapFile associated with the PCAP file at pcap_file_path.

        :param pcap_file_path: The path to the PCAP file
        """
        self.pcap_file_path = pcap_file_path

    def merge_attack(self, attack_pcap_path: str):
        """
        Merges the loaded PCAP with the PCAP at attack_pcap_path.

        :param attack_pcap_path: The path to the PCAP file to merge with the PCAP at pcap_file_path
        :return: The file path of the resulting PCAP file
        """
        pcap = pr.pcap_processor(self.pcap_file_path)
        file_out_path = pcap.merge_pcaps(attack_pcap_path)
        return file_out_path

    def get_file_hash(self):
        """
        Returns the hash for the loaded PCAP file. The hash is calculated based on:
        - the file size in bytes
        - at most the first 224 * 40000 bytes of the file

        :return: The hash for the PCAP file as a string.
        """
        # Block size in bytes
        const_blocksize = 224
        # Number of blocks to read at the beginning of the file
        const_max_blocks_read = 40000

        # Initialize required variables
        hasher = hashlib.sha224()
        blocks_read = 0

        # Hash calculation
        with open(self.pcap_file_path, 'rb') as afile:
            # Add filename -> makes trouble when renaming the PCAP
            # hasher.update(afile.name.encode('utf-8'))
            # Add file's last modification date -> makes trouble when copying the PCAP
            # hasher.update(str(time.ctime(os.path.getmtime(self.pcap_file_path))).encode('utf-8'))
            # Add file size
            hasher.update(str(os.path.getsize(self.pcap_file_path)).encode('utf-8'))
            # Add at most the first 40000 * 224 bytes (about 8.5 MiB) of the file
            buf = afile.read(const_blocksize)
            blocks_read += 1
            while len(buf) > 0 and blocks_read < const_max_blocks_read:
                hasher.update(buf)
                buf = afile.read(const_blocksize)
                blocks_read += 1
        return hasher.hexdigest()

    def get_db_path(self, root_directory: str = os.path.join(os.path.expanduser('~'), 'ID2T_data', 'db')):
        """
        Creates a path based on a hashed directory structure. Derives a hash code from the file's hash and,
        from that, the database path.
        Code and idea based on:
        http://michaelandrews.typepad.com/the_technical_times/2009/10/creating-a-hashed-directory-structure.html

        :param root_directory: The root directory of the hashed directory structure (optional)
        :return: The full path to the database file
        """
        def hashcode(input: str):
            """
            Creates a hashcode of a string, based on Java's hashcode implementation.
            Code based on: http://garage.pimentech.net/libcommonPython_src_python_libcommon_javastringhashcode/

            :param input: The string the hashcode should be calculated from
            :return: The hashcode as a signed 32-bit integer
            """
            h = 0
            for c in input:
                h = (31 * h + ord(c)) & 0xFFFFFFFF
            return ((h + 0x80000000) & 0xFFFFFFFF) - 0x80000000
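
        # Worked example of the Java-style hashcode (values computed with the function
        # above): hashcode("abc") == 96354, the same value Java's "abc".hashCode() yields.
        # With mask 255, 96354 & 255 == 98 and (96354 >> 8) & 255 == 120, so such a hash
        # code would place the database under the subdirectories 98/120 of root_directory.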
        file_hash = self.get_file_hash()
        file_hashcode = hashcode(file_hash)
        # The two lowest bytes of the hash code select the two directory levels
        mask = 255
        dir_first_level = file_hashcode & mask
        dir_second_level = (file_hashcode >> 8) & mask
        return os.path.join(root_directory, str(dir_first_level), str(dir_second_level), file_hash[0:12] + ".sqlite3")
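

# Minimal usage sketch (assumptions: the file paths below are placeholders, and the
# compiled ID2TLib.libpcapreader extension must be importable for merge_attack()):
if __name__ == '__main__':
    pcap_file = PcapFile('example_capture.pcap')  # placeholder path
    print('PCAP hash:     ' + pcap_file.get_file_hash())
    print('Database path: ' + pcap_file.get_db_path())
    # Merging needs a second capture containing the attack traffic, e.g.:
    # merged_path = pcap_file.merge_attack('attack_traffic.pcap')  # placeholder path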