/*
 * ZMap Copyright 2013 Regents of the University of Michigan
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy
 * of the License at http://www.apache.org/licenses/LICENSE-2.0
 */
  8. #include <assert.h>
  9. #include <pthread.h>
  10. #include <stdint.h>
  11. #include <time.h>
  12. #include "../lib/includes.h"
  13. #include "../lib/blacklist.h"
  14. #include "../lib/logger.h"
  15. #include "../lib/xalloc.h"
  16. #include "iterator.h"
  17. #include "aesrand.h"
  18. #include "shard.h"
  19. #include "state.h"
  20. struct iterator {
  21. cycle_t cycle;
  22. uint8_t num_threads;
  23. shard_t *thread_shards;
  24. uint8_t *complete;
  25. pthread_mutex_t mutex;
  26. uint32_t curr_threads;
  27. };
  28. void shard_complete(uint8_t thread_id, void *arg)
  29. {
  30. iterator_t *it = (iterator_t *) arg;
  31. assert(thread_id < it->num_threads);
  32. pthread_mutex_lock(&it->mutex);
  33. it->complete[thread_id] = 1;
  34. it->curr_threads--;
  35. shard_t *s = &it->thread_shards[thread_id];
  36. zsend.sent += s->state.sent;
  37. zsend.blacklisted += s->state.blacklisted;
  38. zsend.whitelisted += s->state.whitelisted;
  39. zsend.sendto_failures += s->state.failures;
  40. uint8_t done = 1;
  41. for (uint8_t i = 0; done && (i < it->num_threads); ++i) {
  42. done = done && it->complete[i];
  43. }
  44. if (done) {
  45. zsend.finish = now();
  46. zsend.complete = 1;
  47. zsend.first_scanned = it->thread_shards[0].state.first_scanned;
  48. }
  49. pthread_mutex_unlock(&it->mutex);
  50. }
  51. iterator_t* iterator_init(uint8_t num_threads, uint8_t shard,
  52. uint8_t num_shards)
  53. {
  54. uint64_t num_addrs = blacklist_count_allowed();
  55. iterator_t *it = xmalloc(sizeof(struct iterator));
  56. const cyclic_group_t *group = get_group(num_addrs);
  57. if (num_addrs > (1LL << 32)) {
  58. zsend.max_index = 0xFFFFFFFF;
  59. } else {
  60. zsend.max_index = (uint32_t) num_addrs;
  61. }
  62. it->cycle = make_cycle(group, zconf.aes);
  63. it->num_threads = num_threads;
  64. it->curr_threads = num_threads;
  65. it->thread_shards = xcalloc(num_threads, sizeof(shard_t));
  66. it->complete = xcalloc(it->num_threads, sizeof(uint8_t));
  67. pthread_mutex_init(&it->mutex, NULL);
  68. for (uint8_t i = 0; i < num_threads; ++i) {
  69. shard_init(&it->thread_shards[i],
  70. shard,
  71. num_shards,
  72. i,
  73. num_threads,
  74. &it->cycle,
  75. shard_complete,
  76. it
  77. );
  78. }
  79. zconf.generator = it->cycle.generator;
  80. return it;
  81. }
  82. uint32_t iterator_get_sent(iterator_t *it)
  83. {
  84. uint32_t sent = 0;
  85. for (uint8_t i = 0; i < it->num_threads; ++i) {
  86. sent += it->thread_shards[i].state.sent;
  87. }
  88. return sent;
  89. }
  90. shard_t* get_shard(iterator_t *it, uint8_t thread_id)
  91. {
  92. assert(thread_id < it->num_threads);
  93. return &it->thread_shards[thread_id];
  94. }
  95. uint32_t iterator_get_curr_send_threads(iterator_t *it)
  96. {
  97. assert(it);
  98. return it->curr_threads;
  99. }