/*
 * ZeroTier One - Global Peer to Peer Ethernet
 * Copyright (C) 2011-2014 ZeroTier Networks LLC
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * --
 *
 * ZeroTier may be used and distributed under the terms of the GPLv3, which
 * are available at: http://www.gnu.org/licenses/gpl-3.0.html
 *
 * If you would like to embed ZeroTier into a commercial application or
 * redistribute it in a modified binary form, please contact ZeroTier Networks
 * LLC. Start here: http://www.zerotier.com/
 */
#include <algorithm>

#include "Constants.hpp"
#include "MulticastTopology.hpp"
#include "Topology.hpp"

namespace ZeroTier {
  32. MulticastTopology::MulticastTopology()
  33. {
  34. }
  35. MulticastTopology::~MulticastTopology()
  36. {
  37. }
  38. void MulticastTopology::add(const MulticastGroup &mg,const Address &member,const Address &learnedFrom)
  39. {
  40. Mutex::Lock _l(_groups_m);
  41. std::vector<MulticastGroupMember> &mv = _groups[mg].members;
  42. for(std::vector<MulticastGroupMember>::iterator m(mv.begin());m!=mv.end();++m) {
  43. if (m->address == member) {
  44. if (m->learnedFrom) // once a member has been seen directly, we keep its status as direct
  45. m->learnedFrom = learnedFrom;
  46. m->timestamp = Utils::now();
  47. return;
  48. }
  49. }
  50. mv.push_back(MulticastGroupMember(member,learnedFrom,Utils::now()));
  51. }
  52. void MulticastTopology::erase(const MulticastGroup &mg,const Address &member)
  53. {
  54. Mutex::Lock _l(_groups_m);
  55. std::map< MulticastGroup,MulticastGroupStatus >::iterator r(_groups.find(mg));
  56. if (r != _groups.end()) {
  57. for(std::vector<MulticastGroupMember>::iterator m(r->second.members.begin());m!=r->second.members.end();++m) {
  58. if (m->address == member) {
  59. r->second.members.erase(m);
  60. if (r->second.members.empty())
  61. _groups.erase(r);
  62. return;
  63. }
  64. }
  65. }
  66. }
  67. unsigned int MulticastTopology::want(const MulticastGroup &mg,uint64_t now,unsigned int limit,bool updateLastGatheredTimeOnNonzeroReturn)
  68. {
  69. Mutex::Lock _l(_groups_m);
  70. MulticastGroupStatus &gs = _groups[mg];
  71. if ((unsigned int)gs.members.size() >= limit) {
  72. // We already caught our limit, don't need to go fishing any more.
  73. return 0;
  74. } else {
  75. // Compute the delay between fishing expeditions from the fraction of the limit that we already have.
  76. const uint64_t rateDelay = (uint64_t)ZT_MULTICAST_TOPOLOGY_GATHER_DELAY_MIN + (uint64_t)(((double)gs.members.size() / (double)limit) * (double)(ZT_MULTICAST_TOPOLOGY_GATHER_DELAY_MAX - ZT_MULTICAST_TOPOLOGY_GATHER_DELAY_MIN));
  77. if ((now - gs.lastGatheredMembers) >= rateDelay) {
  78. if (updateLastGatheredTimeOnNonzeroReturn)
  79. gs.lastGatheredMembers = now;
  80. return (limit - (unsigned int)gs.members.size());
  81. } else return 0;
  82. }
  83. }
/**
 * Expire stale members, re-rank survivors, and drop empty groups
 *
 * @param now Current time
 * @param topology Peer topology, used to look up Peer records for ranking
 */
void MulticastTopology::clean(uint64_t now,const Topology &topology)
{
	Mutex::Lock _l(_groups_m);
	for(std::map< MulticastGroup,MulticastGroupStatus >::iterator mm(_groups.begin());mm!=_groups.end();) {
		// In-place compaction: 'reader' scans every member, 'writer' advances
		// only over members that survive, overwriting expired slots as it goes.
		std::vector<MulticastGroupMember>::iterator reader(mm->second.members.begin());
		std::vector<MulticastGroupMember>::iterator writer(mm->second.members.begin());
		unsigned int count = 0;
		while (reader != mm->second.members.end()) {
			if ((now - reader->timestamp) < ZT_MULTICAST_LIKE_EXPIRE) {
				*writer = *reader;
				/* We rank in ascending order of most recent relevant activity. For peers we've learned
				 * about by direct LIKEs, we do this in order of their own activity. For indirectly
				 * acquired peers we do this minus a constant to place these categorically below directly
				 * learned peers. For peers with no active Peer record, we use the time we last learned
				 * about them minus one day (a large constant) to put these at the bottom of the list.
				 * List is sorted in ascending order of rank and multicasts are sent last-to-first. */
				if (writer->learnedFrom) {
					// Indirectly learned: rank by the relay peer's activity, minus a
					// constant so indirect members always sort below direct ones.
					SharedPtr<Peer> p(topology.getPeer(writer->learnedFrom));
					if (p)
						writer->rank = p->lastUnicastFrame() - ZT_MULTICAST_LIKE_EXPIRE;
					else writer->rank = writer->timestamp - (86400000 + ZT_MULTICAST_LIKE_EXPIRE); // 86400000 ms = one day
				} else {
					// Directly learned: rank by the member's own activity.
					SharedPtr<Peer> p(topology.getPeer(writer->address));
					if (p)
						writer->rank = p->lastUnicastFrame();
					else writer->rank = writer->timestamp - 86400000; // no Peer record: push toward the bottom
				}
				++writer;
				++count;
			}
			++reader;
		}
		if (count) {
			// Sort only the surviving prefix [begin, writer), ascending by rank
			// (MulticastGroupMember presumably compares by rank — TODO confirm).
			std::sort(mm->second.members.begin(),writer); // sorts in ascending order of rank
			mm->second.members.resize(count); // trim off the ones we cut, after writer
			++mm;
		} else _groups.erase(mm++); // post-increment keeps 'mm' valid across map erase
	}
}
} // namespace ZeroTier