Added options for enabling and disabling network caching. Also modified the tap polling interval.

Joseph Henry
2019-02-07 10:53:50 -08:00
parent 4de1068b57
commit 52a7e9229e
3 changed files with 95 additions and 45 deletions

View File

@@ -25,9 +25,9 @@
#define ZTS_CALLBACK_PROCESSING_INTERVAL 25
/**
* Polling interval (in ms) for file descriptors wrapped in the Phy I/O loop (for raw drivers only)
* Polling interval (in ms) for fds wrapped in the Phy I/O loop
*/
#define ZTS_PHY_POLL_INTERVAL 1
#define ZTS_TAP_THREAD_POLLING_INTERVAL 50
#define ZTS_HOUSEKEEPING_INTERVAL 50
@@ -41,19 +41,45 @@
//////////////////////////////////////////////////////////////////////////////
#define ZTS_SERVICE_THREAD_NAME "ZeroTierServiceThread"
#define ZTS_EVENT_CALLBACK_THREAD_NAME "ZeroTierEventCallbackThread"
//////////////////////////////////////////////////////////////////////////////
// lwIP behaviour (tcpip driver) //
//////////////////////////////////////////////////////////////////////////////
/**
* How many frames are handled per call from core
*/
#define LWIP_FRAMES_HANDLED_PER_CORE_CALL 16
/**
* How often the lwIP tcpip thread callback checks for incoming frames
*/
#define LWIP_GUARDED_BUF_CHECK_INTERVAL 5
#define LWIP_FRAMES_HANDLED_PER_CORE_CALL 16 // How many frames are handled per call from core
#define LWIP_GUARDED_BUF_CHECK_INTERVAL 5 // in ms
#define LWIP_MAX_GUARDED_RX_BUF_SZ 1024 // number of frame pointers that can be cached waiting for receipt into core
/**
* Number of frame pointers that can be cached waiting for receipt into core
*/
#define LWIP_MAX_GUARDED_RX_BUF_SZ 1024
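A minimal sketch (not the libzt implementation) of how these three constants could fit together: a mutex-guarded queue of frame pointers capped at LWIP_MAX_GUARDED_RX_BUF_SZ, drained in batches of LWIP_FRAMES_HANDLED_PER_CORE_CALL by a pass scheduled every LWIP_GUARDED_BUF_CHECK_INTERVAL ms. The Frame type and function names below are illustrative assumptions, and the LWIP_* values are taken from the defines above.

#include <deque>
#include <mutex>

struct Frame { unsigned char *buf; unsigned int len; }; // placeholder for a raw Ethernet frame

static std::mutex rx_m;
static std::deque<Frame*> rx_queue; // the "guarded" buffer

// Called from the core when a frame arrives; drops the frame if the guard buffer is full.
bool enqueueFrame(Frame *f)
{
	std::lock_guard<std::mutex> l(rx_m);
	if (rx_queue.size() >= (size_t)LWIP_MAX_GUARDED_RX_BUF_SZ)
		return false; // buffer full, caller should count this as a drop
	rx_queue.push_back(f);
	return true;
}

// Scheduled on the lwIP tcpip thread every LWIP_GUARDED_BUF_CHECK_INTERVAL ms.
void drainFrames(void (*feedToStack)(Frame *))
{
	for (int i = 0; i < LWIP_FRAMES_HANDLED_PER_CORE_CALL; ++i) {
		Frame *f = nullptr;
		{
			std::lock_guard<std::mutex> l(rx_m);
			if (rx_queue.empty())
				return;
			f = rx_queue.front();
			rx_queue.pop_front();
		}
		feedToStack(f); // hand one frame to the stack
	}
}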
//////////////////////////////////////////////////////////////////////////////
// Service behaviour //
//////////////////////////////////////////////////////////////////////////////
/**
* Whether the service will cache peer details (such as known paths). Caching
* shortens startup and peer-reachability times but is generally only effective
* for networks with a fairly static topology, so it is not recommended for
* mobile devices.
*/
#define PEER_CACHING 0
/**
* Whether the service will cache network details. Caching shortens startup
* times by letting the service inform the network stack almost immediately of
* an address to use for this peer so that it can create an interface. This is
* only recommended for networks whose IP assignments do not change often.
*/
#define NETWORK_CACHING 1
#endif

View File

@@ -430,6 +430,19 @@ public:
}
}
#endif
#if NETWORK_CACHING
// Join existing networks in networks.d
{
std::vector<std::string> networksDotD(OSUtils::listDirectory((_homePath + ZT_PATH_SEPARATOR_S "networks.d").c_str()));
for(std::vector<std::string>::iterator f(networksDotD.begin());f!=networksDotD.end();++f) {
std::size_t dot = f->find_last_of('.');
if ((dot == 16)&&(f->substr(16) == ".conf"))
_node->join(Utils::hexStrToU64(f->substr(0,dot).c_str()),(void *)0,(void *)0);
}
}
#endif
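As a rough illustration of the filename check in the cached-join loop above: a networks.d entry is named "<16 hex digits>.conf", so the last '.' falls at index 16 and the stem parses directly to the 64-bit network ID. The filename below is made up, and strtoull stands in for Utils::hexStrToU64.

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <string>

int main()
{
	std::string f = "8056c2e21c000001.conf"; // example cached network config filename
	std::size_t dot = f.find_last_of('.');
	if ((dot == 16) && (f.substr(16) == ".conf")) {
		uint64_t nwid = strtoull(f.substr(0, dot).c_str(), nullptr, 16);
		printf("would join network %.16llx\n", (unsigned long long)nwid); // 8056c2e21c000001
	}
	return 0;
}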
// Main I/O loop
_nextBackgroundTaskDeadline = 0;
int64_t clockShouldBe = OSUtils::now();
@@ -660,11 +673,10 @@ public:
}
// Apply or update managed IPs for a configured network (be sure n.tap exists)
void syncManagedStuff(NetworkState &n,bool syncIps,bool syncRoutes)
void syncManagedStuff(NetworkState &n)
{
char ipbuf[64];
// assumes _nets_m is locked
if (syncIps) {
std::vector<InetAddress> newManagedIps;
newManagedIps.reserve(n.config.assignedAddressCount);
for(unsigned int i=0;i<n.config.assignedAddressCount;++i) {
@@ -687,7 +699,6 @@ public:
}
}
n.managedIps.swap(newManagedIps);
}
}
@@ -759,7 +770,7 @@ public:
case ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_CONFIG_UPDATE:
ZT_FAST_MEMCPY(&(n.config),nwc,sizeof(ZT_VirtualNetworkConfig));
if (n.tap) { // sanity check
syncManagedStuff(n,true,true);
syncManagedStuff(n);
n.tap->setMtu(nwc->mtu);
} else {
_nets.erase(nwid);
@@ -773,11 +784,17 @@ public:
*nuptr = (void *)0;
delete n.tap;
_nets.erase(nwid);
#if NETWORK_CACHING
if (op == ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_DESTROY) {
char nlcpath[256];
OSUtils::ztsnprintf(nlcpath,sizeof(nlcpath),"%s" ZT_PATH_SEPARATOR_S "networks.d" ZT_PATH_SEPARATOR_S "%.16llx.local.conf",_homePath.c_str(),nwid);
OSUtils::rm(nlcpath);
}
#endif
} else {
_nets.erase(nwid);
}
break;
}
return 0;
}
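For reference, a hedged sketch of the path the DESTROY branch builds and removes, with plain snprintf standing in for OSUtils::ztsnprintf; the home path and network ID are assumptions for illustration only.

#include <cstdint>
#include <cstdio>

int main()
{
	char nlcpath[256];
	const char *homePath = "/var/lib/zerotier-one";  // assumed home path
	uint64_t nwid = 0x8056c2e21c000001ULL;           // example network ID
	snprintf(nlcpath, sizeof(nlcpath), "%s/networks.d/%.16llx.local.conf",
	         homePath, (unsigned long long)nwid);
	printf("%s\n", nlcpath); // /var/lib/zerotier-one/networks.d/8056c2e21c000001.local.conf
	return 0;
}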
@@ -934,6 +951,13 @@ public:
case ZT_STATE_OBJECT_PLANET:
OSUtils::ztsnprintf(p,sizeof(p),"%s" ZT_PATH_SEPARATOR_S "planet",_homePath.c_str());
break;
#if NETWORK_CACHING
case ZT_STATE_OBJECT_NETWORK_CONFIG:
OSUtils::ztsnprintf(dirname,sizeof(dirname),"%s" ZT_PATH_SEPARATOR_S "networks.d",_homePath.c_str());
OSUtils::ztsnprintf(p,sizeof(p),"%s" ZT_PATH_SEPARATOR_S "%.16llx.conf",dirname,(unsigned long long)id[0]);
secure = true;
break;
#endif
#if PEER_CACHING
case ZT_STATE_OBJECT_PEER:
OSUtils::ztsnprintf(dirname,sizeof(dirname),"%s" ZT_PATH_SEPARATOR_S "peers.d",_homePath.c_str());
@@ -989,6 +1013,11 @@ public:
case ZT_STATE_OBJECT_PLANET:
OSUtils::ztsnprintf(p,sizeof(p),"%s" ZT_PATH_SEPARATOR_S "planet",_homePath.c_str());
break;
#if NETWORK_CACHING
case ZT_STATE_OBJECT_NETWORK_CONFIG:
OSUtils::ztsnprintf(p,sizeof(p),"%s" ZT_PATH_SEPARATOR_S "networks.d" ZT_PATH_SEPARATOR_S "%.16llx.conf",_homePath.c_str(),(unsigned long long)id[0]);
break;
#endif
#if PEER_CACHING
case ZT_STATE_OBJECT_PEER:
OSUtils::ztsnprintf(p,sizeof(p),"%s" ZT_PATH_SEPARATOR_S "peers.d" ZT_PATH_SEPARATOR_S "%.10llx.peer",_homePath.c_str(),(unsigned long long)id[0]);
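The two cache filename formats above differ only in width: network configs are written under networks.d with the 64-bit network ID printed as 16 hex digits (%.16llx), while peer entries under peers.d use %.10llx, i.e. 10 hex digits. A tiny illustration with made-up IDs:

#include <cstdio>

int main()
{
	unsigned long long nwid = 0x8056c2e21c000001ULL; // example 64-bit network ID
	unsigned long long addr = 0x9f65ea63f2ULL;       // example peer address
	printf("networks.d/%.16llx.conf\n", nwid); // networks.d/8056c2e21c000001.conf
	printf("peers.d/%.10llx.peer\n", addr);    // peers.d/9f65ea63f2.peer
	return 0;
}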

View File

@@ -144,11 +144,11 @@ bool VirtualTap::addIp(const InetAddress &ip)
{
char ipbuf[INET6_ADDRSTRLEN];
Mutex::Lock _l(_ips_m);
lwip_init_interface((void*)this, this->_mac, ip);
if (std::find(_ips.begin(),_ips.end(),ip) == _ips.end()) {
_ips.push_back(ip);
std::sort(_ips.begin(),_ips.end());
}
lwip_init_interface((void*)this, this->_mac, ip);
return true;
}
@@ -256,18 +256,13 @@ void VirtualTap::threadMain()
if (FD_ISSET(_shutdownSignalPipe[0],&readfds)) {
break;
}
#ifdef _MSC_VER
Sleep(ZTS_PHY_POLL_INTERVAL);
_phy.poll(0);
#if defined(_WIN32)
Sleep(ZTS_TAP_THREAD_POLLING_INTERVAL);
#else
_phy.poll(ZTS_PHY_POLL_INTERVAL);
struct timespec sleepValue = {0};
sleepValue.tv_nsec = ZTS_TAP_THREAD_POLLING_INTERVAL * 500000;
nanosleep(&sleepValue, NULL);
#endif
uint64_t current_ts = OSUtils::now();
if (current_ts > last_housekeeping_ts + ZTS_HOUSEKEEPING_INTERVAL) {
Housekeeping();
last_housekeeping_ts = OSUtils::now();
}
}
}
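A quick check of the sleep arithmetic in the non-Windows branch above: tv_nsec is expressed in nanoseconds, so ZTS_TAP_THREAD_POLLING_INTERVAL * 500000 with the default value of 50 comes to 25,000,000 ns, meaning the loop sleeps roughly 25 ms per pass while housekeeping still runs at most once per ZTS_HOUSEKEEPING_INTERVAL (50 ms).

#include <cstdio>

#define ZTS_TAP_THREAD_POLLING_INTERVAL 50 // value from the header above

int main()
{
	long tv_nsec = ZTS_TAP_THREAD_POLLING_INTERVAL * 500000L;
	printf("%ld ns = %.1f ms per loop pass\n", tv_nsec, tv_nsec / 1.0e6); // 25000000 ns = 25.0 ms
	return 0;
}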