Merge branch '465-subdb-ri' into 'master'

NetDB: Enforce no RIs in subdbs (Gitlab #465)

Closes #465

See merge request i2p-hackers/i2p.i2p!143
@@ -95,7 +95,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade
         // for ISJ
         _context.statManager().createRateStat("netDb.RILookupDirect", "Was an iterative RI lookup sent directly?", "NetworkDatabase", rate);
         // No need to start the FloodfillMonitorJob for client subDb.
-        if (!isMainDb())
+        if (isClientDb())
             _ffMonitor = null;
         else
             _ffMonitor = new FloodfillMonitorJob(_context, this);
@@ -107,7 +107,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade
         super.startup();
         if (_ffMonitor != null)
             _context.jobQueue().addJob(_ffMonitor);
-        if (!isMainDb()) {
+        if (isClientDb()) {
             isFF = false;
         } else {
             isFF = _context.getBooleanProperty(FloodfillMonitorJob.PROP_FLOODFILL_PARTICIPANT);
@@ -116,7 +116,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade
         }
 
         long down = _context.router().getEstimatedDowntime();
-        if (!_context.commSystem().isDummy() && isMainDb() &&
+        if (!_context.commSystem().isDummy() && !isClientDb() &&
             (down == 0 || (!isFF && down > 30*60*1000) || (isFF && down > 24*60*60*1000))) {
             // refresh old routers
             Job rrj = new RefreshRoutersJob(_context, this);
@@ -128,7 +128,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade
     @Override
     protected void createHandlers() {
         // Only initialize the handlers for the flooodfill netDb.
-        if (isMainDb()) {
+        if (!isClientDb()) {
             if (_log.shouldInfo())
                 _log.info("[dbid: " + super._dbid + "] Initializing the message handlers");
             _context.inNetMessagePool().registerHandlerJobBuilder(DatabaseLookupMessage.MESSAGE_TYPE, new FloodfillDatabaseLookupMessageHandler(_context, this));
@@ -435,13 +435,6 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade
         }
     }
 
-    @Override
-    protected PeerSelector createPeerSelector() {
-        if (_peerSelector != null)
-            return _peerSelector;
-        return new FloodfillPeerSelector(_context);
-    }
-
     /**
      * Public, called from console. This wakes up the floodfill monitor,
      * which will rebuild the RI and log in the event log,
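Reviewer note on the recurring guard change: isMainDb() (removed later in this merge, see the KademliaNetworkDatabaseFacade hunks) is true only for the main DB, so the old "!isMainDb()" also caught any non-main, non-client sub-DB and treated it like a client. "isClientDb()" is the narrower test the code actually wants. Below is a minimal, runnable sketch of the difference; it is not part of the patch, and the DBID values are assumptions modeled on FloodfillNetworkDatabaseSegmentor (the removed javadoc below only confirms that MAIN_DBID is null).

    // Sketch only -- DBID names/values are assumptions, not the project's API.
    class DbidSketch {
        static final String MAIN_DBID = null;             // per the removed javadoc
        static final String MULTIHOME_DBID = "multihome"; // hypothetical second non-client DB

        final String dbid;
        DbidSketch(String dbid) { this.dbid = dbid; }

        boolean isMainDb()   { return dbid == MAIN_DBID; } // a null check in disguise
        boolean isClientDb() { return dbid != MAIN_DBID && !MULTIHOME_DBID.equals(dbid); }

        public static void main(String[] args) {
            DbidSketch multihome = new DbidSketch(MULTIHOME_DBID);
            // Old guard: true for the multihome DB, so it skipped floodfill setup.
            System.out.println(!multihome.isMainDb());   // true
            // New guard: only real client sub-DBs skip the floodfill machinery.
            System.out.println(multihome.isClientDb());  // false
        }
    }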
@@ -148,11 +148,11 @@ class FloodfillVerifyStoreJob extends JobImpl {
         }
 
         // garlic encrypt to hide contents from the OBEP
-        RouterInfo peer = _facade.lookupRouterInfoLocally(_target);
+        RouterInfo peer = ctx.netDb().lookupRouterInfoLocally(_target);
         if (peer == null) {
             if (_log.shouldLog(Log.WARN))
                 _log.warn("(JobId: " + getJobId()
-                          + "; dbid: " + _facade._dbid
+                          + "; db: " + _facade
                           + ") Fail looking up RI locally for target " + _target);
             _facade.verifyFinished(_key);
             return;
@@ -245,7 +245,7 @@ class FloodfillVerifyStoreJob extends JobImpl {
         }
 
         if (_log.shouldLog(Log.INFO))
-            _log.info("[JobId: " + getJobId() + "; dbid: " + _facade._dbid
+            _log.info("[JobId: " + getJobId() + "; db: " + _facade
                       + "]: Starting verify (stored " + _key + " to " + _sentTo + "), asking " + _target);
         _sendTime = ctx.clock().now();
         _expiration = _sendTime + VERIFY_TIMEOUT;
@@ -277,7 +277,7 @@ class FloodfillVerifyStoreJob extends JobImpl {
             if (peers.isEmpty())
                 break;
             Hash peer = peers.get(0);
-            RouterInfo ri = _facade.lookupRouterInfoLocally(peer);
+            RouterInfo ri = getContext().netDb().lookupRouterInfoLocally(peer);
             //if (ri != null && StoreJob.supportsCert(ri, keyCert)) {
             if (ri != null && StoreJob.shouldStoreTo(ri) &&
                 //(!_isLS2 || (StoreJob.shouldStoreLS2To(ri) &&
@@ -334,18 +334,7 @@ class FloodfillVerifyStoreJob extends JobImpl {
                 return _key.equals(dsm.getKey());
             } else if (type == DatabaseSearchReplyMessage.MESSAGE_TYPE) {
                 DatabaseSearchReplyMessage dsrm = (DatabaseSearchReplyMessage)message;
-                boolean rv = _key.equals(dsrm.getSearchKey());
-                if (rv) {
-                    if (_log.shouldInfo())
-                        _log.info("[JobId: " + getJobId() + "; dbid: " + _facade._dbid
-                                  + "DSRM key match successful.");
-                } else {
-                    if (_log.shouldWarn())
-                        _log.warn("[JobId: " + getJobId() + "; dbid: " + _facade._dbid
-                                  + "]: DSRM key mismatch for key " + _key
-                                  + " with DSRM: " + message);
-                }
-                return rv;
+                return _key.equals(dsrm.getSearchKey());
             }
             return false;
         }
@@ -413,21 +402,16 @@ class FloodfillVerifyStoreJob extends JobImpl {
                 // assume 0 old, all new, 0 invalid, 0 dup
                 pm.dbLookupReply(_target, 0,
                                  dsrm.getNumReplies(), 0, 0, delay);
-                // ToDo: Clarify the following log message.
-                // This message is phrased in a manner that draws attention, and indicates
-                // the possibility of a problem that may need follow-up. But examination
-                // of the code did not provide insight as to what is being verified,
-                // and what is failing. This message will be displayed unconditionally
-                // every time a DSRM is handled here.
+                // The peer we asked did not have the key, so _sentTo failed to flood it
                 if (_log.shouldLog(Log.WARN))
-                    _log.warn(getJobId() + ": DSRM verify failed (dbid: "
-                              + _facade._dbid + ") for " + _key);
+                    _log.warn(getJobId() + ": DSRM verify failed (db: "
+                              + _facade + ") for " + _key);
                 // only for RI... LS too dangerous?
                 if (_isRouterInfo) {
                     if (_facade.isClientDb())
                         if (_log.shouldLog(Log.WARN))
-                            _log.warn("[Jobid: " + getJobId() + "; dbid: " + _facade._dbid
-                                      + "Warning! Client is starting a SingleLookupJob (DIRECT?) for RouterInfo");
+                            _log.warn("[Jobid: " + getJobId() + "; db: " + _facade
+                                      + "] Warning! Client is starting a SingleLookupJob (DIRECT?) for RouterInfo");
                     ctx.jobQueue().addJob(new SingleLookupJob(ctx, dsrm));
                 }
             }
@@ -458,12 +442,7 @@ class FloodfillVerifyStoreJob extends JobImpl {
      * So at least we'll try THREE ffs round-robin if things continue to fail...
      */
     private void resend() {
-        // It's safe to check the default netDb first, but if the lookup is for
-        // a client, nearly all RI is expected to be found in the FF netDb.
         DatabaseEntry ds = _facade.lookupLocally(_key);
-        if ((ds == null) && _facade.isClientDb() && _isRouterInfo)
-            // It's safe to check the floodfill netDb for RI
-            ds = getContext().netDb().lookupLocally(_key);
         if (ds != null) {
             // By the time we get here, a minute or more after the store started,
             // we may have already started a new store
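The common thread in the FloodfillVerifyStoreJob hunks above: RouterInfo reads no longer go through _facade, which may be a client sub-DB that holds no RouterInfos after this merge, but straight to the main netDb via ctx.netDb() / getContext().netDb(). A runnable sketch of the caller-side rule, using stand-in types rather than the real facades:

    import java.util.HashMap;
    import java.util.Map;

    // Sketch only -- Db stands in for NetworkDatabaseFacade; Strings stand in
    // for Hash and RouterInfo. Shows why callers must target the main DB.
    class RiLookupSketch {
        static class Db {
            final boolean client;
            final Map<String, String> store = new HashMap<>();
            Db(boolean client) { this.client = client; }
            String lookupRouterInfoLocally(String key) {
                if (client)
                    return null; // enforced: client sub-DBs answer null for RIs
                return store.get(key);
            }
        }

        public static void main(String[] args) {
            Db mainDb = new Db(false);  // stands in for ctx.netDb()
            Db clientDb = new Db(true); // stands in for a client _facade
            mainDb.store.put("target", "RouterInfo[target]");
            System.out.println(clientDb.lookupRouterInfoLocally("target")); // null
            System.out.println(mainDb.lookupRouterInfoLocally("target"));   // RouterInfo[target]
        }
    }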
@@ -177,22 +177,24 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
         _dbid = dbid;
         _log = _context.logManager().getLog(getClass());
         _networkID = context.router().getNetworkID();
-        _peerSelector = createPeerSelector();
         _publishingLeaseSets = new HashMap<Hash, RepublishLeaseSetJob>(8);
         _activeRequests = new HashMap<Hash, SearchJob>(8);
-        if (!isMainDb()) {
+        if (isClientDb()) {
             _reseedChecker = null;
             _blindCache = null;
             _exploreKeys = null;
+            _erj = null;
+            _peerSelector = ((KademliaNetworkDatabaseFacade) context.netDb()).getPeerSelector();
         } else {
             _reseedChecker = new ReseedChecker(context);
             _blindCache = new BlindCache(context);
             _exploreKeys = new ConcurrentHashSet<Hash>(64);
+            // We don't have a comm system here to check for ctx.commSystem().isDummy()
+            // we'll check before starting in startup()
+            _erj = new ExpireRoutersJob(_context, this);
+            _peerSelector = createPeerSelector();
         }
         _elj = new ExpireLeasesJob(_context, this);
-        // We don't have a comm system here to check for ctx.commSystem().isDummy()
-        // we'll check before starting in startup()
-        _erj = new ExpireRoutersJob(_context, this);
         if (_log.shouldLog(Log.DEBUG))
             _log.debug("Created KademliaNetworkDatabaseFacade for id: " + dbid);
         context.statManager().createRateStat("netDb.lookupDeferred", "how many lookups are deferred?", "NetworkDatabase", new long[] { 60*60*1000 });
@@ -218,13 +220,24 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
         return _initialized && _ds != null && _ds.isInitialized();
     }
 
-    protected abstract PeerSelector createPeerSelector();
+    /**
+     * Only for main DB
+     */
+    protected PeerSelector createPeerSelector() {
+        if (isClientDb())
+            throw new IllegalStateException();
+        return new FloodfillPeerSelector(_context);
+    }
+
+    /**
+     * @return the main DB's peer selector. Client DBs do not have their own.
+     */
+    public PeerSelector getPeerSelector() { return _peerSelector; }
 
     /** @since 0.9 */
     @Override
     public ReseedChecker reseedChecker() {
-        if (!isMainDb())
+        if (isClientDb())
             return null;
         return _reseedChecker;
     }
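Together with the constructor hunk above, this fixes the selector ownership rule: the main DB builds a FloodfillPeerSelector in createPeerSelector(), client DBs alias it through getPeerSelector(), and calling createPeerSelector() on a client DB is now a programming error. A runnable sketch of that invariant with stand-in types:

    // Sketch only -- Selector stands in for FloodfillPeerSelector.
    class SelectorSketch {
        static class Selector {}

        final boolean client;
        final Selector selector;

        SelectorSketch(SelectorSketch mainDb, boolean client) {
            this.client = client;
            // Mirrors the constructor hunk: clients borrow, the main DB creates.
            this.selector = client ? mainDb.getPeerSelector() : createPeerSelector();
        }

        Selector createPeerSelector() {
            if (client)
                throw new IllegalStateException(); // main DB only, as in the hunk
            return new Selector();
        }

        Selector getPeerSelector() { return selector; }

        public static void main(String[] args) {
            SelectorSketch mainDb = new SelectorSketch(null, false);
            SelectorSketch clientDb = new SelectorSketch(mainDb, true);
            System.out.println(mainDb.getPeerSelector() == clientDb.getPeerSelector()); // true
        }
    }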
@@ -238,11 +251,14 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
      * @return
      */
     protected BlindCache blindCache() {
-        if (isMainDb())
+        if (!isClientDb())
             return _blindCache;
         return _context.netDb().blindCache();
     }
 
+    /**
+     * @return the main DB's KBucketSet. Client DBs do not have their own.
+     */
     KBucketSet<Hash> getKBuckets() { return _kb; }
     DataStore getDataStore() { return _ds; }
@@ -255,20 +271,20 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
 
     /** @return unmodifiable set */
     public Set<Hash> getExploreKeys() {
-        if (!_initialized || !isMainDb())
+        if (!_initialized || isClientDb())
             return Collections.emptySet();
         return Collections.unmodifiableSet(_exploreKeys);
     }
 
     public void removeFromExploreKeys(Collection<Hash> toRemove) {
-        if (!_initialized || !isMainDb())
+        if (!_initialized || isClientDb())
             return;
         _exploreKeys.removeAll(toRemove);
         _context.statManager().addRateData("netDb.exploreKeySet", _exploreKeys.size());
     }
 
     public void queueForExploration(Collection<Hash> keys) {
-        if (!_initialized || !isMainDb())
+        if (!_initialized || isClientDb())
             return;
         for (Iterator<Hash> iter = keys.iterator(); iter.hasNext() && _exploreKeys.size() < MAX_EXPLORE_QUEUE; ) {
             _exploreKeys.add(iter.next());
@@ -280,16 +296,19 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
      * Cannot be restarted.
      */
     public synchronized void shutdown() {
+        if (_log.shouldWarn())
+            _log.warn("DB shutdown " + this);
         _initialized = false;
-        if (!_context.commSystem().isDummy() && isMainDb() &&
+        if (!_context.commSystem().isDummy() && !isClientDb() &&
             _context.router().getUptime() > ROUTER_INFO_EXPIRATION_FLOODFILL + 10*60*1000 + 60*1000) {
             // expire inline before saving RIs in _ds.stop()
             Job erj = new ExpireRoutersJob(_context, this);
             erj.runJob();
         }
         _context.jobQueue().removeJob(_elj);
-        _context.jobQueue().removeJob(_erj);
-        if (_kb != null)
+        if (_erj != null)
+            _context.jobQueue().removeJob(_erj);
+        if (_kb != null && !isClientDb())
             _kb.clear();
         if (_ds != null)
             _ds.stop();
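The new shutdown() guards follow from the constructor hunk above and the startup() hunk below: _erj is null for client DBs, and _kb may be the main DB's shared bucket set, which a client must never clear. A compact, runnable sketch of the guarded teardown with stand-in fields:

    // Sketch only -- erj/kb stand in for the ExpireRoutersJob and KBucketSet fields.
    class ShutdownSketch {
        Object erj;                 // null for client DBs after this merge
        java.util.Set<String> kb;   // possibly an alias of the main DB's table
        boolean isClientDb;

        void shutdown() {
            if (erj != null)
                removeJob(erj);        // was unconditional before this merge
            if (kb != null && !isClientDb)
                kb.clear();            // never clear a shared table from a client
        }

        void removeJob(Object job) { /* stands in for jobQueue().removeJob(job) */ }

        public static void main(String[] args) {
            ShutdownSketch client = new ShutdownSketch();
            client.isClientDb = true;
            client.kb = new java.util.HashSet<>();
            client.kb.add("peerHash");
            client.shutdown();
            System.out.println(client.kb); // [peerHash] -- left intact for the main DB
        }
    }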
@@ -297,7 +316,7 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
             _exploreKeys.clear();
         if (_negativeCache != null)
             _negativeCache.stop();
-        if (isMainDb())
+        if (!isClientDb())
             blindCache().shutdown();
     }
 
@@ -340,29 +359,18 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
         return true;
     }
 
-    /**
-     * Check if the database is the main netDb. This is the one we're normally using
-     * if you're acting as a floodfill.
-     *
-     * @return true if _dbid == FNDS.MAIN_DBID
-     * @since 0.9.60
-     */
-    protected boolean isMainDb() {
-        // This is a null check in disguise, don't use equals() here.
-        // FNDS.MAIN_DBID is always null.
-        if (_dbid == FloodfillNetworkDatabaseSegmentor.MAIN_DBID)
-            return true;
-        return false;
-    }
-
     public synchronized void startup() {
-        _log.info("Starting up the kademlia network database");
+        if (_log.shouldInfo())
+            _log.info("Starting up the " + this);
         RouterInfo ri = _context.router().getRouterInfo();
         String dbDir = _context.getProperty(PROP_DB_DIR, DEFAULT_DB_DIR);
-        _kb = new KBucketSet<Hash>(_context, ri.getIdentity().getHash(),
-                                   BUCKET_SIZE, KAD_B, new RejectTrimmer<Hash>());
+        if (isClientDb())
+            _kb = _context.netDb().getKBuckets();
+        else
+            _kb = new KBucketSet<Hash>(_context, ri.getIdentity().getHash(),
+                                       BUCKET_SIZE, KAD_B, new RejectTrimmer<Hash>());
         try {
-            if (isMainDb()) {
+            if (!isClientDb()) {
                 _ds = new PersistentDataStore(_context, dbDir, this);
             } else {
                 _ds = new TransientDataStore(_context);
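startup() now decides between a shared and an owned routing table: a client DB aliases the main DB's KBucketSet, so peer selection still sees the full router set even though the client's own transient data store holds no RouterInfos. A runnable sketch of the sharing, with a plain Set standing in for KBucketSet<Hash>:

    import java.util.HashSet;
    import java.util.Set;

    // Sketch only -- not the router classes. Shows that a client's view of the
    // routing table is the same object as the main DB's after this hunk.
    class KbSharingSketch {
        Set<String> kb;

        void startup(KbSharingSketch mainDb, boolean isClientDb) {
            if (isClientDb)
                kb = mainDb.kb;       // like _kb = _context.netDb().getKBuckets()
            else
                kb = new HashSet<>(); // like new KBucketSet<Hash>(...)
        }

        public static void main(String[] args) {
            KbSharingSketch mainDb = new KbSharingSketch();
            mainDb.startup(null, false);
            mainDb.kb.add("someRouterHash");
            KbSharingSketch clientDb = new KbSharingSketch();
            clientDb.startup(mainDb, true);
            System.out.println(clientDb.kb.contains("someRouterHash")); // true -- shared view
        }
    }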
@@ -372,7 +380,7 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
         }
         _dbDir = dbDir;
         _negativeCache = new NegativeLookupCache(_context);
-        if (isMainDb())
+        if (!isClientDb())
             blindCache().startup();
 
         createHandlers();
@@ -387,7 +395,7 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
 
         //// expire some routers
         // Don't run until after RefreshRoutersJob has run, and after validate() will return invalid for old routers.
-        if (!_context.commSystem().isDummy()) {
+        if (!isClientDb() && !_context.commSystem().isDummy()) {
             boolean isFF = _context.getBooleanProperty(FloodfillMonitorJob.PROP_FLOODFILL_PARTICIPANT);
             long down = _context.router().getEstimatedDowntime();
             long delay = (down == 0 || (!isFF && down > 30*60*1000) || (isFF && down > 24*60*60*1000)) ?
@@ -398,7 +406,7 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
         }
 
         if (!QUIET) {
-            if (isMainDb()) {
+            if (!isClientDb()) {
                 // fill the search queue with random keys in buckets that are too small
                 // Disabled since KBucketImpl.generateRandomKey() is b0rked,
                 // and anyway, we want to search for a completely random key,
@@ -420,7 +428,7 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
             _log.warn("Operating in quiet mode - not exploring or pushing data proactively, simply reactively");
             _log.warn("This should NOT be used in production");
         }
-        if (isMainDb()) {
+        if (!isClientDb()) {
             // periodically update and resign the router's 'published date', which basically
             // serves as a version
             Job plrij = new PublishLocalRouterInfoJob(_context);
@@ -451,6 +459,10 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
      * @param peersToIgnore can be null
      */
     public Set<Hash> findNearestRouters(Hash key, int maxNumRouters, Set<Hash> peersToIgnore) {
+        if (isClientDb()) {
+            _log.warn("Subdb", new Exception("I did it"));
+            return Collections.emptySet();
+        }
         if (!_initialized) return Collections.emptySet();
         return new HashSet<Hash>(_peerSelector.selectNearest(key, maxNumRouters, peersToIgnore, _kb));
     }
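This is the first of several identical guards added below (getAllRouters(), getKnownRouters(), lookupRouterInfoLocally(), dropAfterLookupFailed(), getRouters()): an RI-oriented entry point called on a client DB logs a stack trace -- the new Exception("I did it") exists only to record the caller -- and returns a harmless empty value instead of touching state the sub-DB no longer owns. A runnable sketch of the guard shape:

    import java.util.Collections;
    import java.util.Set;

    // Sketch only -- Logger stands in for the router's Log class.
    class GuardSketch {
        interface Logger { void warn(String msg, Exception source); }

        boolean isClientDb;
        Logger log;

        Set<String> findNearestRouters() {
            if (isClientDb) {
                // Which caller hit an RI-only API on a client sub-DB? The
                // throwaway Exception captures the stack trace for the log.
                log.warn("Subdb", new Exception("I did it"));
                return Collections.emptySet(); // harmless result, no wrong answer
            }
            // ... normal main-DB path would run here ...
            return Collections.emptySet();
        }

        public static void main(String[] args) {
            GuardSketch g = new GuardSketch();
            g.isClientDb = true;
            g.log = (msg, source) -> System.out.println(msg + " called from " + source.getStackTrace()[0]);
            System.out.println(g.findNearestRouters()); // []
        }
    }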
@@ -475,8 +487,16 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
     }
     *****/
 
-    /** get the hashes for all known routers */
+    /**
+     * Get the hashes for all known routers
+     *
+     * @return empty set if this is a client DB
+     */
     public Set<Hash> getAllRouters() {
+        if (isClientDb()) {
+            _log.warn("Subdb", new Exception("I did it"));
+            return Collections.emptySet();
+        }
         if (!_initialized) return Collections.emptySet();
         Set<Map.Entry<Hash, DatabaseEntry>> entries = _ds.getMapEntries();
         Set<Hash> rv = new HashSet<Hash>(entries.size());
@@ -493,6 +513,8 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
      * both the kbuckets AND the data store, which was fine when the kbuckets held everything.
      * But now that is probably not what you want.
      * Just return the count in the data store.
+     *
+     * @return 0 if this is a client DB
      */
     @Override
     public int getKnownRouters() {
@@ -502,6 +524,10 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
         _kb.getAll(count);
         return count.size();
     ****/
+        if (isClientDb()) {
+            _log.warn("Subdb", new Exception("I did it"));
+            return 0;
+        }
         if (_ds == null) return 0;
         int rv = 0;
         for (DatabaseEntry ds : _ds.getEntries()) {
@@ -815,12 +841,16 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
      * However, this may still fire off a lookup if the RI is present but expired (and will return null).
      * This may result in deadlocks.
      * For true local only, use lookupLocallyWithoutValidation()
+     *
+     * @return null always for client dbs
      */
     public RouterInfo lookupRouterInfoLocally(Hash key) {
         if (!_initialized) return null;
-        // Client netDb shouldn't have RI, search for RI in the floodfill netDb.
-        if (isClientDb())
-            return _context.netDb().lookupRouterInfoLocally(key);
+        if (isClientDb()) {
+            _log.warn("Subdb", new Exception("I did it"));
+            return null;
+        }
         DatabaseEntry ds = _ds.get(key);
         if (ds != null) {
             if (ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
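Note this is a behavioral change, not just different logging: before this merge a client DB silently forwarded RouterInfo lookups to the main DB; now it warns and returns null, and the forwarding duty moves to the callers -- which is exactly what the FloodfillVerifyStoreJob and InboundMessageDistributor hunks in this merge do by calling the main netDb's lookupRouterInfoLocally() directly.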
@@ -904,8 +934,11 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
      * Stores to local db only.
      * Overridden in FNDF to actually send to the floodfills.
      * @throws IllegalArgumentException if the local router info is invalid
+     *                                  or if this is a client DB
      */
     public void publish(RouterInfo localRouterInfo) throws IllegalArgumentException {
+        if (isClientDb())
+            throw new IllegalArgumentException("RI publish to client DB");
         if (!_initialized) return;
         if (_context.router().gracefulShutdownInProgress())
             return;
@@ -1334,6 +1367,8 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
      */
     RouterInfo store(Hash key, RouterInfo routerInfo, boolean persist) throws IllegalArgumentException {
         if (!_initialized) return null;
+        if (isClientDb())
+            throw new IllegalArgumentException("RI store to client DB");
 
         RouterInfo rv;
         try {
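This is the enforcement the merge title promises: a RouterInfo store into a client sub-DB now fails fast with an IllegalArgumentException instead of being quietly accepted (publish() above gets the same treatment). A runnable sketch of the new contract, with Strings standing in for Hash and RouterInfo:

    import java.util.HashMap;
    import java.util.Map;

    // Sketch only -- the real store() takes (Hash, RouterInfo, boolean).
    class StoreContractSketch {
        final boolean isClientDb;
        final Map<String, String> ds = new HashMap<>();

        StoreContractSketch(boolean isClientDb) { this.isClientDb = isClientDb; }

        String store(String key, String routerInfo) {
            if (isClientDb)
                throw new IllegalArgumentException("RI store to client DB");
            return ds.put(key, routerInfo);
        }

        public static void main(String[] args) {
            StoreContractSketch clientDb = new StoreContractSketch(true);
            try {
                clientDb.store("someHash", "someRouterInfo");
            } catch (IllegalArgumentException expected) {
                // Fail fast: a code path still routing RIs into a sub-DB is a
                // bug to surface, not a condition to tolerate silently.
                System.out.println("rejected: " + expected.getMessage());
            }
        }
    }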
@@ -1435,7 +1470,8 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
         DatabaseEntry o = _ds.get(dbEntry);
         if (o == null) {
             // if we dont know the key, lets make sure it isn't a now-dead peer
-            _kb.remove(dbEntry);
+            if (_kb != null)
+                _kb.remove(dbEntry);
             _context.peerManager().removeCapabilities(dbEntry);
             return;
         }
@@ -1451,7 +1487,7 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
         // are any updates
         if (_log.shouldLog(Log.INFO))
             _log.info("Dropping a lease: " + dbEntry);
-        if (isMainDb()) {
+        if (!isClientDb()) {
             _ds.remove(dbEntry, false);
         } else {
             // if this happens it's because we're a TransientDataStore instead,
@@ -1471,6 +1507,10 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
      * Do NOT use for leasesets.
      */
     void dropAfterLookupFailed(Hash peer) {
+        if (isClientDb()) {
+            _log.warn("Subdb", new Exception("I did it"));
+            return;
+        }
         _context.peerManager().removeCapabilities(peer);
         _negativeCache.cache(peer);
         _kb.remove(peer);
@@ -1558,9 +1598,17 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
         return leases;
     }
 
-    /** public for NetDbRenderer in routerconsole */
+    /**
+     * Public for NetDbRenderer in routerconsole
+     *
+     * @return empty set if this is a client DB
+     */
     @Override
     public Set<RouterInfo> getRouters() {
+        if (isClientDb()) {
+            _log.warn("Subdb", new Exception("I did it"));
+            return Collections.emptySet();
+        }
         if (!_initialized) return null;
         Set<RouterInfo> routers = new HashSet<RouterInfo>();
         for (DatabaseEntry o : getDataStore().getEntries()) {
@@ -1654,6 +1702,8 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
      */
     @Override
     public void renderStatusHTML(Writer out) throws IOException {
+        if (_kb == null)
+            return;
         out.write(_kb.toString().replace("\n", "<br>\n"));
     }
 
@@ -1662,7 +1712,7 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade
      */
     @Override
     public String toString() {
-        if (isMainDb())
+        if (!isClientDb())
             return "Main NetDB";
         return "Client NetDB " + _dbid.toBase64();
     }
@@ -390,10 +390,7 @@ class SearchJob extends JobImpl {
         Hash rkey = getContext().routingKeyGenerator().getRoutingKey(key);
         if (_log.shouldLog(Log.DEBUG))
             _log.debug(getJobId() + ": Current routing key for " + key + ": " + rkey);
-        if (_facade.isClientDb())
-            return getContext().netDb().getPeerSelector().selectNearestExplicit(rkey, numClosest, alreadyChecked, _facade.getKBuckets());
-        else
-            return _peerSelector.selectNearestExplicit(rkey, numClosest, alreadyChecked, _facade.getKBuckets());
+        return _peerSelector.selectNearestExplicit(rkey, numClosest, alreadyChecked, _facade.getKBuckets());
     }
 
     /**
@@ -89,7 +89,7 @@ abstract class StoreJob extends JobImpl {
         _onFailure = onFailure;
         _timeoutMs = timeoutMs;
         _expiration = context.clock().now() + timeoutMs;
-        _peerSelector = facade.createPeerSelector();
+        _peerSelector = facade.getPeerSelector();
         if (data.isLeaseSet()) {
             _connectChecker = null;
             _connectMask = 0;
@@ -318,18 +318,10 @@ abstract class StoreJob extends JobImpl {
     *****/
 
     private List<Hash> getClosestFloodfillRouters(Hash key, int numClosest, Set<Hash> alreadyChecked) {
-        List<Hash> rv;
         Hash rkey = getContext().routingKeyGenerator().getRoutingKey(key);
         KBucketSet<Hash> ks = _facade.getKBuckets();
         if (ks == null) return new ArrayList<Hash>();
-        if (_log.shouldLog(Log.DEBUG))
-            _log.debug(getJobId() + "(dbid: " + _facade._dbid + "): Selecting Floodfill Participants");
-        if (_facade.isClientDb()) {
-            FloodfillPeerSelector ffNetDbPS = (FloodfillPeerSelector)getContext().netDb().getPeerSelector();
-            rv = ffNetDbPS.selectFloodfillParticipants(rkey, numClosest, alreadyChecked, ks);
-        } else {
-            rv = ((FloodfillPeerSelector)_peerSelector).selectFloodfillParticipants(rkey, numClosest, alreadyChecked, ks);
-        }
+        List<Hash> rv = ((FloodfillPeerSelector)_peerSelector).selectFloodfillParticipants(rkey, numClosest, alreadyChecked, ks);
         return rv;
     }
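Same simplification as the SearchJob hunk above: once every job gets its selector from facade.getPeerSelector() (the @@ -89 hunk) and client facades share the main DB's selector and buckets, the isClientDb() branches are dead weight and both paths collapse into one call. A runnable sketch of why the branch can go, with stand-ins for FloodfillPeerSelector and the facades:

    import java.util.ArrayList;
    import java.util.List;

    // Sketch only -- Selector/Facade stand in for FloodfillPeerSelector and the
    // netDb facades; the point is that both facades resolve to one selector.
    class CollapseSketch {
        static class Selector {
            List<String> selectFloodfillParticipants(List<String> candidates, int max) {
                return new ArrayList<>(candidates.subList(0, Math.min(max, candidates.size())));
            }
        }

        static class Facade {
            final Selector selector;
            Facade(Selector shared) { selector = shared; }
            Selector getPeerSelector() { return selector; } // same object for every facade
        }

        public static void main(String[] args) {
            Selector shared = new Selector();
            Facade mainDb = new Facade(shared);
            Facade clientDb = new Facade(shared);
            // No isClientDb() branch needed: both call sites are identical.
            System.out.println(mainDb.getPeerSelector() == clientDb.getPeerSelector()); // true
        }
    }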
@@ -138,11 +138,7 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
             return;
         if (!ri.isValid())
             return;
-        RouterInfo oldri = null;
-        if (_client != null)
-            oldri = _context.clientNetDb(_client).lookupRouterInfoLocally(key);
-        else
-            oldri = _context.netDb().lookupRouterInfoLocally(key);
+        RouterInfo oldri = _context.netDb().lookupRouterInfoLocally(key);
         // only update if RI is newer and non-ff
         if (oldri != null && oldri.getPublished() < ri.getPublished() &&
             !FloodfillNetworkDatabaseFacade.isFloodfill(ri)) {