forked from I2P_Developers/i2p.i2p
Compare commits
4 Commits
i2p.i2p-ne ... i2p.i2p-2.
| Author | SHA1 | Date |
|---|---|---|
| | 44fce96cea | |
| | 569e2e89a0 | |
| | 492356d11b | |
| | 4c8a441e06 | |
@@ -374,7 +374,6 @@ public class RouterContext extends I2PAppContext {
      */
     public SegmentedNetworkDatabaseFacade netDbSegmentor() { return _netDb; }
     public FloodfillNetworkDatabaseFacade netDb() { return _netDb.mainNetDB(); }
-    public FloodfillNetworkDatabaseFacade multihomeNetDb() { return _netDb.multiHomeNetDB(); }
     public FloodfillNetworkDatabaseFacade clientNetDb(Hash id) { return _netDb.clientNetDB(id); }
     /**
      * The actual driver of the router, where all jobs are enqueued and processed.
@@ -1185,8 +1185,6 @@ class ClientConnectionRunner {
      * @since 0.9.60
      */
     public FloodfillNetworkDatabaseFacade getFloodfillNetworkDatabaseFacade() {
-        if (!_context.netDbSegmentor().useSubDbs())
-            return _context.netDb();
         if (_log.shouldLog(Log.DEBUG))
             _log.debug("getFloodfillNetworkDatabaseFacade is getting the subDb for dbid: " + this.getDestHash());
         if (_floodfillNetworkDatabaseFacade == null) {
@@ -100,11 +100,6 @@ public class DummyNetworkDatabaseFacade extends SegmentedNetworkDatabaseFacade {
         return _fndb;
     }

-    @Override
-    public FloodfillNetworkDatabaseFacade multiHomeNetDB() {
-        return _fndb;
-    }
-
     @Override
     public FloodfillNetworkDatabaseFacade clientNetDB(Hash id) {
         return _fndb;
@@ -147,34 +147,14 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
                 // Only send it out if it is in our estimated keyspace.
                 // For this, we do NOT use their dontInclude list as it can't be trusted
                 // (i.e. it could mess up the closeness calculation)
-                LeaseSet possibleMultihomed = null;
-                if (getContext().netDbSegmentor().useSubDbs()) {
-                    possibleMultihomed = getContext().multihomeNetDb().lookupLeaseSetLocally(searchKey);
-                }
                 Set<Hash> closestHashes = getContext().netDb().findNearestRouters(searchKey,
                                                                                   CLOSENESS_THRESHOLD, null);
                 if (weAreClosest(closestHashes)) {
                     // It's in our keyspace, so give it to them
-                    // there is a slight chance that there is also a multihomed router in our cache at the
-                    // same time we are closest to our locally published leaseSet. That means there is a slight
-                    // chance an attacker can send a least as a store which goes into the multihome cache, then
-                    // fetch back a locally-created, locally-published leaseset. BUT, if we always publish a
-                    // multihomed leaseset even if we are closest to the local, we never send it out if a potential
-                    // multihome is found in the cache.
                     if (_log.shouldLog(Log.INFO))
                         _log.info("We have local LS " + searchKey + ", answering query, in our keyspace");
                     getContext().statManager().addRateData("netDb.lookupsMatchedLocalClosest", 1);
                     sendData(searchKey, ls, fromKey, toTunnel);
-                } else if (getContext().netDbSegmentor().useSubDbs() && possibleMultihomed != null) {
-                    // If it's in the possibleMultihomed cache, then it was definitely stored to us meaning it is effectively
-                    // always recievedAsPublished. No need to decide whether or not to answer the request like above, just
-                    // answer it so it doesn't look different from other stores.
-                    if (possibleMultihomed.getReceivedAsPublished()) {
-                        if (_log.shouldLog(Log.INFO))
-                            _log.info("We have local LS, possibly from a multihomed router " + searchKey + ", and somebody requested it back from us. Answering query, as if in our keyspace, to avoid attack.");
-                        getContext().statManager().addRateData("netDb.lookupsMatchedLocalMultihome", 1);
-                        sendData(searchKey, possibleMultihomed, fromKey, toTunnel);
-                    }
                 } else {
                     // Lie, pretend we don't have it
                     if (_log.shouldLog(Log.INFO))
@@ -184,30 +164,17 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
                     sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
                 }
             } else {
-                LeaseSet possibleMultihomed = null;
-                if (getContext().netDbSegmentor().useSubDbs()) {
-                    possibleMultihomed = getContext().multihomeNetDb().lookupLeaseSetLocally(searchKey);
-                }
-                if ((getContext().netDbSegmentor().useSubDbs()) && possibleMultihomed != null) {
-                    if (possibleMultihomed.getReceivedAsPublished()) {
-                        if (_log.shouldLog(Log.INFO))
-                            _log.info("We have local LS " + searchKey + " in our multihomes cache meaning it was stored to us. Answering query with the stored LS.");
-                        getContext().statManager().addRateData("netDb.lookupsMatchedLocalMultihome", 1);
-                        sendData(searchKey, possibleMultihomed, fromKey, toTunnel);
-                    }
-                } else {
-                    // It was not published to us (we looked it up, for example)
-                    // or it's local and we aren't floodfill,
-                    // or it's local and we don't publish it.
-                    // Lie, pretend we don't have it
-                    if (_log.shouldLog(Log.INFO))
-                        _log.info("We have LS " + searchKey +
-                                  ", NOT answering query - local? " + isLocal + " shouldPublish? " + shouldPublishLocal +
-                                  " RAP? " + ls.getReceivedAsPublished() + " RAR? " + ls.getReceivedAsReply());
-                    getContext().statManager().addRateData("netDb.lookupsMatchedRemoteNotClosest", 1);
-                    Set<Hash> routerHashSet = getNearestRouters(lookupType);
-                    sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
-                }
+                // It was not published to us (we looked it up, for example)
+                // or it's local and we aren't floodfill,
+                // or it's local and we don't publish it.
+                // Lie, pretend we don't have it
+                if (_log.shouldLog(Log.INFO))
+                    _log.info("We have LS " + searchKey +
+                              ", NOT answering query - local? " + isLocal + " shouldPublish? " + shouldPublishLocal +
+                              " RAP? " + ls.getReceivedAsPublished() + " RAR? " + ls.getReceivedAsReply());
+                getContext().statManager().addRateData("netDb.lookupsMatchedRemoteNotClosest", 1);
+                Set<Hash> routerHashSet = getNearestRouters(lookupType);
+                sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
             }
         } else if (type == DatabaseEntry.KEY_TYPE_ROUTERINFO &&
                    lookupType != DatabaseLookupMessage.Type.LS) {
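Net effect of the two hunks above: the LeaseSet branch of HandleDatabaseLookupMessageJob no longer consults a multihome cache, so the only question left is whether we are among the closest floodfills to the key. Below is a minimal, self-contained model of that simplified decision; LookupDecision and its names are illustrative only and are not part of i2p.i2p.

```java
// Illustrative model only -- the real job works on I2NP messages via
// getContext().netDb(), sendData() and sendClosest(); this class just
// captures the branch structure left after the removals above.
public class LookupDecision {
    enum Action { SEND_DATA, SEND_CLOSEST_ROUTERS }

    // With the multihome path gone there is no "answer anyway" case:
    // either the key falls in our keyspace and we serve the LeaseSet,
    // or we "lie" and only return the closest router hashes we know.
    static Action decide(boolean weAreClosest) {
        return weAreClosest ? Action.SEND_DATA : Action.SEND_CLOSEST_ROUTERS;
    }

    public static void main(String[] args) {
        System.out.println(decide(true));   // SEND_DATA
        System.out.println(decide(false));  // SEND_CLOSEST_ROUTERS
    }
}
```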
@@ -23,17 +23,15 @@ import net.i2p.util.Log;
 * are identified by the hash of the primary session belonging to the client who "owns"
 * a particular sub-netDb.
 *
-* There are 3 "Special" netDbs which have non-hash names:
+* There is one "Special" netDb which has a non-hash name. This is used for the operation of
+* router itself and not clients, in particular when acting as a floodfill:
 *
 * - Main NetDB: This is the netDb we use if or when we become a floodfill, and for
 * direct interaction with other routers on the network, such as when we are communicating
 * with a floodfill.
-* - Multihome NetDB: This is used to stash leaseSets for our own sites when they are
-* sent to us by a floodfill, so that we can reply when they are requested back from us
-* regardless of our closeness to them in the routing table.
-* - Exploratory NetDB: This is used when we want to stash a DatabaseEntry for a key
-* during exploration but don't want it to go into the Main NetDB until we do something
-* else with it.
+*
+* It is possible that it may be advantageous some day to have other netDb's for specific use
+* cases, but that is not the purpose of this class at this time.
 *
 * And there are an unlimited number of "Client" netDbs. These sub-netDbs are
 * intended to contain only the information required to operate them, and as such
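With the Multihome and Exploratory entries gone from the Javadoc, the segmentation model described above reduces to one main netDb plus one sub-netDb per client. The sketch below shows how calling code picks a database through the accessors from the RouterContext hunk (netDb() and clientNetDb(Hash)); it assumes the i2p router classes are on the classpath and is not part of this patch.

```java
// Sketch only: netDb selection after this change. Assumes i2p.jar and
// router.jar on the classpath; NetDbSelection itself is hypothetical.
import net.i2p.data.Hash;
import net.i2p.router.RouterContext;
import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;

class NetDbSelection {
    /**
     * Use the per-client sub-netDb when a client (destination) hash is known,
     * otherwise the router's main netDb -- mirroring RouterContext.netDb()
     * and RouterContext.clientNetDb(Hash) from the first hunk in this diff.
     */
    static FloodfillNetworkDatabaseFacade select(RouterContext ctx, Hash client) {
        return (client != null) ? ctx.clientNetDb(client) : ctx.netDb();
    }
}
```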
@@ -52,11 +50,9 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
 public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
     protected final Log _log;
     private RouterContext _context;
-    private static final String PROP_NETDB_ISOLATION = "router.netdb.isolation";
+    //private static final String PROP_NETDB_ISOLATION = "router.netdb.isolation";
     public static final Hash MAIN_DBID = null;
-    public static final Hash MULTIHOME_DBID = Hash.FAKE_HASH;
     private final FloodfillNetworkDatabaseFacade _mainDbid;
-    private final FloodfillNetworkDatabaseFacade _multihomeDbid;

     /**
      * Construct a new FloodfillNetworkDatabaseSegmentor with the given
@@ -71,12 +67,12 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
         if (_context == null)
             _context = context;
         _mainDbid = new FloodfillNetworkDatabaseFacade(_context, MAIN_DBID);
-        _multihomeDbid = new FloodfillNetworkDatabaseFacade(_context, MULTIHOME_DBID);
     }

+    /* Commented out prior to 2.4.0 release, might be worth resurrecting at some point
     public boolean useSubDbs() {
         return _context.getProperty(PROP_NETDB_ISOLATION, true);
-    }
+    }*/

     /**
      * Retrieves the FloodfillNetworkDatabaseFacade object for the specified ID.
@@ -87,8 +83,6 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
      */
     @Override
     protected FloodfillNetworkDatabaseFacade getSubNetDB(Hash id) {
-        if (!useSubDbs())
-            return _mainDbid;
         return _context.clientManager().getClientFloodfillNetworkDatabaseFacade(id);
     }

@@ -103,8 +97,6 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
         if (_log.shouldLog(Log.DEBUG))
             _log.debug("shutdown called from FNDS, shutting down main and multihome db");
         _mainDbid.shutdown();
-        if (useSubDbs())
-            _multihomeDbid.shutdown();
     }

     /**
@@ -117,8 +109,6 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
         if (_log.shouldLog(Log.DEBUG))
             _log.debug("startup called from FNDS, starting up main and multihome db");
         _mainDbid.startup();
-        if (useSubDbs())
-            _multihomeDbid.startup();
     }

     /**
@@ -256,17 +246,6 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
         return _mainDbid;
     }

-    /**
-     * get the multiHome netDb, which is especially for handling multihomes
-     *
-     * @since 0.9.60
-     * @return may be null
-     */
-    @Override
-    public FloodfillNetworkDatabaseFacade multiHomeNetDB() {
-        return _multihomeDbid;
-    }
-
     /**
      * get the client netDb for the given id
      * Will return the "exploratory(default client)" netDb if
@@ -279,8 +258,6 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
     public FloodfillNetworkDatabaseFacade clientNetDB(Hash id) {
         if (_log.shouldDebug())
             _log.debug("looked up clientNetDB: " + id);
-        if (!useSubDbs())
-            return _mainDbid;
         if (id != null){
             FloodfillNetworkDatabaseFacade fndf = getSubNetDB(id);
             if (fndf != null)
@@ -328,12 +305,7 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
         if (!_mainDbid.isInitialized())
             return Collections.emptySet();
         Set<FloodfillNetworkDatabaseFacade> rv = new HashSet<>();
-        if (!useSubDbs()) {
-            rv.add(_mainDbid);
-            return rv;
-        }
         rv.add(_mainDbid);
-        rv.add(multiHomeNetDB());
         rv.addAll(_context.clientManager().getClientFloodfillNetworkDatabaseFacades());
         return rv;
     }
@@ -350,10 +322,6 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
         if (!_mainDbid.isInitialized())
             return Collections.emptySet();
         Set<FloodfillNetworkDatabaseFacade> rv = new HashSet<>();
-        if (!useSubDbs()) {
-            rv.add(_mainDbid);
-            return rv;
-        }
         rv.addAll(_context.clientManager().getClientFloodfillNetworkDatabaseFacades());
         return rv;
     }
@@ -368,7 +336,6 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
     public List<BlindData> getLocalClientsBlindData() {
         List<BlindData> rv = new ArrayList<>();
         for (FloodfillNetworkDatabaseFacade subdb : getClientSubNetDBs()) {
-            rv.addAll(subdb.getBlindData());
         }
         return rv;
     }
@@ -151,9 +151,9 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
                 // If we're using subdbs, store the leaseSet in the multihome DB.
                 // otherwise, throw rather than return, so that we send the ack below (prevent easy attack)
                 dontBlamePeer = true;
-                if (getContext().netDbSegmentor().useSubDbs())
-                    getContext().multihomeNetDb().store(key, ls);
-                else
+                //if (getContext().netDbSegmentor().useSubDbs())
+                //    getContext().multihomeNetDb().store(key, ls);
+                //else
                 throw new IllegalArgumentException("(dbid: " + _facade._dbid
                                                    + ") Peer attempted to store local leaseSet: "
                                                    + key.toBase32());
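Per the surviving comment, throwing instead of returning lets the job still send the ack afterwards, so a peer probing us with our own LeaseSet learns nothing from the reply. A tiny self-contained model of that flow follows; the class and names are hypothetical, not the router's code.

```java
// Hypothetical sketch of the control flow, not the real job: the actual
// handler catches the IllegalArgumentException and then sends the ack.
public class LocalLeaseSetStoreSketch {
    /** Always reject a store of one of our own locally published LeaseSets. */
    static void handleStoreOfLocalLeaseSet(String keyBase32) {
        // dontBlamePeer = true in the real code: the sender is not penalized.
        throw new IllegalArgumentException(
                "Peer attempted to store local leaseSet: " + keyBase32);
    }

    public static void main(String[] args) {
        try {
            handleStoreOfLocalLeaseSet("example.b32.i2p");
        } catch (IllegalArgumentException iae) {
            // Mirrors the job's catch: record the reason, then ack as usual.
            System.out.println("rejected: " + iae.getMessage());
        }
    }
}
```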
@@ -338,8 +338,6 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         // FNDS.MAIN_DBID is always null. and if _dbid is also null it is not a client Db
         if (_dbid == FloodfillNetworkDatabaseSegmentor.MAIN_DBID)
             return false;
-        if (_dbid.equals(FloodfillNetworkDatabaseSegmentor.MULTIHOME_DBID))
-            return false;
         return true;
     }

@@ -370,8 +368,6 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         // FNDS.MAIN_DBID is always null, and if _dbid is null it is not the multihome Db
         if (_dbid == FloodfillNetworkDatabaseSegmentor.MAIN_DBID)
             return false;
-        if (_dbid.equals(FloodfillNetworkDatabaseSegmentor.MULTIHOME_DBID))
-            return true;
         return false;
     }

|
|||||||
_log.error("locally published leaseSet is not valid?", iae);
|
_log.error("locally published leaseSet is not valid?", iae);
|
||||||
throw iae;
|
throw iae;
|
||||||
}
|
}
|
||||||
if (!_context.netDbSegmentor().useSubDbs()){
|
String dbid = "main netDb";
|
||||||
String dbid = "main netDb";
|
if (isClientDb()) {
|
||||||
if (isClientDb()) {
|
dbid = "client netDb: " + _dbid;
|
||||||
dbid = "client netDb: " + _dbid;
|
}
|
||||||
}
|
if (_localKey != null) {
|
||||||
if (_localKey != null) {
|
if (!_localKey.equals(localLeaseSet.getHash()))
|
||||||
if (!_localKey.equals(localLeaseSet.getHash()))
|
if (_log.shouldLog(Log.ERROR))
|
||||||
if (_log.shouldLog(Log.ERROR))
|
_log.error("[" + dbid + "]" + "Error, the local LS hash ("
|
||||||
_log.error("[" + dbid + "]" + "Error, the local LS hash ("
|
+ _localKey + ") does not match the published hash ("
|
||||||
+ _localKey + ") does not match the published hash ("
|
+ localLeaseSet.getHash() + ")! This shouldn't happen!",
|
||||||
+ localLeaseSet.getHash() + ")! This shouldn't happen!",
|
new Exception());
|
||||||
new Exception());
|
} else {
|
||||||
} else {
|
// This will only happen once when the local LS is first published
|
||||||
// This will only happen once when the local LS is first published
|
_localKey = localLeaseSet.getHash();
|
||||||
_localKey = localLeaseSet.getHash();
|
if (_log.shouldLog(Log.INFO))
|
||||||
if (_log.shouldLog(Log.INFO))
|
_log.info("[" + dbid + "]" + "Local client LS key initialized to: " + _localKey);
|
||||||
_log.info("[" + dbid + "]" + "Local client LS key initialized to: " + _localKey);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if (!_context.clientManager().shouldPublishLeaseSet(h))
|
if (!_context.clientManager().shouldPublishLeaseSet(h))
|
||||||
return;
|
return;
|
||||||
|
@@ -60,16 +60,6 @@ public abstract class SegmentedNetworkDatabaseFacade {
     public SegmentedNetworkDatabaseFacade(RouterContext context) {
         // super(context, null);
     }

-    /**
-     * Determine whether to use subDb defenses at all or to use the extant FNDF/RAP/RAR defenses
-     *
-     * @return true if using subDbs, false if not
-     * @since 0.9.60
-     */
-    public boolean useSubDbs() {
-        return false;
-    }
-
     /**
      * Get a sub-netDb using a Hash identifier
@@ -85,14 +75,6 @@ public abstract class SegmentedNetworkDatabaseFacade {
      * @since 0.9.60
      */
     public abstract FloodfillNetworkDatabaseFacade mainNetDB();
-    /**
-     * Get the multihome netDb, the one which is used if we're a floodfill AND we
-     * have a multihome address sent to us
-     *
-     * @return may be null if the multihome netDb is not initialized
-     * @since 0.9.60
-     */
-    public abstract FloodfillNetworkDatabaseFacade multiHomeNetDB();
     /**
      * Get a client netDb for a given client Hash identifier. Will never
      * return the mainNetDB.
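After the last two hunks, SegmentedNetworkDatabaseFacade no longer exposes useSubDbs() or multiHomeNetDB(); what remains for callers is the main netDb plus the per-client netDbs. An abridged sketch of that remaining surface, with signatures taken from the hunks above (the real class declares more members):

```java
// Abridged sketch, not the actual source file: only the two accessors that
// survive this diff are shown.
import net.i2p.data.Hash;
import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;

abstract class SegmentedNetDbSurface {
    /** The netDb used for floodfill duties and direct router-to-router traffic. */
    public abstract FloodfillNetworkDatabaseFacade mainNetDB();

    /** The per-client sub-netDb for the given client hash; never the main netDb. */
    public abstract FloodfillNetworkDatabaseFacade clientNetDB(Hash id);
}
```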