Compare commits

...

4 Commits

8 changed files with 38 additions and 136 deletions

RouterContext.java

@@ -374,7 +374,6 @@ public class RouterContext extends I2PAppContext {
*/
public SegmentedNetworkDatabaseFacade netDbSegmentor() { return _netDb; }
public FloodfillNetworkDatabaseFacade netDb() { return _netDb.mainNetDB(); }
public FloodfillNetworkDatabaseFacade multihomeNetDb() { return _netDb.multiHomeNetDB(); }
public FloodfillNetworkDatabaseFacade clientNetDb(Hash id) { return _netDb.clientNetDB(id); }
/**
* The actual driver of the router, where all jobs are enqueued and processed.
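
For orientation, a minimal caller-side sketch of the accessors that remain on RouterContext after this change: the segmentor, the main netDb, and the per-client netDb; the multihome accessor is gone. The helper name and the ctx / clientHash variables are hypothetical and not part of the diff.

// Hypothetical fragment, assuming the usual net.i2p.router and net.i2p.data imports.
static FloodfillNetworkDatabaseFacade selectNetDb(RouterContext ctx, Hash clientHash) {
    return (clientHash != null)
            ? ctx.clientNetDb(clientHash)   // per-client sub-netDb
            : ctx.netDb();                  // main netDb, used by the router itself and for floodfill duties
}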

ClientConnectionRunner.java

@@ -1185,8 +1185,6 @@ class ClientConnectionRunner {
* @since 0.9.60
*/
public FloodfillNetworkDatabaseFacade getFloodfillNetworkDatabaseFacade() {
if (!_context.netDbSegmentor().useSubDbs())
return _context.netDb();
if (_log.shouldLog(Log.DEBUG))
_log.debug("getFloodfillNetworkDatabaseFacade is getting the subDb for dbid: " + this.getDestHash());
if (_floodfillNetworkDatabaseFacade == null) {

DummyNetworkDatabaseFacade.java

@@ -100,11 +100,6 @@ public class DummyNetworkDatabaseFacade extends SegmentedNetworkDatabaseFacade {
return _fndb;
}
@Override
public FloodfillNetworkDatabaseFacade multiHomeNetDB() {
return _fndb;
}
@Override
public FloodfillNetworkDatabaseFacade clientNetDB(Hash id) {
return _fndb;

HandleDatabaseLookupMessageJob.java

@@ -147,34 +147,14 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
// Only send it out if it is in our estimated keyspace.
// For this, we do NOT use their dontInclude list as it can't be trusted
// (i.e. it could mess up the closeness calculation)
LeaseSet possibleMultihomed = null;
if (getContext().netDbSegmentor().useSubDbs()) {
possibleMultihomed = getContext().multihomeNetDb().lookupLeaseSetLocally(searchKey);
}
Set<Hash> closestHashes = getContext().netDb().findNearestRouters(searchKey,
CLOSENESS_THRESHOLD, null);
if (weAreClosest(closestHashes)) {
// It's in our keyspace, so give it to them
// there is a slight chance that there is also a multihomed router in our cache at the
// same time we are closest to our locally published leaseSet. That means there is a slight
// chance an attacker can send a leaseSet as a store which goes into the multihome cache, then
// fetch back a locally-created, locally-published leaseset. BUT, if we always publish a
// multihomed leaseset even if we are closest to the local, we never send it out if a potential
// multihome is found in the cache.
if (_log.shouldLog(Log.INFO))
_log.info("We have local LS " + searchKey + ", answering query, in our keyspace");
getContext().statManager().addRateData("netDb.lookupsMatchedLocalClosest", 1);
sendData(searchKey, ls, fromKey, toTunnel);
} else if (getContext().netDbSegmentor().useSubDbs() && possibleMultihomed != null) {
// If it's in the possibleMultihomed cache, then it was definitely stored to us meaning it is effectively
// always receivedAsPublished. No need to decide whether or not to answer the request like above, just
// answer it so it doesn't look different from other stores.
if (possibleMultihomed.getReceivedAsPublished()) {
if (_log.shouldLog(Log.INFO))
_log.info("We have local LS, possibly from a multihomed router " + searchKey + ", and somebody requested it back from us. Answering query, as if in our keyspace, to avoid attack.");
getContext().statManager().addRateData("netDb.lookupsMatchedLocalMultihome", 1);
sendData(searchKey, possibleMultihomed, fromKey, toTunnel);
}
} else {
// Lie, pretend we don't have it
if (_log.shouldLog(Log.INFO))
@@ -184,30 +164,17 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
}
} else {
LeaseSet possibleMultihomed = null;
if (getContext().netDbSegmentor().useSubDbs()) {
possibleMultihomed = getContext().multihomeNetDb().lookupLeaseSetLocally(searchKey);
}
if ((getContext().netDbSegmentor().useSubDbs()) && possibleMultihomed != null) {
if (possibleMultihomed.getReceivedAsPublished()) {
if (_log.shouldLog(Log.INFO))
_log.info("We have local LS " + searchKey + " in our multihomes cache meaning it was stored to us. Answering query with the stored LS.");
getContext().statManager().addRateData("netDb.lookupsMatchedLocalMultihome", 1);
sendData(searchKey, possibleMultihomed, fromKey, toTunnel);
}
} else {
// It was not published to us (we looked it up, for example)
// or it's local and we aren't floodfill,
// or it's local and we don't publish it.
// Lie, pretend we don't have it
if (_log.shouldLog(Log.INFO))
_log.info("We have LS " + searchKey +
", NOT answering query - local? " + isLocal + " shouldPublish? " + shouldPublishLocal +
" RAP? " + ls.getReceivedAsPublished() + " RAR? " + ls.getReceivedAsReply());
getContext().statManager().addRateData("netDb.lookupsMatchedRemoteNotClosest", 1);
Set<Hash> routerHashSet = getNearestRouters(lookupType);
sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
}
// It was not published to us (we looked it up, for example)
// or it's local and we aren't floodfill,
// or it's local and we don't publish it.
// Lie, pretend we don't have it
if (_log.shouldLog(Log.INFO))
_log.info("We have LS " + searchKey +
", NOT answering query - local? " + isLocal + " shouldPublish? " + shouldPublishLocal +
" RAP? " + ls.getReceivedAsPublished() + " RAR? " + ls.getReceivedAsReply());
getContext().statManager().addRateData("netDb.lookupsMatchedRemoteNotClosest", 1);
Set<Hash> routerHashSet = getNearestRouters(lookupType);
sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
}
} else if (type == DatabaseEntry.KEY_TYPE_ROUTERINFO &&
lookupType != DatabaseLookupMessage.Type.LS) {
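
Condensed paraphrase of the lookup path for a locally known LeaseSet after this change, using the names from the hunks above (not the literal file contents): with the multihome cache gone, keyspace closeness is the only remaining test before answering.

// Paraphrased post-change logic; see the hunks above for the real code.
Set<Hash> closestHashes = getContext().netDb().findNearestRouters(searchKey,
                                                                  CLOSENESS_THRESHOLD, null);
if (weAreClosest(closestHashes)) {
    // In our keyspace: answer with the LeaseSet we hold.
    sendData(searchKey, ls, fromKey, toTunnel);
} else {
    // Not closest, or the LS was not published to us: lie and return the closest routers instead.
    Set<Hash> routerHashSet = getNearestRouters(lookupType);
    sendClosest(searchKey, routerHashSet, fromKey, toTunnel);
}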

FloodfillNetworkDatabaseSegmentor.java

@@ -23,17 +23,15 @@ import net.i2p.util.Log;
* are identified by the hash of the primary session belonging to the client who "owns"
* a particular sub-netDb.
*
* There are 3 "Special" netDbs which have non-hash names:
* There is one "Special" netDb which has a non-hash name. This is used for the operation of
* the router itself, not for clients, in particular when acting as a floodfill:
*
* - Main NetDB: This is the netDb we use if or when we become a floodfill, and for
* direct interaction with other routers on the network, such as when we are communicating
* with a floodfill.
* - Multihome NetDB: This is used to stash leaseSets for our own sites when they are
* sent to us by a floodfill, so that we can reply when they are requested back from us
* regardless of our closeness to them in the routing table.
* - Exploratory NetDB: This is used when we want to stash a DatabaseEntry for a key
* during exploration but don't want it to go into the Main NetDB until we do something
* else with it.
*
* It may be advantageous some day to have other netDbs for specific use
* cases, but that is not the purpose of this class at this time.
*
* And there are an unlimited number of "Client" netDbs. These sub-netDbs are
* intended to contain only the information required to operate them, and as such
@@ -52,11 +50,9 @@ import net.i2p.util.Log;
public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
protected final Log _log;
private RouterContext _context;
private static final String PROP_NETDB_ISOLATION = "router.netdb.isolation";
//private static final String PROP_NETDB_ISOLATION = "router.netdb.isolation";
public static final Hash MAIN_DBID = null;
public static final Hash MULTIHOME_DBID = Hash.FAKE_HASH;
private final FloodfillNetworkDatabaseFacade _mainDbid;
private final FloodfillNetworkDatabaseFacade _multihomeDbid;
/**
* Construct a new FloodfillNetworkDatabaseSegmentor with the given
@@ -71,12 +67,12 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
if (_context == null)
_context = context;
_mainDbid = new FloodfillNetworkDatabaseFacade(_context, MAIN_DBID);
_multihomeDbid = new FloodfillNetworkDatabaseFacade(_context, MULTIHOME_DBID);
}
/* Commented out prior to 2.4.0 release, might be worth resurrecting at some point
public boolean useSubDbs() {
return _context.getProperty(PROP_NETDB_ISOLATION, true);
}
}*/
/**
* Retrieves the FloodfillNetworkDatabaseFacade object for the specified ID.
@@ -87,8 +83,6 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
*/
@Override
protected FloodfillNetworkDatabaseFacade getSubNetDB(Hash id) {
if (!useSubDbs())
return _mainDbid;
return _context.clientManager().getClientFloodfillNetworkDatabaseFacade(id);
}
@@ -103,8 +97,6 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
if (_log.shouldLog(Log.DEBUG))
_log.debug("shutdown called from FNDS, shutting down main and multihome db");
_mainDbid.shutdown();
if (useSubDbs())
_multihomeDbid.shutdown();
}
/**
@@ -117,8 +109,6 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
if (_log.shouldLog(Log.DEBUG))
_log.debug("startup called from FNDS, starting up main and multihome db");
_mainDbid.startup();
if (useSubDbs())
_multihomeDbid.startup();
}
/**
@@ -256,17 +246,6 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
return _mainDbid;
}
/**
* get the multiHome netDb, which is especially for handling multihomes
*
* @since 0.9.60
* @return may be null
*/
@Override
public FloodfillNetworkDatabaseFacade multiHomeNetDB() {
return _multihomeDbid;
}
/**
* get the client netDb for the given id
* Will return the "exploratory(default client)" netDb if
@@ -279,8 +258,6 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
public FloodfillNetworkDatabaseFacade clientNetDB(Hash id) {
if (_log.shouldDebug())
_log.debug("looked up clientNetDB: " + id);
if (!useSubDbs())
return _mainDbid;
if (id != null){
FloodfillNetworkDatabaseFacade fndf = getSubNetDB(id);
if (fndf != null)
@@ -328,12 +305,7 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
if (!_mainDbid.isInitialized())
return Collections.emptySet();
Set<FloodfillNetworkDatabaseFacade> rv = new HashSet<>();
if (!useSubDbs()) {
rv.add(_mainDbid);
return rv;
}
rv.add(_mainDbid);
rv.add(multiHomeNetDB());
rv.addAll(_context.clientManager().getClientFloodfillNetworkDatabaseFacades());
return rv;
}
@@ -350,10 +322,6 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
if (!_mainDbid.isInitialized())
return Collections.emptySet();
Set<FloodfillNetworkDatabaseFacade> rv = new HashSet<>();
if (!useSubDbs()) {
rv.add(_mainDbid);
return rv;
}
rv.addAll(_context.clientManager().getClientFloodfillNetworkDatabaseFacades());
return rv;
}
@@ -368,7 +336,6 @@ public class FloodfillNetworkDatabaseSegmentor extends SegmentedNetworkDatabaseFacade {
public List<BlindData> getLocalClientsBlindData() {
List<BlindData> rv = new ArrayList<>();
for (FloodfillNetworkDatabaseFacade subdb : getClientSubNetDBs()) {
rv.addAll(subdb.getBlindData());
}
return rv;
}
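
To summarize the enumeration methods after this change (a paraphrase of the two hunks above, not the literal file): getSubNetDBs() returns the router's main netDb plus every client sub-netDb, while getClientSubNetDBs() returns only the client sub-netDbs; the multihome entry and the useSubDbs gate are gone from both.

// Paraphrase of the post-change enumeration, assuming the same fields as the class above.
Set<FloodfillNetworkDatabaseFacade> all = new HashSet<>();
all.add(_mainDbid);                                                               // the router's own netDb
all.addAll(_context.clientManager().getClientFloodfillNetworkDatabaseFacades());  // one per client

Set<FloodfillNetworkDatabaseFacade> clientsOnly =
        new HashSet<>(_context.clientManager().getClientFloodfillNetworkDatabaseFacades());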

HandleFloodfillDatabaseStoreMessageJob.java

@@ -151,9 +151,9 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
// If we're using subdbs, store the leaseSet in the multihome DB.
// otherwise, throw rather than return, so that we send the ack below (prevent easy attack)
dontBlamePeer = true;
if (getContext().netDbSegmentor().useSubDbs())
getContext().multihomeNetDb().store(key, ls);
else
//if (getContext().netDbSegmentor().useSubDbs())
//getContext().multihomeNetDb().store(key, ls);
//else
throw new IllegalArgumentException("(dbid: " + _facade._dbid
+ ") Peer attempted to store local leaseSet: "
+ key.toBase32());

KademliaNetworkDatabaseFacade.java

@@ -338,8 +338,6 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
// FNDS.MAIN_DBID is always null, and if _dbid is also null it is not a client Db
if (_dbid == FloodfillNetworkDatabaseSegmentor.MAIN_DBID)
return false;
if (_dbid.equals(FloodfillNetworkDatabaseSegmentor.MULTIHOME_DBID))
return false;
return true;
}
@@ -370,8 +368,6 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
// FNDS.MAIN_DBID is always null, and if _dbid is null it is not the multihome Db
if (_dbid == FloodfillNetworkDatabaseSegmentor.MAIN_DBID)
return false;
if (_dbid.equals(FloodfillNetworkDatabaseSegmentor.MULTIHOME_DBID))
return true;
return false;
}
@@ -885,24 +881,22 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
_log.error("locally published leaseSet is not valid?", iae);
throw iae;
}
if (!_context.netDbSegmentor().useSubDbs()){
String dbid = "main netDb";
if (isClientDb()) {
dbid = "client netDb: " + _dbid;
}
if (_localKey != null) {
if (!_localKey.equals(localLeaseSet.getHash()))
if (_log.shouldLog(Log.ERROR))
_log.error("[" + dbid + "]" + "Error, the local LS hash ("
+ _localKey + ") does not match the published hash ("
+ localLeaseSet.getHash() + ")! This shouldn't happen!",
new Exception());
} else {
// This will only happen once when the local LS is first published
_localKey = localLeaseSet.getHash();
if (_log.shouldLog(Log.INFO))
_log.info("[" + dbid + "]" + "Local client LS key initialized to: " + _localKey);
}
String dbid = "main netDb";
if (isClientDb()) {
dbid = "client netDb: " + _dbid;
}
if (_localKey != null) {
if (!_localKey.equals(localLeaseSet.getHash()))
if (_log.shouldLog(Log.ERROR))
_log.error("[" + dbid + "]" + "Error, the local LS hash ("
+ _localKey + ") does not match the published hash ("
+ localLeaseSet.getHash() + ")! This shouldn't happen!",
new Exception());
} else {
// This will only happen once when the local LS is first published
_localKey = localLeaseSet.getHash();
if (_log.shouldLog(Log.INFO))
_log.info("[" + dbid + "]" + "Local client LS key initialized to: " + _localKey);
}
if (!_context.clientManager().shouldPublishLeaseSet(h))
return;
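
The two predicates touched above simplify as follows (their real method names are not visible in the hunks, so the signatures below are hypothetical; the bodies paraphrase the post-change logic): the client-db check now only asks whether _dbid is non-null, and the multihome check can never be true again.

// Hypothetical signatures; the bodies paraphrase the hunks above.
boolean isClientDbSketch() {
    // FNDS.MAIN_DBID is always null, so any non-null _dbid denotes a client netDb.
    return _dbid != FloodfillNetworkDatabaseSegmentor.MAIN_DBID;
}
boolean isMultihomeDbSketch() {
    // MULTIHOME_DBID was removed, so nothing identifies as the multihome netDb anymore.
    return false;
}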

SegmentedNetworkDatabaseFacade.java

@@ -60,16 +60,6 @@ public abstract class SegmentedNetworkDatabaseFacade {
public SegmentedNetworkDatabaseFacade(RouterContext context) {
// super(context, null);
}
/**
* Determine whether to use subDb defenses at all or to use the extant FNDF/RAP/RAR defenses
*
* @return true if using subDbs, false if not
* @since 0.9.60
*/
public boolean useSubDbs() {
return false;
}
/**
* Get a sub-netDb using a Hash identifier
@@ -85,14 +75,6 @@
* @since 0.9.60
*/
public abstract FloodfillNetworkDatabaseFacade mainNetDB();
/**
* Get the multihome netDb, the one which is used if we're a floodfill AND we
* have a multihome address sent to us
*
* @return may be null if the multihome netDb is not initialized
* @since 0.9.60
*/
public abstract FloodfillNetworkDatabaseFacade multiHomeNetDB();
/**
* Get a client netDb for a given client Hash identifier. Will never
* return the mainNetDB.
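
After this change the abstract surface of SegmentedNetworkDatabaseFacade no longer includes useSubDbs() or multiHomeNetDB(). A minimal outline of the accessors that remain, based on the hunks above (the outline class name and modifiers are illustrative; other members are omitted):

// Illustrative outline only; see the hunks above for the real declarations.
public abstract class SegmentedNetworkDatabaseFacadeOutline {
    protected abstract FloodfillNetworkDatabaseFacade getSubNetDB(Hash id); // sub-netDb by Hash identifier
    public abstract FloodfillNetworkDatabaseFacade mainNetDB();             // the router's own netDb
    public abstract FloodfillNetworkDatabaseFacade clientNetDB(Hash id);    // per-client netDb, never mainNetDB()
}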