Compare commits

...

2 Commits

10 changed files with 37 additions and 17 deletions

RouterInfo.java

@@ -39,6 +39,7 @@ import net.i2p.data.KeysAndCert;
 import net.i2p.data.Signature;
 import net.i2p.data.SimpleDataStructure;
 import net.i2p.router.Router;
+import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
 import net.i2p.util.Clock;
 import net.i2p.util.Log;
 import net.i2p.util.OrderedProperties;
@@ -723,4 +724,9 @@ public class RouterInfo extends DatabaseEntry {
         if (fail)
             System.exit(1);
     }
+
+    public boolean isFloodfill() {
+        String caps = this.getCapabilities();
+        return caps.indexOf(FloodfillNetworkDatabaseFacade.CAPABILITY_FLOODFILL) >= 0;
+    }
 }
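Note: the new RouterInfo.isFloodfill() is just a substring scan of the router's published caps string for CAPABILITY_FLOODFILL ('f'). A minimal, self-contained sketch of that test ("LfR" is a made-up caps value for illustration):

    // Sketch of the check the new RouterInfo.isFloodfill() performs.
    // "LfR" is a hypothetical caps string; 'f' marks a floodfill router.
    public class CapsCheck {
        public static void main(String[] args) {
            String caps = "LfR";
            System.out.println(caps.indexOf('f') >= 0);  // true -> floodfill
        }
    }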

FloodfillNetworkDatabaseFacade.java

@@ -35,7 +35,7 @@ import net.i2p.util.SystemVersion;
  */
 public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
     public static final char CAPABILITY_FLOODFILL = 'f';
-    private static final String MINIMUM_SUBDB_PEERS = "router.subDbMinimumPeers";
+    private static final String ALWAYS_CONSIDER_PEER_FLOODFILL = "router.ignoreFloodfillCapability";
     private final Map<Hash, FloodSearchJob> _activeFloodQueries;
     private boolean _floodfillEnabled;
     private final Set<Hash> _verifiesInProgress;
@@ -98,6 +98,10 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
         _ffMonitor = new FloodfillMonitorJob(_context, this);
     }
 
+    public boolean ignoreFloodfillCapability() {
+        return _context.getBooleanProperty(ALWAYS_CONSIDER_PEER_FLOODFILL);
+    }
+
     @Override
     public synchronized void startup() {
         boolean isFF;
@@ -435,11 +439,20 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
 
     /**
      * @param peer may be null, returns false if null
+     * always returns true if ignoreFloodfillCapability()
      */
-    public static boolean isFloodfill(RouterInfo peer) {
-        if (peer == null) return false;
-        String caps = peer.getCapabilities();
-        return caps.indexOf(CAPABILITY_FLOODFILL) >= 0;
+    public boolean isFloodfill(RouterInfo peer) {
+        if (ignoreFloodfillCapability()) return true;
+        return peer.isFloodfill();
+    }
+
+    /**
+     * @param peer may be null, returns false if null
+     * always returns false if ignoreFloodfillCapability()
+     */
+    public boolean isNotFloodfill(RouterInfo peer) {
+        if (ignoreFloodfillCapability()) return false;
+        return !peer.isFloodfill();
     }
 
     public List<RouterInfo> getKnownRouterData() {
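Note: with the new router.ignoreFloodfillCapability property set, isFloodfill() returns true and isNotFloodfill() returns false for every peer, regardless of its caps. A self-contained sketch of that override logic with the i2p types stubbed out (names below are illustrative, not the real API):

    // Stub of the override semantics added above; 'f' = CAPABILITY_FLOODFILL.
    public class OverrideSketch {
        // stands in for _context.getBooleanProperty("router.ignoreFloodfillCapability")
        static boolean ignoreFloodfillCapability = true;

        static boolean isFloodfill(String caps) {
            if (ignoreFloodfillCapability) return true;   // every peer counts as floodfill
            return caps.indexOf('f') >= 0;
        }

        static boolean isNotFloodfill(String caps) {
            if (ignoreFloodfillCapability) return false;  // no peer counts as non-floodfill
            return caps.indexOf('f') < 0;
        }

        public static void main(String[] args) {
            System.out.println(isFloodfill("LR"));     // true, despite no 'f' cap
            System.out.println(isNotFloodfill("LR"));  // false
        }
    }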

FloodfillPeerSelector.java

@@ -377,7 +377,7 @@ class FloodfillPeerSelector extends PeerSelector {
             //if (info == null)
             //    return;
-            if (info != null && FloodfillNetworkDatabaseFacade.isFloodfill(info)) {
+            if (info != null && ((FloodfillNetworkDatabaseFacade) _context.netDb()).isFloodfill(info)) {
                 _floodfillMatches.add(entry);
             } else {
                 // This didn't really work because we stopped filling up when _wanted == _matches,

HandleFloodfillDatabaseLookupMessageJob.java

@@ -39,7 +39,7 @@ public class HandleFloodfillDatabaseLookupMessageJob extends HandleDatabaseLookupMessageJob {
     @Override
     protected boolean answerAllQueries() {
         if (!getContext().netDb().floodfillEnabled()) return false;
-        return FloodfillNetworkDatabaseFacade.isFloodfill(getContext().router().getRouterInfo());
+        return ((FloodfillNetworkDatabaseFacade) getContext().netDb()).isFloodfill(getContext().router().getRouterInfo());
     }
 
     /**

IterativeLookupJob.java

@@ -82,7 +82,7 @@ class IterativeLookupJob extends JobImpl {
                 }
                 newPeers++;
             } else if (ri.getPublished() < getContext().clock().now() - 60*60*1000 ||
-                       !FloodfillNetworkDatabaseFacade.isFloodfill(ri)) {
+                       ((FloodfillNetworkDatabaseFacade) getContext().netDb()).isNotFloodfill(ri)) {
                 // get an updated RI from the (now ff?) peer
                 // Only if original search used expl. tunnels
                 if (_search.getFromHash() == null) {

IterativeSearchJob.java

@@ -600,8 +600,9 @@ public class IterativeSearchJob extends FloodSearchJob {
             return;
         }
         RouterInfo ri = getContext().netDb().lookupRouterInfoLocally(peer);
-        if (ri != null && !FloodfillNetworkDatabaseFacade.isFloodfill(ri)) {
+        if (ri != null && ((FloodfillNetworkDatabaseFacade) getContext().netDb()).isNotFloodfill(ri)) {
+            if (((FloodfillNetworkDatabaseFacade) getContext().netDb()).ignoreFloodfillCapability())
             if (_log.shouldLog(Log.INFO))
                 _log.info(getJobId() + ": non-ff peer from DSRM " + peer);
             return;
         }

SearchJob.java

@@ -324,7 +324,7 @@ class SearchJob extends JobImpl {
                 _state.replyTimeout(peer);
             } else {
                 RouterInfo ri = (RouterInfo)ds;
-                if (!FloodfillNetworkDatabaseFacade.isFloodfill(ri)) {
+                if (((FloodfillNetworkDatabaseFacade) _facade).isNotFloodfill(ri)) {
                     _floodfillPeersExhausted = true;
                     if (onlyFloodfill)
                         continue;
@@ -481,7 +481,7 @@ class SearchJob extends JobImpl {
         SearchUpdateReplyFoundJob reply = new SearchUpdateReplyFoundJob(getContext(), router, _state, _facade,
                                                                         this, outTunnel, inTunnel);
-        if (FloodfillNetworkDatabaseFacade.isFloodfill(router))
+        if (((FloodfillNetworkDatabaseFacade) _facade).isFloodfill(router))
             _floodfillSearchesOutstanding++;
         getContext().messageRegistry().registerPending(sel, reply, new FailedJob(getContext(), router));
         // TODO pass a priority to the dispatcher
@@ -517,7 +517,7 @@ class SearchJob extends JobImpl {
         SendMessageDirectJob j = new SendMessageDirectJob(getContext(), msg, to,
                                                           reply, new FailedJob(getContext(), router), sel, timeout,
                                                           OutNetMessage.PRIORITY_EXPLORATORY, _msgIDBloomXor);
-        if (FloodfillNetworkDatabaseFacade.isFloodfill(router))
+        if (((FloodfillNetworkDatabaseFacade) _facade).isFloodfill(router))
             _floodfillSearchesOutstanding++;
         j.runJob();
         //getContext().jobQueue().addJob(j);
@@ -608,7 +608,7 @@ class SearchJob extends JobImpl {
             _penalizePeer = penalizePeer;
             _peer = peer.getIdentity().getHash();
             _sentOn = enclosingContext.clock().now();
-            _isFloodfill = FloodfillNetworkDatabaseFacade.isFloodfill(peer);
+            _isFloodfill = ((FloodfillNetworkDatabaseFacade) _facade).isFloodfill(peer);
         }
         public void runJob() {
             if (_isFloodfill)

SearchUpdateReplyFoundJob.java

@@ -44,7 +44,7 @@ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
         super(context);
         _log = context.logManager().getLog(SearchUpdateReplyFoundJob.class);
         _peer = peer.getIdentity().getHash();
-        _isFloodfillPeer = FloodfillNetworkDatabaseFacade.isFloodfill(peer);
+        _isFloodfillPeer = ((FloodfillNetworkDatabaseFacade) facade).isFloodfill(peer);
         _state = state;
         _facade = facade;
         _job = job;

SingleLookupJob.java

@@ -48,7 +48,7 @@ class SingleLookupJob extends JobImpl {
         if (ri == null)
             getContext().jobQueue().addJob(new SingleSearchJob(getContext(), peer, from));
         else if (ri.getPublished() < getContext().clock().now() - 60*60*1000 ||
-                 !FloodfillNetworkDatabaseFacade.isFloodfill(ri))
+                 ((FloodfillNetworkDatabaseFacade) getContext().netDb()).isNotFloodfill(ri))
             getContext().jobQueue().addJob(new SingleSearchJob(getContext(), peer, peer));
     }
 }

CapacityCalculator.java

@@ -121,7 +121,7 @@ class CapacityCalculator {
         if (ndb != null) {
             RouterInfo ri = (RouterInfo) ndb.lookupLocallyWithoutValidation(profile.getPeer());
             if (ri != null) {
-                if (!FloodfillNetworkDatabaseFacade.isFloodfill(ri))
+                if (((FloodfillNetworkDatabaseFacade) ndb).isNotFloodfill(ri))
                     capacity += BONUS_NON_FLOODFILL;
                 String caps = ri.getCapabilities();
                 if (caps.indexOf(Router.CAPABILITY_REACHABLE) < 0)
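Note: the call sites in the files above all follow the same migration: the former static FloodfillNetworkDatabaseFacade.isFloodfill(ri) becomes an instance method, so each caller first downcasts its facade reference (getContext().netDb(), _facade, or ndb). A stubbed sketch of the pattern (types and names are illustrative, not the real API):

    // Stub types standing in for the i2p facade hierarchy.
    class NetDbStub { }

    class FloodfillNetDbStub extends NetDbStub {
        boolean isFloodfill(String caps) { return caps.indexOf('f') >= 0; }
        boolean isNotFloodfill(String caps) { return !isFloodfill(caps); }
    }

    public class CallSiteSketch {
        public static void main(String[] args) {
            NetDbStub netDb = new FloodfillNetDbStub();
            // After this change, callers downcast before querying a peer's role:
            System.out.println(((FloodfillNetDbStub) netDb).isFloodfill("Lf"));  // true
        }
    }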