netdb class cleanup

zzz committed 2018-10-08 12:13:24 +00:00
parent 7dfee5f0ab
commit 5041bb8531
7 changed files with 9 additions and 16 deletions

FloodOnlySearchJob.java

@@ -37,7 +37,7 @@ import net.i2p.util.Log;
  * after it loses (or never had) floodfill references, as long as it
  * knows one peer that is up.
  */
-class FloodOnlySearchJob extends FloodSearchJob {
+abstract class FloodOnlySearchJob extends FloodSearchJob {
     private boolean _shouldProcessDSRM;
     private final HashSet<Hash> _unheardFrom;

FloodSearchJob.java

@@ -21,7 +21,7 @@ import net.i2p.util.Log;
  *
  * Note that this does NOT extend SearchJob.
  */
-public class FloodSearchJob extends JobImpl {
+abstract class FloodSearchJob extends JobImpl {
     protected final Log _log;
     protected final FloodfillNetworkDatabaseFacade _facade;
     protected final Hash _key;
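
FloodSearchJob loses its public modifier and becomes abstract, so code outside the kademlia netdb package can no longer reference or construct it; concrete behavior has to come from a subclass in the same package. A rough, self-contained sketch of what that visibility change means in plain Java (hypothetical names, not the real I2P classes):

// VisibilitySketch.java -- hypothetical example, not I2P code.
// A package-private abstract base can only be seen and extended from
// inside its own package, mirroring FloodSearchJob after this commit.
package netdb;

abstract class SearchBase {               // no "public": invisible outside the package
    abstract void runSearch();            // subclasses supply the actual search

    final void startSearch() {            // shared driver logic stays in the base class
        runSearch();
    }
}

class ConcreteSearch extends SearchBase { // same-package subclass is the only way in
    @Override
    void runSearch() {
        System.out.println("running search");
    }
}

public class VisibilitySketch {
    public static void main(String[] args) {
        new ConcreteSearch().startSearch();
        // From another package, neither `new SearchBase() {...}` nor even
        // `netdb.SearchBase s;` would compile -- the type is not visible there.
    }
}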

HandleFloodfillDatabaseStoreMessageJob.java

@@ -33,7 +33,7 @@ import net.i2p.util.Log;
  * Receive DatabaseStoreMessage data and store it in the local net db
  *
  */
-public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
+class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
     private final Log _log;
     private final DatabaseStoreMessage _message;
     private final RouterIdentity _from;

KademliaNetworkDatabaseFacade.java

@@ -52,7 +52,7 @@ import net.i2p.util.Log;
  * Kademlia based version of the network database.
  * Never instantiated directly; see FloodfillNetworkDatabaseFacade.
  */
-public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
+public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
     protected final Log _log;
     private KBucketSet<Hash> _kb; // peer hashes sorted into kbuckets, but within kbuckets, unsorted
     private DataStore _ds; // hash to DataStructure mapping, persisted when necessary
@@ -187,7 +187,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         return _initialized && _ds != null && _ds.isInitialized();
     }
-    protected PeerSelector createPeerSelector() { return new PeerSelector(_context); }
+    protected abstract PeerSelector createPeerSelector();
     public PeerSelector getPeerSelector() { return _peerSelector; }
     /** @since 0.9 */
@@ -1281,14 +1281,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
     }
     /** unused (overridden in FNDF) */
-    public void sendStore(Hash key, DatabaseEntry ds, Job onSuccess, Job onFailure, long sendTimeout, Set<Hash> toIgnore) {
-        if ( (ds == null) || (key == null) ) {
-            if (onFailure != null)
-                _context.jobQueue().addJob(onFailure);
-            return;
-        }
-        _context.jobQueue().addJob(new StoreJob(_context, this, key, ds, onSuccess, onFailure, sendTimeout, toIgnore));
-    }
+    public abstract void sendStore(Hash key, DatabaseEntry ds, Job onSuccess, Job onFailure, long sendTimeout, Set<Hash> toIgnore);
     /**
      * Increment in the negative lookup cache
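
With createPeerSelector() and sendStore(...) declared abstract, KademliaNetworkDatabaseFacade keeps no fallback implementations; its Javadoc above already says it is never instantiated directly and points at FloodfillNetworkDatabaseFacade as the concrete subclass. A minimal sketch of that shape, with simplified hypothetical types standing in for the real I2P ones:

// NetDbSketch.java -- illustrative only; simplified stand-ins for the I2P types.
import java.util.Set;

abstract class NetDbFacadeBase {
    // Hook for peer selection; the floodfill subclass decides the strategy.
    protected abstract String createPeerSelector();

    // Hook for sending a store; previously a mostly-unused default lived here.
    public abstract void sendStore(String key, Object entry, Set<String> toIgnore);
}

class FloodfillFacade extends NetDbFacadeBase {
    @Override
    protected String createPeerSelector() {
        return "floodfill-peer-selector";
    }

    @Override
    public void sendStore(String key, Object entry, Set<String> toIgnore) {
        System.out.println("queueing floodfill store for " + key);
    }
}

public class NetDbSketch {
    public static void main(String[] args) {
        NetDbFacadeBase db = new FloodfillFacade(); // the base class itself can no longer be instantiated
        System.out.println("selector: " + db.createPeerSelector());
        db.sendStore("someKey", new Object(), Set.of());
    }
}

Moving the defaults out of the base class turns a missing override or a stray direct instantiation into a compile-time error rather than a silent fallback.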

PeerSelector.java

@@ -26,7 +26,7 @@ import net.i2p.util.Log;
 /**
  * Mostly unused, see overrides in FloodfillPeerSelector
  */
-class PeerSelector {
+abstract class PeerSelector {
     protected final Log _log;
     protected final RouterContext _context;

RepublishLeaseSetJob.java

@@ -20,7 +20,7 @@ import net.i2p.util.Log;
  * if the client is still connected.
  *
  */
-public class RepublishLeaseSetJob extends JobImpl {
+class RepublishLeaseSetJob extends JobImpl {
     private final Log _log;
     public final static long REPUBLISH_LEASESET_TIMEOUT = 60*1000;
     private final static int RETRY_DELAY = 20*1000;

StoreJob.java

@@ -38,7 +38,7 @@ import net.i2p.util.VersionComparator;
  *
  * Unused directly - see FloodfillStoreJob
  */
-class StoreJob extends JobImpl {
+abstract class StoreJob extends JobImpl {
     protected final Log _log;
     private final KademliaNetworkDatabaseFacade _facade;
     protected final StoreState _state;