propagate from branch 'i2p.i2p.zzz.test' (head 9b243b031d937eaefcd4c15ae61bb4fa280d78f3)
to branch 'i2p.i2p' (head 9383c447b12abf45f80bd0059f719acfce4c70a3)
@@ -346,8 +346,12 @@ class PersistentDataStore extends TransientDataStore {
                 _alreadyWarned = false;
                 for (int i = 0; i < routerInfoFiles.length; i++) {
                     Hash key = getRouterInfoHash(routerInfoFiles[i].getName());
-                    if ( (key != null) && (!isKnown(key)) )
-                        PersistentDataStore.this._context.jobQueue().addJob(new ReadRouterJob(routerInfoFiles[i], key));
+                    if ( (key != null) && (!isKnown(key)) ) {
+                        // Run it inline so we don't clog up the job queue, esp. at startup
+                        // Also this allows us to wait until it is really done to call checkReseed() and set _initialized
+                        //PersistentDataStore.this._context.jobQueue().addJob(new ReadRouterJob(routerInfoFiles[i], key));
+                        (new ReadRouterJob(routerInfoFiles[i], key)).runJob();
+                    }
                 }
             }
         } catch (IOException ioe) {
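Note: the rewritten loop above runs each ReadRouterJob inline on the reading thread rather than queueing it, so at startup the job queue is not flooded and the caller knows every RouterInfo file has been read before it calls checkReseed() and sets _initialized. A minimal sketch of the tradeoff, using a hypothetical Job type rather than the I2P classes:

    // Sketch only: 'Job' is a stand-in for the I2P job interface.
    interface Job { void runJob(); }

    class Loader {
        private final java.util.Queue<Job> queue = new java.util.ArrayDeque<Job>();

        // Queued: returns immediately; completion time is unknown, and at
        // startup hundreds of these compete with every other queued job.
        void loadAsync(Job j) { queue.add(j); }

        // Inline: blocks the caller, but when the calling loop finishes,
        // all files are known to be read, so follow-up steps are safe.
        void loadInline(Job j) { j.runJob(); }
    }
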
@@ -58,7 +58,7 @@ public class ProfileOrganizer {
     private ProfilePersistenceHelper _persistenceHelper;
 
     /** PeerProfile objects for all peers profiled, orderd by the ones with the highest capacity first */
-    private Set _strictCapacityOrder;
+    private Set<PeerProfile> _strictCapacityOrder;
 
     /** threshold speed value, seperating fast from slow */
     private double _thresholdSpeedValue;
@@ -129,9 +129,9 @@ public class ProfileOrganizer {
     /** @return true if the lock was acquired */
     private boolean getWriteLock() {
         try {
-            boolean rv = _reorganizeLock.writeLock().tryLock(5000, TimeUnit.MILLISECONDS);
-            if (!rv)
-                _log.error("no lock, size is: " + _reorganizeLock.getQueueLength(), new Exception("rats"));
+            boolean rv = _reorganizeLock.writeLock().tryLock(3000, TimeUnit.MILLISECONDS);
+            if ((!rv) && _log.shouldLog(Log.WARN))
+                _log.warn("no lock, size is: " + _reorganizeLock.getQueueLength(), new Exception("rats"));
             return rv;
         } catch (InterruptedException ie) {}
         return false;
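Note: two independent fixes in this hunk: the bounded wait drops from 5 to 3 seconds, and the failure path logs at WARN instead of ERROR, gated on shouldLog() so the Exception (created only to capture a stack trace) is not constructed when WARN is disabled. The same pattern with plain JDK types, System.err standing in for the I2P Log class:

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class LockSketch {
        private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

        boolean getWriteLock() {
            try {
                // Bounded wait: give up after 3s instead of blocking forever.
                boolean ok = lock.writeLock().tryLock(3000, TimeUnit.MILLISECONDS);
                if (!ok)
                    System.err.println("no write lock, waiters: " + lock.getQueueLength());
                return ok;
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt(); // preserve interrupt status
            }
            return false;
        }
    }
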
@@ -199,15 +199,15 @@ public class ProfileOrganizer {
 
         getReadLock();
         try {
-            for (Iterator iter = _failingPeers.values().iterator(); iter.hasNext(); ) {
-                PeerProfile profile = (PeerProfile)iter.next();
+            for (Iterator<PeerProfile> iter = _failingPeers.values().iterator(); iter.hasNext(); ) {
+                PeerProfile profile = iter.next();
                 if (profile.getLastSendSuccessful() >= hideBefore)
                     activePeers++;
                 else if (profile.getLastHeardFrom() >= hideBefore)
                     activePeers++;
             }
-            for (Iterator iter = _notFailingPeers.values().iterator(); iter.hasNext(); ) {
-                PeerProfile profile = (PeerProfile)iter.next();
+            for (Iterator<PeerProfile> iter = _notFailingPeers.values().iterator(); iter.hasNext(); ) {
+                PeerProfile profile = iter.next();
                 if (profile.getLastSendSuccessful() >= hideBefore)
                     activePeers++;
                 else if (profile.getLastHeardFrom() >= hideBefore)
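Note: most of the remaining ProfileOrganizer hunks are this same mechanical generics migration: a raw Iterator plus a cast becomes a typed Iterator<T>, moving the type check from runtime to compile time without changing behavior. Reduced to a self-contained example with JDK types in place of Hash and PeerProfile:

    import java.util.HashMap;
    import java.util.Iterator;
    import java.util.Map;

    class GenericsSketch {
        static int countPositive(Map<String, Integer> values) {
            int n = 0;
            // Before: for (Iterator iter = values.values().iterator(); ...)
            //             Integer v = (Integer) iter.next();   // unchecked cast
            // After: the iterator is typed, so no cast is needed, and a
            // wrongly-typed map fails at compile time instead of at runtime.
            for (Iterator<Integer> iter = values.values().iterator(); iter.hasNext(); ) {
                Integer v = iter.next();
                if (v.intValue() > 0)
                    n++;
            }
            return n;
        }

        public static void main(String[] args) {
            Map<String, Integer> m = new HashMap<String, Integer>();
            m.put("a", 3);
            m.put("b", -1);
            System.out.println(countPositive(m)); // 1
        }
    }
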
@@ -509,17 +509,17 @@ public class ProfileOrganizer {
      * and peers requiring introducers.
      *
      */
-    public List selectPeersLocallyUnreachable() {
-        List n;
+    public List<Hash> selectPeersLocallyUnreachable() {
+        List<Hash> n;
         int count;
         getReadLock();
         try {
             count = _notFailingPeers.size();
             n = new ArrayList(_notFailingPeers.keySet());
         } finally { releaseReadLock(); }
-        List l = new ArrayList(count / 4);
-        for (Iterator iter = n.iterator(); iter.hasNext(); ) {
-            Hash peer = (Hash)iter.next();
+        List<Hash> l = new ArrayList(count / 4);
+        for (Iterator<Hash> iter = n.iterator(); iter.hasNext(); ) {
+            Hash peer = iter.next();
             if (_context.commSystem().wasUnreachable(peer))
                 l.add(peer);
             else {
@@ -570,8 +570,8 @@ public class ProfileOrganizer {
             long cutoff = _context.clock().now() - (20*1000);
             int count = _notFailingPeers.size();
             List l = new ArrayList(count / 128);
-            for (Iterator iter = _notFailingPeers.values().iterator(); iter.hasNext(); ) {
-                PeerProfile prof = (PeerProfile) iter.next();
+            for (Iterator<PeerProfile> iter = _notFailingPeers.values().iterator(); iter.hasNext(); ) {
+                PeerProfile prof = iter.next();
                 if (prof.getTunnelHistory().getLastRejectedBandwidth() > cutoff)
                     l.add(prof.getPeer());
             }
@@ -631,16 +631,16 @@ public class ProfileOrganizer {
             return;
         long start = System.currentTimeMillis();
         try {
-            Set allPeers = _strictCapacityOrder; //new HashSet(_failingPeers.size() + _notFailingPeers.size() + _highCapacityPeers.size() + _fastPeers.size());
+            Set<PeerProfile> allPeers = _strictCapacityOrder; //new HashSet(_failingPeers.size() + _notFailingPeers.size() + _highCapacityPeers.size() + _fastPeers.size());
             //allPeers.addAll(_failingPeers.values());
             //allPeers.addAll(_notFailingPeers.values());
             //allPeers.addAll(_highCapacityPeers.values());
             //allPeers.addAll(_fastPeers.values());
 
-            Set reordered = new TreeSet(_comp);
+            Set<PeerProfile> reordered = new TreeSet(_comp);
             long sortStart = System.currentTimeMillis();
-            for (Iterator iter = _strictCapacityOrder.iterator(); iter.hasNext(); ) {
-                PeerProfile prof = (PeerProfile)iter.next();
+            for (Iterator<PeerProfile> iter = _strictCapacityOrder.iterator(); iter.hasNext(); ) {
+                PeerProfile prof = iter.next();
                 if ( (expireOlderThan > 0) && (prof.getLastSendSuccessful() <= expireOlderThan) )
                     continue; // drop, but no need to delete, since we don't periodically reread
 
@@ -668,12 +668,13 @@ public class ProfileOrganizer {
 
             long placeStart = System.currentTimeMillis();
 
-            for (Iterator iter = allPeers.iterator(); iter.hasNext(); ) {
-                PeerProfile profile = (PeerProfile)iter.next();
+            for (Iterator<PeerProfile> iter = allPeers.iterator(); iter.hasNext(); ) {
+                PeerProfile profile = iter.next();
                 locked_placeProfile(profile);
             }
 
             locked_unfailAsNecessary();
+            locked_demoteHighCapAsNecessary();
             locked_promoteFastAsNecessary();
             locked_demoteFastAsNecessary();
 
@@ -720,8 +721,8 @@ public class ProfileOrganizer {
         if (numToPromote > 0) {
             if (_log.shouldLog(Log.INFO))
                 _log.info("Need to explicitly promote " + numToPromote + " peers to the fast group");
-            for (Iterator iter = _strictCapacityOrder.iterator(); iter.hasNext(); ) {
-                PeerProfile cur = (PeerProfile)iter.next();
+            for (Iterator<PeerProfile> iter = _strictCapacityOrder.iterator(); iter.hasNext(); ) {
+                PeerProfile cur = iter.next();
                 if ( (!_fastPeers.containsKey(cur.getPeer())) && (!cur.getIsFailing()) ) {
                     if (!isSelectable(cur.getPeer())) {
                         // skip peers we dont have in the netDb
@@ -771,6 +772,34 @@ public class ProfileOrganizer {
         }
     }
 
+    /**
+     * We want to put a limit on the high cap pool, to use only a small set of routers
+     * for expl. tunnels for anonymity reasons. Also, unless we use only a small
+     * number, we don't really find out who the high capacity ones are.
+     * @since 0.7.11
+     */
+    private void locked_demoteHighCapAsNecessary() {
+        int maxHighCapPeers = getMaximumHighCapPeers();
+        int numToDemote = _highCapacityPeers.size() - maxHighCapPeers;
+        if (numToDemote > 0) {
+            if (_log.shouldLog(Log.INFO))
+                _log.info("Need to explicitly demote " + numToDemote + " peers from the high cap group");
+            // sorted by capacity, highest-first
+            Iterator<PeerProfile> iter = _strictCapacityOrder.iterator();
+            for (int i = 0; iter.hasNext() && i < maxHighCapPeers; ) {
+                if (_highCapacityPeers.containsKey(iter.next().getPeer()))
+                    i++;
+            }
+            for (int i = 0; iter.hasNext() && i < numToDemote; ) {
+                Hash h = iter.next().getPeer();
+                if (_highCapacityPeers.remove(h) != null) {
+                    _fastPeers.remove(h);
+                    i++;
+                }
+            }
+        }
+    }
+
     /** how many not failing/active peers must we have? */
     private final static int MIN_NOT_FAILING_ACTIVE = 3;
     /**
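Note: the new locked_demoteHighCapAsNecessary() relies on _strictCapacityOrder iterating highest capacity first: the first loop advances past the best maxHighCapPeers entries that are actually in the high-cap pool, and the second loop demotes the next numToDemote pool members it encounters, also dropping them from the fast pool since fast peers are drawn from the high-cap set. The two-pass idea, reduced to a standalone sketch over sorted integers:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Iterator;
    import java.util.List;
    import java.util.Set;

    class DemoteSketch {
        // 'ordered' is sorted best-first, like _strictCapacityOrder;
        // keep the 'keep' best pool members and demote the excess.
        static void demote(List<Integer> ordered, Set<Integer> pool, int keep) {
            int excess = pool.size() - keep;
            if (excess <= 0)
                return;
            Iterator<Integer> iter = ordered.iterator();
            // Pass 1: skip over the 'keep' best entries that are in the pool.
            for (int i = 0; iter.hasNext() && i < keep; ) {
                if (pool.contains(iter.next()))
                    i++;
            }
            // Pass 2: remove the next 'excess' pool members encountered.
            for (int i = 0; iter.hasNext() && i < excess; ) {
                if (pool.remove(iter.next()))
                    i++;
            }
        }

        public static void main(String[] args) {
            List<Integer> ordered = Arrays.asList(9, 8, 7, 6, 5);
            Set<Integer> pool = new HashSet<Integer>(Arrays.asList(9, 7, 6, 5));
            demote(ordered, pool, 2);
            System.out.println(pool); // only the two best members, 9 and 7, remain
        }
    }
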
@@ -781,9 +810,9 @@ public class ProfileOrganizer {
      */
     private void locked_unfailAsNecessary() {
         int notFailingActive = 0;
-        for (Iterator iter = _notFailingPeers.keySet().iterator(); iter.hasNext(); ) {
-            Hash key = (Hash)iter.next();
-            PeerProfile peer = (PeerProfile)_notFailingPeers.get(key);
+        for (Iterator<Hash> iter = _notFailingPeers.keySet().iterator(); iter.hasNext(); ) {
+            Hash key = iter.next();
+            PeerProfile peer = _notFailingPeers.get(key);
             if (peer.getIsActive())
                 notFailingActive++;
             if (notFailingActive >= MIN_NOT_FAILING_ACTIVE) {
@@ -796,8 +825,8 @@ public class ProfileOrganizer {
         int needToUnfail = MIN_NOT_FAILING_ACTIVE - notFailingActive;
         if (needToUnfail > 0) {
             int unfailed = 0;
-            for (Iterator iter = _strictCapacityOrder.iterator(); iter.hasNext(); ) {
-                PeerProfile best = (PeerProfile)iter.next();
+            for (Iterator<PeerProfile> iter = _strictCapacityOrder.iterator(); iter.hasNext(); ) {
+                PeerProfile best = iter.next();
                 if ( (best.getIsActive()) && (best.getIsFailing()) ) {
                     if (_log.shouldLog(Log.WARN))
                         _log.warn("All peers were failing, so we have overridden the failing flag for one of the most reliable active peers (" + best.getPeer().toBase64() + ")");
@@ -828,8 +857,8 @@ public class ProfileOrganizer {
         double totalCapacity = 0;
         double totalIntegration = 0;
         Set reordered = new TreeSet(_comp);
-        for (Iterator iter = allPeers.iterator(); iter.hasNext(); ) {
-            PeerProfile profile = (PeerProfile)iter.next();
+        for (Iterator<PeerProfile> iter = allPeers.iterator(); iter.hasNext(); ) {
+            PeerProfile profile = iter.next();
 
             if (_us.equals(profile.getPeer())) continue;
 
@@ -878,8 +907,8 @@ public class ProfileOrganizer {
         double thresholdAtMinHighCap = 0;
         double thresholdAtLowest = CapacityCalculator.GROWTH_FACTOR;
         int cur = 0;
-        for (Iterator iter = reordered.iterator(); iter.hasNext(); ) {
-            PeerProfile profile = (PeerProfile)iter.next();
+        for (Iterator<PeerProfile> iter = reordered.iterator(); iter.hasNext(); ) {
+            PeerProfile profile = iter.next();
             double val = profile.getCapacityValue();
             if (val > meanCapacity)
                 numExceedingMean++;
@@ -970,8 +999,8 @@ public class ProfileOrganizer {
     private void locked_calculateSpeedThresholdMean(Set reordered) {
         double total = 0;
         int count = 0;
-        for (Iterator iter = reordered.iterator(); iter.hasNext(); ) {
-            PeerProfile profile = (PeerProfile)iter.next();
+        for (Iterator<PeerProfile> iter = reordered.iterator(); iter.hasNext(); ) {
+            PeerProfile profile = iter.next();
             if (profile.getCapacityValue() >= _thresholdCapacityValue) {
                 // duplicates being clobbered is fine by us
                 total += profile.getSpeedValue();
@@ -1227,6 +1256,11 @@ public class ProfileOrganizer {
         return 30;
     }
 
+    /** fixme add config @since 0.7.11 */
+    protected int getMaximumHighCapPeers() {
+        return 75;
+    }
+
     /**
      * Defines the minimum number of 'fast' peers that the organizer should select. If
      * the profile calculators derive a threshold that does not select at least this many peers,
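Note: this hard-coded 75 is the cap read by the new demote pass, and the "fixme add config" comment suggests it should eventually come from router configuration with a default. A hypothetical config-backed shape of that method (the property name is invented for illustration, not an existing I2P setting):

    import java.util.Properties;

    class HighCapConfigSketch {
        private final Properties config = new Properties();

        int getMaximumHighCapPeers() {
            // Invented property name; 75 remains the fallback default.
            String v = config.getProperty("profileOrganizer.maxHighCapPeers");
            if (v != null) {
                try {
                    return Integer.parseInt(v);
                } catch (NumberFormatException nfe) {}
            }
            return 75;
        }
    }
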
@@ -1266,8 +1300,8 @@ public class ProfileOrganizer {
         DecimalFormat fmt = new DecimalFormat("0,000.0");
         fmt.setPositivePrefix("+");
 
-        for (Iterator iter = organizer.selectAllPeers().iterator(); iter.hasNext(); ) {
-            Hash peer = (Hash)iter.next();
+        for (Iterator<Hash> iter = organizer.selectAllPeers().iterator(); iter.hasNext(); ) {
+            Hash peer = iter.next();
             PeerProfile profile = organizer.getProfile(peer);
             if (!profile.getIsActive()) {
                 System.out.println("Peer " + profile.getPeer().toBase64().substring(0,4)

@@ -13,6 +13,7 @@ import net.i2p.router.JobImpl;
 import net.i2p.router.RouterContext;
 import net.i2p.util.Log;
 
+/** This actually boots almost everything */
 public class BootCommSystemJob extends JobImpl {
     private Log _log;
 
@@ -26,18 +27,29 @@ public class BootCommSystemJob extends JobImpl {
     public String getName() { return "Boot Communication System"; }
 
     public void runJob() {
+        // The netDb and the peer manager both take a long time to start up,
+        // as they may have to read in ~1000 files or more each
+        // So turn on the multiple job queues and start these two first.
+        // These two (plus the current job) will consume 3 of the 4 runners,
+        // leaving one for everything else, which allows us to start without
+        // a huge job lag displayed on the console.
+        getContext().jobQueue().allowParallelOperation();
+        startupDb();
+        getContext().jobQueue().addJob(new BootPeerManagerJob(getContext()));
+
         // start up the network comm system
 
         getContext().commSystem().startup();
         getContext().tunnelManager().startup();
-        getContext().peerManager().startup();
+
+        // start I2CP
+        getContext().jobQueue().addJob(new StartAcceptingClientsJob(getContext()));
+
+        getContext().jobQueue().addJob(new ReadConfigJob(getContext()));
+    }
+
+    private void startupDb() {
         Job bootDb = new BootNetworkDbJob(getContext());
-        boolean useTrusted = false;
-        String useTrustedStr = getContext().router().getConfigSetting(PROP_USE_TRUSTED_LINKS);
-        if (useTrustedStr != null) {
-            useTrusted = Boolean.TRUE.toString().equalsIgnoreCase(useTrustedStr);
-        }
+        boolean useTrusted = Boolean.valueOf(getContext().getProperty(PROP_USE_TRUSTED_LINKS)).booleanValue();
         if (useTrusted) {
             _log.debug("Using trusted links...");
             getContext().jobQueue().addJob(new BuildTrustedLinksJob(getContext(), bootDb));
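Note: the reordered boot sequence rests on one observation: netDb and peer-manager startup are dominated by reading on the order of a thousand small files each, while the comm system and tunnel manager start quickly. Dispatching the two slow loads to parallel job runners and starting the fast subsystems inline keeps the console job lag down. The shape of it, with an ExecutorService standing in for the I2P job queue:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    class BootOrderSketch {
        public static void main(String[] args) {
            ExecutorService jobQueue = Executors.newFixedThreadPool(4);
            // The two slow, I/O-heavy loads go to worker threads...
            jobQueue.submit(() -> System.out.println("netDb: reading router files"));
            jobQueue.submit(() -> System.out.println("peer manager: reading profiles"));
            // ...while the fast subsystems start on the current thread.
            System.out.println("comm system started");
            System.out.println("tunnel manager started");
            jobQueue.shutdown();
        }
    }
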
@@ -10,10 +10,9 @@ package net.i2p.router.startup;
 
 import net.i2p.router.JobImpl;
 import net.i2p.router.RouterContext;
-import net.i2p.util.Log;
 
+/** start up the network database */
 public class BootNetworkDbJob extends JobImpl {
-    private static Log _log = new Log(BootNetworkDbJob.class);
 
     public BootNetworkDbJob(RouterContext ctx) {
         super(ctx);
@@ -22,10 +21,6 @@ public class BootNetworkDbJob extends JobImpl {
     public String getName() { return "Boot Network Database"; }
 
     public void runJob() {
-        // start up the network database
-
         getContext().netDb().startup();
-
-        getContext().jobQueue().addJob(new StartAcceptingClientsJob(getContext()));
     }
 }
@@ -0,0 +1,26 @@
+package net.i2p.router.startup;
+/*
+ * free (adj.): unencumbered; not under the control of others
+ * Written by jrandom in 2003 and released into the public domain
+ * with no warranty of any kind, either expressed or implied.
+ * It probably won't make your computer catch on fire, or eat
+ * your children, but it might. Use at your own risk.
+ *
+ */
+
+import net.i2p.router.JobImpl;
+import net.i2p.router.RouterContext;
+
+/** start up the peer manager */
+public class BootPeerManagerJob extends JobImpl {
+
+    public BootPeerManagerJob(RouterContext ctx) {
+        super(ctx);
+    }
+
+    public String getName() { return "Boot Peer Manager"; }
+
+    public void runJob() {
+        getContext().peerManager().startup();
+    }
+}
@@ -12,6 +12,7 @@ import net.i2p.router.JobImpl;
 import net.i2p.router.RouterContext;
 import net.i2p.util.Log;
 
+/** start I2CP interface */
 public class StartAcceptingClientsJob extends JobImpl {
     private Log _log;
 
@@ -23,13 +24,10 @@ public class StartAcceptingClientsJob extends JobImpl {
     public String getName() { return "Start Accepting Clients"; }
 
     public void runJob() {
-        // start up the network database
 
         getContext().clientManager().startup();
 
-        getContext().jobQueue().addJob(new ReadConfigJob(getContext()));
         // pointless
         //getContext().jobQueue().addJob(new RebuildRouterInfoJob(getContext()));
-        getContext().jobQueue().allowParallelOperation();
     }
 }