Merge of 'cc7dee6f711dd10db6c1f42af8dc7ba6f6b0002d'
and 'dc2fa2d01da4c7b3733d4dadb85d757d592c1fa6'
zzz committed 2008-02-19 15:16:53 +00:00
10 changed files with 117 additions and 53 deletions

View File

@@ -70,19 +70,32 @@ public class GraphHelper {
}
if (hasTx && hasRx && !_showEvents)
_out.write("<a href=\"viewstat.jsp?stat=bw.combined"
+ "&amp;periodCount=" + (3 * _periodCount )
+ "&amp;width=" + (3 * _width)
+ "&amp;height=" + (3 * _height)
+ "\" />");
_out.write("<img width=\""
+ (_width + 83) + "\" height=\"" + (_height + 92)
+ "\" src=\"viewstat.jsp?stat=bw.combined"
+ "&amp;periodCount=" + _periodCount
+ "&amp;width=" + _width
+ "&amp;height=" + (_height - 14)
+ "\" title=\"Combined bandwidth graph\" />\n");
+ "\" title=\"Combined bandwidth graph\" /></a>\n");
for (Iterator iter = ordered.iterator(); iter.hasNext(); ) {
SummaryListener lsnr = (SummaryListener)iter.next();
Rate r = lsnr.getRate();
String title = r.getRateStat().getName() + " for " + DataHelper.formatDuration(_periodCount * r.getPeriod());
_out.write("<img width=\""
_out.write("<a href=\"viewstat.jsp?stat="
+ r.getRateStat().getName()
+ "&amp;showEvents=" + _showEvents
+ "&amp;period=" + r.getPeriod()
+ "&amp;periodCount=" + (3 * _periodCount)
+ "&amp;width=" + (3 * _width)
+ "&amp;height=" + (3 * _height)
+ "\" />");
_out.write("<img border=\"0\" width=\""
+ (_width + 83) + "\" height=\"" + (_height + 92)
+ "\" src=\"viewstat.jsp?stat="
+ r.getRateStat().getName()
@@ -91,7 +104,7 @@ public class GraphHelper {
+ "&amp;periodCount=" + _periodCount
+ "&amp;width=" + _width
+ "&amp;height=" + _height
+ "\" title=\"" + title + "\" />\n");
+ "\" title=\"" + title + "\" /></a>\n");
}
if (_refreshDelaySeconds > 0)
_out.write("<meta http-equiv=\"refresh\" content=\"" + _refreshDelaySeconds + "\" />\n");

View File

@@ -162,7 +162,8 @@ public class UpdateHandler {
_status = "<b>Update verified</b><br />Restarting<br />";
restart();
} else {
_log.log(Log.CRIT, "Update was INVALID - have you changed your keys?");
_log.log(Log.CRIT, "Update was INVALID - signing key is not trusted!");
_status = "<b>Update signing key invalid</b><br />";
System.setProperty("net.i2p.router.web.UpdateHandler.updateInProgress", "false");
}
}

View File

@@ -11,6 +11,7 @@ Change revision in:
installer/install.xml
news.xml
router/java/src/net/i2p/router/RouterVersion.java
core/java/src/net/i2p/CoreVersion.java
Build and tag:
ant dist

View File

@@ -207,11 +207,11 @@ public class SimpleTimer {
_occurredEventCount += eventsToFire.size();
} else {
_occurredTime = now;
- if (_occurredEventCount > 1000) {
+ if (_occurredEventCount > 2500) {
StringBuffer buf = new StringBuffer(128);
buf.append("Too many simpleTimerJobs (").append(_occurredEventCount);
buf.append(") in a second!");
- _log.log(Log.CRIT, buf.toString());
+ _log.log(Log.WARN, buf.toString());
}
_occurredEventCount = 0;
}

View File

@@ -1,3 +1,27 @@
2008-02-16 zzz
* Fix race in TunnelDispatcher which caused
participating tunnel count to seesaw -
should increase network capacity
* Leave participating tunnels in 10s batches for efficiency
* Update participating tunnel ratestat when leaving a tunnel too,
to generate a smoother graph
* Fix tunnel.participatingMessageCount stat to include all
participating tunnels, not just outbound endpoints
* Simplify Expire Tunnel job name
2008-02-13 zzz
* PersistentDataStore: Write out 300 records every 10 min
rather than 1 every 10 sec;
Don't store leasesets to disk or read them in
* Combine rates for pools with the same length setting
in the new tunnel build algorithm
* Clarify a log message in the UpdateHandler
2008-02-13 zzz
* Make graphs clickable to get larger graphs
* Change SimpleTimer CRIT to a WARN, increase threshold
* Checklist update
2008-02-10 zzz
* Add new tunnel build algorithm (preliminary)
* Change NTCP backlogged message from error to warning

View File

@@ -17,7 +17,7 @@ import net.i2p.CoreVersion;
public class RouterVersion {
public final static String ID = "$Revision: 1.548 $ $Date: 2008-02-10 15:00:00 $";
public final static String VERSION = "0.6.1.31";
- public final static long BUILD = 1;
+ public final static long BUILD = 4;
public static void main(String args[]) {
System.out.println("I2P Router version: " + VERSION + "-" + BUILD);
System.out.println("Router ID: " + RouterVersion.ID);

View File

@@ -66,7 +66,9 @@ class PersistentDataStore extends TransientDataStore {
public void put(Hash key, DataStructure data) {
if ( (data == null) || (key == null) ) return;
super.put(key, data);
- _writer.queue(key, data);
+ // Don't bother writing LeaseSets to disk
+ if (data instanceof RouterInfo)
+     _writer.queue(key, data);
}
public int countLeaseSets() {
@@ -103,7 +105,7 @@ class PersistentDataStore extends TransientDataStore {
}
public String getName() { return "Remove Key"; }
public void runJob() {
_log.info("Removing key " + _key, getAddedBy());
_log.info("Removing key " + _key /* , getAddedBy() */);
try {
File dbDir = getDbDir();
removeFile(_key, dbDir);
@@ -113,6 +115,9 @@ class PersistentDataStore extends TransientDataStore {
}
}
/*
* Queue up writes, write up to 300 files every 10 minutes
*/
private class Writer implements Runnable {
private Map _keys;
private List _keyOrder;
@@ -137,12 +142,15 @@ class PersistentDataStore extends TransientDataStore {
public void run() {
Hash key = null;
DataStructure data = null;
int count = 0;
while (true) { // hmm, probably want a shutdown handle... though this is a daemon thread
try {
synchronized (_keys) {
if (_keyOrder.size() <= 0) {
count = 0;
_keys.wait();
} else {
count++;
key = (Hash)_keyOrder.remove(0);
data = (DataStructure)_keys.remove(key);
}
@@ -153,7 +161,10 @@ class PersistentDataStore extends TransientDataStore {
write(key, data);
key = null;
data = null;
- try { Thread.sleep(10*1000); } catch (InterruptedException ie) {}
+ if (count >= 300)
+     count = 0;
+ if (count == 0)
+     try { Thread.sleep(10*60*1000); } catch (InterruptedException ie) {}
}
}
}
@@ -227,6 +238,7 @@ class PersistentDataStore extends TransientDataStore {
int routerCount = 0;
try {
File dbDir = getDbDir();
/****
if (getContext().router().getUptime() < 10*60*1000) {
File leaseSetFiles[] = dbDir.listFiles(LeaseSetFilter.getInstance());
if (leaseSetFiles != null) {
@@ -237,6 +249,7 @@ class PersistentDataStore extends TransientDataStore {
}
}
}
****/
File routerInfoFiles[] = dbDir.listFiles(RouterInfoFilter.getInstance());
if (routerInfoFiles != null) {
routerCount += routerInfoFiles.length;
@@ -259,6 +272,7 @@ class PersistentDataStore extends TransientDataStore {
}
}
/****
private class ReadLeaseJob extends JobImpl {
private File _leaseFile;
private Hash _key;
@@ -313,6 +327,7 @@ class PersistentDataStore extends TransientDataStore {
}
}
}
****/
private class ReadRouterJob extends JobImpl {
private File _routerFile;
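Taken together, the PersistentDataStore changes above do two things: put() no longer queues LeaseSets for writing (only RouterInfos), and the writer thread drains its queue in batches of up to 300 files, pausing ten minutes between passes instead of one second between files, with LeaseSet reading commented out entirely. A self-contained sketch of that batching loop, with illustrative names (BatchWriterSketch and its fields are not the real class):

    import java.util.Iterator;
    import java.util.LinkedHashMap;
    import java.util.Map;

    // Illustrative batch writer: queue puts, write up to 300 entries per pass, then pause 10 minutes.
    class BatchWriterSketch<K, V> implements Runnable {
        private static final int MAX_PER_PASS = 300;
        private static final long PASS_DELAY = 10*60*1000;
        private final Map<K, V> _pending = new LinkedHashMap<K, V>();

        public void queue(K key, V data) {
            synchronized (_pending) {
                _pending.put(key, data);
                _pending.notifyAll();             // wake the writer if it is idle
            }
        }

        public void run() {
            int count = 0;
            while (true) {
                K key = null;
                V data = null;
                try {
                    synchronized (_pending) {
                        if (_pending.isEmpty()) {
                            count = 0;
                            _pending.wait();      // nothing queued; wait for a queue() call
                        } else {
                            count++;
                            Iterator<Map.Entry<K, V>> it = _pending.entrySet().iterator();
                            Map.Entry<K, V> e = it.next();
                            key = e.getKey();
                            data = e.getValue();
                            it.remove();
                        }
                    }
                } catch (InterruptedException ie) {}
                if (key != null && data != null)
                    write(key, data);             // one file written per iteration
                if (count >= MAX_PER_PASS)        // a batch of 300 just finished...
                    count = 0;
                if (count == 0)                   // ...so pause before the next pass
                    try { Thread.sleep(PASS_DELAY); } catch (InterruptedException ie) {}
            }
        }

        void write(K key, V data) { /* the real class serializes the entry to its own file here */ }
    }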

View File

@@ -338,14 +338,19 @@ public class TunnelDispatcher implements Service {
_log.debug("removing " + cfg);
boolean removed = false;
int numParticipants = 0;
synchronized (_participatingConfig) {
removed = (null != _participatingConfig.remove(recvId));
numParticipants = _participatingConfig.size();
}
if (!removed) {
if (_log.shouldLog(Log.WARN))
_log.warn("Participating tunnel, but no longer listed in participatingConfig? " + cfg);
}
_context.statManager().addRateData("tunnel.participatingTunnels", numParticipants, 0);
_context.statManager().addRateData("tunnel.participatingMessageCount", cfg.getProcessedMessagesCount(), 10*60*1000);
synchronized (_participants) {
removed = (null != _participants.remove(recvId));
}
@@ -357,8 +362,6 @@ public class TunnelDispatcher implements Service {
synchronized (_outboundEndpoints) {
removed = (null != _outboundEndpoints.remove(recvId));
}
_context.statManager().addRateData("tunnel.participatingMessageCount", cfg.getProcessedMessagesCount(), 10*60*1000);
}
/**
@@ -635,32 +638,41 @@ public class TunnelDispatcher implements Service {
_times = new ArrayList(128);
}
private static final int LEAVE_BATCH_TIME = 10*1000;
public void add(HopConfig cfg) {
- Long dropTime = new Long(cfg.getExpiration() + 2*Router.CLOCK_FUDGE_FACTOR);
+ Long dropTime = new Long(cfg.getExpiration() + 2*Router.CLOCK_FUDGE_FACTOR + LEAVE_BATCH_TIME);
boolean noTunnels;
synchronized (LeaveTunnel.this) {
noTunnels = _configs.size() <= 0;
_configs.add(cfg);
_times.add(dropTime);
}
// Make really sure we queue or requeue the job only when we have to, or else bad things happen.
// Locking around this part may not be sufficient but there was nothing before.
// Symptom is the Leave Participant job not running for 12m, leading to seesaw participating tunnel count
long oldAfter = getTiming().getStartAfter();
long oldStart = getTiming().getActualStart();
if ( noTunnels || (oldAfter <= 0) ||
(oldAfter < getContext().clock().now() && oldAfter <= oldStart) || // if oldAfter > oldStart, it's late but it will run, so don't do this (race)
(oldAfter >= dropTime.longValue()) ) {
getTiming().setStartAfter(dropTime.longValue());
getContext().jobQueue().addJob(LeaveTunnel.this);
} else {
// already scheduled for the future, and before this expiration
}
}
if (_log.shouldLog(Log.INFO)) {
long now = getContext().clock().now();
_log.info("Scheduling leave in " + DataHelper.formatDuration(dropTime.longValue()-now) +": " + cfg);
}
- long oldAfter = getTiming().getStartAfter();
- if ( (oldAfter <= 0) || (oldAfter < getContext().clock().now()) || (oldAfter >= dropTime.longValue()) ) {
-     getTiming().setStartAfter(dropTime.longValue());
-     getContext().jobQueue().addJob(LeaveTunnel.this);
- } else {
-     // already scheduled for the future, and before this expiration
- }
}
public String getName() { return "Leave participant"; }
public void runJob() {
HopConfig cur = null;
Long nextTime = null;
- long now = getContext().clock().now();
+ long now = getContext().clock().now() + LEAVE_BATCH_TIME; // leave all expiring in next 10 sec
while (true) {
synchronized (LeaveTunnel.this) {
if (_configs.size() <= 0)
@@ -685,8 +697,10 @@ public class TunnelDispatcher implements Service {
}
if (nextTime != null) {
- getTiming().setStartAfter(nextTime.longValue());
- getContext().jobQueue().addJob(LeaveTunnel.this);
+ synchronized (LeaveTunnel.this) {
+     getTiming().setStartAfter(nextTime.longValue());
+     getContext().jobQueue().addJob(LeaveTunnel.this);
+ }
}
}
}
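The LeaveTunnel changes above batch tunnel departures into roughly 10-second windows and only (re)schedule the job when they must: the queue was empty, nothing (or something stale) was scheduled, or the new drop time is earlier than the currently scheduled run; the check now happens under the same lock as the queue update. Below is a simplified, self-contained sketch of that scheduling rule with illustrative names; the real check also compares the scheduled time against the job's actual start time to dodge a race with a late-but-still-pending run.

    import java.util.ArrayList;
    import java.util.List;

    // Illustrative batcher: collect expirations and run one cleanup job per ~10s window.
    class LeaveBatcherSketch {
        private static final long LEAVE_BATCH_TIME = 10*1000;
        private final List<Long> _times = new ArrayList<Long>();
        private long _scheduledFor;                          // 0 = no run scheduled

        // Called once per expiring tunnel.
        synchronized void add(long expiration, long now) {
            boolean wasEmpty = _times.isEmpty();
            long dropTime = expiration + LEAVE_BATCH_TIME;   // pad so nearby expirations share one run
            _times.add(Long.valueOf(dropTime));
            // Requeue only when we have to: empty queue, nothing or something stale scheduled,
            // or the scheduled run would come later than this entry's drop time.
            if (wasEmpty || _scheduledFor <= 0 || _scheduledFor < now || _scheduledFor >= dropTime) {
                _scheduledFor = dropTime;
                scheduleRun(dropTime);                       // hand the batch job to the timer/job queue
            }
        }

        void scheduleRun(long when) { /* enqueue the cleanup job to fire at 'when' */ }
    }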

View File

@@ -29,24 +29,7 @@ class ExpireJob extends JobImpl {
getTiming().setStartAfter(expire);
}
public String getName() {
- if (_pool.getSettings().isExploratory()) {
-     if (_pool.getSettings().isInbound()) {
-         return "Expire exploratory inbound tunnel";
-     } else {
-         return "Expire exploratory outbound tunnel";
-     }
- } else {
-     StringBuffer rv = new StringBuffer(32);
-     if (_pool.getSettings().isInbound())
-         rv.append("Expire inbound client tunnel for ");
-     else
-         rv.append("Expire outbound client tunnel for ");
-     if (_pool.getSettings().getDestinationNickname() != null)
-         rv.append(_pool.getSettings().getDestinationNickname());
-     else
-         rv.append(_pool.getSettings().getDestination().toBase64().substring(0,4));
-     return rv.toString();
- }
+ return "Expire tunnel";
}
public void runJob() {
if (!_leaseUpdated) {

View File

@@ -40,7 +40,6 @@ public class TunnelPool {
private long _lastRateUpdate;
private long _lastLifetimeProcessed;
private final String _rateName;
- private final String _buildStatName;
private static final int TUNNEL_LIFETIME = 10*60*1000;
public TunnelPool(RouterContext ctx, TunnelPoolManager mgr, TunnelPoolSettings settings, TunnelPeerSelector sel) {
@@ -61,9 +60,6 @@ public class TunnelPool {
_rateName = "tunnel.Bps." +
(_settings.isExploratory() ? "exploratory" : _settings.getDestinationNickname()) +
(_settings.isInbound() ? ".in" : ".out");
_buildStatName = "tunnel.build." +
(_settings.isExploratory() ? "exploratory" : _settings.getDestinationNickname()) +
(_settings.isInbound() ? ".in" : ".out");
refreshSettings();
}
@@ -87,9 +83,6 @@ public class TunnelPool {
_context.statManager().createRateStat(_rateName,
"Tunnel Bandwidth", "Tunnels",
new long[] { 5*60*1000l });
- _context.statManager().createRateStat(_buildStatName,
-     "Tunnel Build Frequency", "Tunnels",
-     new long[] { TUNNEL_LIFETIME });
}
public void shutdown() {
@@ -460,6 +453,17 @@ public class TunnelPool {
public long getLifetimeProcessed() { return _lifetimeProcessed; }
/**
* Keep a separate stat for each type, direction, and length of tunnel.
*/
private final String buildRateName() {
if (_settings.isExploratory())
return "tunnel.buildRatio.exploratory." + (_settings.isInbound() ? "in" : "out");
else
return "tunnel.buildRatio.l" + _settings.getLength() + "v" + _settings.getLengthVariance() +
(_settings.isInbound() ? ".in" : ".out");
}
/**
* Gather the data to see how many tunnels to build, and then actually compute that value (delegated to
* the countHowManyToBuild function below)
@@ -491,8 +495,17 @@ public class TunnelPool {
*
**/
// Compute the average time it takes us to build a single tunnel of this type.
int avg = 0;
- RateStat rs = _context.statManager().getRate(_buildStatName);
+ RateStat rs = _context.statManager().getRate(buildRateName());
if (rs == null) {
// Create the RateStat here rather than at the top because
// the user could change the length settings while running
_context.statManager().createRateStat(buildRateName(),
"Tunnel Build Frequency", "Tunnels",
new long[] { TUNNEL_LIFETIME });
rs = _context.statManager().getRate(buildRateName());
}
if (rs != null) {
Rate r = rs.getRate(TUNNEL_LIFETIME);
if (r != null)
@@ -568,7 +581,7 @@ public class TunnelPool {
+ " soon " + expireSoon + " later " + expireLater
+ " std " + wanted + " inProgress " + inProgress + " fallback " + fallback
+ " for " + toString());
- _context.statManager().addRateData(_buildStatName, rv + inProgress, 0);
+ _context.statManager().addRateData(buildRateName(), rv + inProgress, 0);
return rv;
}
@@ -622,7 +635,7 @@ public class TunnelPool {
int rv = countHowManyToBuild(allowZeroHop, expire30s, expire90s, expire150s, expire210s, expire270s,
expireLater, wanted, inProgress, fallback);
- _context.statManager().addRateData(_buildStatName, (rv > 0 || inProgress > 0) ? 1 : 0, 0);
+ _context.statManager().addRateData(buildRateName(), (rv > 0 || inProgress > 0) ? 1 : 0, 0);
return rv;
}
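The TunnelPool changes above drop the per-pool _buildStatName field in favor of buildRateName(), which derives the stat name from the pool's current settings and creates the RateStat lazily, since the user can change length settings while the router is running. The small sketch below just mirrors that naming logic to show why two client pools with the same length and variance now feed the same rate; the class and method names here are illustrative, not part of the router.

    // Illustrative only: how the settings-derived stat names come out for a few pools.
    class BuildRateNameSketch {
        static String name(boolean exploratory, boolean inbound, int length, int variance) {
            if (exploratory)
                return "tunnel.buildRatio.exploratory." + (inbound ? "in" : "out");
            return "tunnel.buildRatio.l" + length + "v" + variance + (inbound ? ".in" : ".out");
        }

        public static void main(String[] args) {
            System.out.println(name(false, true, 2, 0));   // tunnel.buildRatio.l2v0.in
            System.out.println(name(false, true, 2, 0));   // same settings -> same stat name
            System.out.println(name(true, false, 0, 0));   // tunnel.buildRatio.exploratory.out
        }
    }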