Compare commits

...

7 Commits

Author SHA1 Message Date
b49dd4fea2 Merge branch 'master' of i2pgit.org:i2p-hackers/i2p.i2p into i2p.i2p.2.4.0-revert-buildhandler-changes 2023-11-07 14:38:42 -05:00
zzz
3db4f23514 Update checklist for tx v3 2023-11-07 13:31:46 -05:00
zzz
93ff5ffa10 Translations: Fix up config file (Gitlab #408)
to revert mess caused by tx migrate and add missing file_filter lines

- Add comments back
- Restore trans.xx sorting within sections
- Add file_filter line for each section
- Add lang_map
2023-11-07 12:31:26 -05:00
idk
2ab938629d Merge branch 'i2p.i2p.2.4.0-cleanup-todos' into 'master'
Cleans up some dead code, revise comments and TODO's

See merge request i2p-hackers/i2p.i2p!148
2023-11-07 14:43:58 +00:00
idk
f363f9318e Cleans up some dead code, revise comments and TODO's 2023-11-07 14:43:58 +00:00
8ae54270c9 Router: remove MIN_VERSION_HONOR_CAPS from BuildHandler 2023-11-03 10:19:48 -04:00
fdb6701bf5 Router: remove unused code and imports in BuildHandler 2023-10-31 14:06:20 -04:00
6 changed files with 665 additions and 625 deletions

1232
.tx/config

File diff suppressed because it is too large Load Diff

View File

@@ -12,7 +12,7 @@
- Review changes in English po files, fix up any necessary tagged strings in Java source
- Revert English po files with no actual changes (i.e. with line number changes only)
- Check in remaining English po files (and any files with changed strings)
- Push to Transifex: `tx push -s`
- Push to Transifex: `tx push --use-git-timestamps -s`
- Make announcement on Transifex with checkin deadline
- GeoIP: db-ip.com update is usually first of the month, time accordingly
@@ -47,7 +47,7 @@
- See README for setup
- `./create_new_entry.sh`
- Entry href should be the in-net link to the release blog post
- `tx push -s`
- `tx push --use-git-timestamps -s`
- `git commit`
2. Write the draft blog post and push to Transifex:
@@ -55,7 +55,7 @@
- Checkout i2p.www branch
- Write draft release announcement - see i2p2www/blog/README for instructions
- Top content should be the same as the news entry
- `tx push -s -r I2P.website_blog`
- `tx push --use-git-timestamps -s -r I2P.website_blog`
- `git commit`
3. Make announcement on Transifex asking for news translation
@@ -73,7 +73,7 @@
- Look for newly translated languages and resources on Transifex
- Add any new ones to .tx/config (use your own judgement on which to include
based on minimum translated percentage)
- `tx pull`
- `tx pull --use-git-timestamps`
- `ant testscripts` to verify that all updated translations are valid
- For any invalid that break the test, fix up the po file manually, or fix on
tx and pull again, or (if new) comment out in .tx/config (add a comment why)
@@ -266,7 +266,7 @@
8. Notify downstream Debian maintainer
9. Pull announcement translations:
- `tx pull -r I2P.website_blog`
- `tx pull --use-git-timestamps -r I2P.website_blog`
Do NOT forget this step!
- `./update-existing-po.sh`
- `git commit i2p2www/translations/ -m "Updated translations"`

View File

@@ -215,38 +215,14 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
// of the flooding - instead, send them to a random floodfill peer so *they* can flood 'em out.
// perhaps statistically adjust this so we are the source every 1/N times... or something.
if (floodfillEnabled() && (ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)) {
//if (!chanceOfFloodingOurOwn(-1)) {
flood(ds);
if (onSuccess != null)
_context.jobQueue().addJob(onSuccess);
//} else {
// _context.jobQueue().addJob(new FloodfillStoreJob(_context, this, key, ds, onSuccess, onFailure, sendTimeout, toIgnore));
//} Less sure I should do this this time around. TODO: figure out how this should adjust
flood(ds);
if (onSuccess != null)
_context.jobQueue().addJob(onSuccess);
} else {
_context.jobQueue().addJob(new FloodfillStoreJob(_context, this, key, ds, onSuccess, onFailure, sendTimeout, toIgnore));
}
}
/* TODO: figure out how this should work
private boolean chanceOfFloodingOurOwn(int percent) {
if (percent < 0) {
// make percent equal to 1-peer.failedLookupRate by retrieving it from the stats
RateStat percentRate = _context.statManager().getRate("netDb.failedLookupRate");
if (percentRate != null)
percent = (1-(int)percentRate.getLifetimeAverageValue())*100;
else {
_log.warn("chanceOfFloodingOurOwn() could not find netDb.failedLookupRate");
return false;
}
}
// if the router has been up for at least an hour
if (_context.router().getUptime() > 60*60*1000) {
// then 30% of the time return true
return Math.random() < (percent / 100.0f);
}
return false;
}*/
/**
* Increments and tests.
* @since 0.7.11
@@ -514,14 +490,11 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
* @return null always
* @since 0.9.10
*/
// ToDo: With respect to segmented netDb clients, this framework needs
// refinement. A client with a segmented netDb can not use exploratory
// tunnels. The return messages will not have sufficient information
// to be directed back to the client making the query.
SearchJob search(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, boolean isLease,
Hash fromLocalDest) {
//if (true) return super.search(key, onFindJob, onFailedLookupJob, timeoutMs, isLease);
if (key == null) throw new IllegalArgumentException("searchin for nothin, eh?");
if (fromLocalDest == null && isClientDb()) throw new IllegalArgumentException("client subDbs cannot use exploratory tunnels");
boolean isNew = false;
FloodSearchJob searchJob;
synchronized (_activeFloodQueries) {

View File

@@ -41,7 +41,7 @@ class RefreshRoutersJob extends JobImpl {
* Don't go faster as this overloads the expl. OBEP / IBGW
*/
private final static long RERUN_DELAY_MS = 2500;
public final static long EXPIRE = 2*60*60*1000;
private final static long EXPIRE = 2*60*60*1000;
private final static long NEW_LOOP_DELAY = 37*60*1000;
private static final int ENOUGH_FFS = 3 * StartExplorersJob.LOW_FFS;

View File

@@ -147,10 +147,6 @@ class CapacityCalculator {
capacity -= PENALTY_CAP_E;
}
}
/* TODO: G caps can be excluded in TunnelPeerSelector by adding it to DEFAULT_EXCLUDE_CAPS */
// decide what other handling if any is needed here.
//else if (caps.indexOf(Router.CAPABILITY_NO_TUNNELS) >= 0)
// capacity -= PENALTY_G_CAP;
} else {
capacity -= PENALTY_NO_RI;
}

View File

@@ -238,13 +238,6 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
// Handling of client tunnel messages need explicit handling
// in the context of the client subDb.
if (_client != null) {
//Hash dbid = _context.netDbSegmentor().getDbidByHash(_client);
/*if (dbid == null) {
// This error shouldn't occur. All clients should have their own netDb.
if (_log.shouldLog(Log.ERROR))
_log.error("Error, client (" + _clientNickname + ") dbid not found while processing messages in the IBMD.");
return;
}*/
// For now, the only client message we know how to handle here is a DSM.
// There aren't normally DSM messages here, but it should be safe to store
// them in the client netDb.