20 Commits

Author SHA1 Message Date
eyedeekay
627e131a58 Merge branch 'routerinfo' of https://github.com/hkh4n/go-i2p into routerinfo 2024-11-16 13:20:45 -05:00
idk
f729bda62d Merge branch 'master' into routerinfo 2024-11-16 18:17:10 +00:00
eyedeekay
4ad0f97bfe Fail-fast switch for logging Logging, format 2024-11-16 13:15:33 -05:00
eyedeekay
20032e0f55 Log: document fast-fail mode in the README. 2024-11-16 13:11:06 -05:00
eyedeekay
700391788f Log: create WARNFAIL_I2P environment variable, if not empty, all WARN or higher debug messages will result in an immediate fatal error and stop the program. 2024-11-16 12:52:26 -05:00
idk
e296441f29 Merge branch 'master' into routerinfo 2024-11-16 03:56:57 +00:00
Haris Khan
62086c7d04 make fmt 2024-11-15 22:52:17 -05:00
Haris Khan
ddba94d6ae remove printing 2024-11-15 22:48:57 -05:00
Haris Khan
767b91df49 clean up temp dirs 2024-11-15 22:47:36 -05:00
Haris Khan
1292098cf0 Merge remote-tracking branch 'origin/10k' into 10k 2024-11-15 22:45:04 -05:00
eyedeekay
24bc4c3c17 Implemented ed25519 SPK's 2024-11-15 22:29:42 -05:00
Haris Khan
81eb270351 !WIP! - 10k test 2024-11-15 22:06:23 -05:00
eyedeekay
b6f197cf92 This is not correct yet, work on key_certificate.go lines 216-245 2024-11-15 17:35:44 -05:00
eyedeekay
c10d98a3b2 export DEBUG_I2P=debug in Makefile so that extended logs show up in the tests 2024-11-15 16:52:37 -05:00
Haris Khan
6d16ca5f87 debugging info to investigate 2024-11-15 14:43:36 -05:00
Haris Khan
003d6c9ab8 !WIP! - 10k test 2024-11-15 13:53:44 -05:00
idk
df45c19272 Merge pull request #24 from satk0/fix-key-certificate-tests
Fix key certificate tests
2024-11-14 15:50:16 +00:00
satk0
f6894e9064 Fix PubKeyWithP521 test 2024-11-12 23:54:50 +01:00
satk0
b36ef65a10 Fix test when data is too small 2024-11-11 23:04:21 +01:00
eyedeekay
8c2b952616 setup auto-assign workflow 2024-11-08 15:01:05 -05:00
23 changed files with 607 additions and 530 deletions

20
.github/workflows/auto-assign.yml vendored Normal file
View File

@@ -0,0 +1,20 @@
name: Auto Assign
on:
  issues:
    types: [opened]
  pull_request:
    types: [opened]
jobs:
  run:
    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: write
    steps:
      - name: 'Auto-assign issue'
        uses: pozil/auto-assign-issue@v1
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          assignees: eyedeekay
          numOfAssignee: 1

View File

@@ -3,6 +3,7 @@ RELEASE_VERSION=${RELEASE_TAG}
RELEASE_DESCRIPTION=`cat PASTA.md`
REPO := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
CGO_ENABLED=0
+export DEBUG_I2P=debug
ifdef GOROOT
	GO = $(GOROOT)/bin/go

View File

@@ -94,7 +94,7 @@ please keep up with these changes, as they will not be backward compatible and r
- [X] Session Tag
## Verbosity ##
-Logging can be enabled and configured using the DEBUG_I2P environment variable. By default, logging is disabled.
+Logging can be enabled and configured using the `DEBUG_I2P` environment variable. By default, logging is disabled.
There are three available log levels:
@@ -113,6 +113,17 @@ export DEBUG_I2P=error
If DEBUG_I2P is set to an unrecognized value, it will fall back to "debug".
## Fast-Fail mode ##
Fast-Fail mode can be activated by setting `WARNFAIL_I2P` to any non-empty value. When set, every warning or error is Fatal.
It is unsafe for production use, and intended only for debugging and testing purposes.
```shell
export WARNFAIL_I2P=true
```
If `WARNFAIL_I2P` is set and `DEBUG_I2P` is unset, `DEBUG_I2P` will be set to `debug`.
## Contributing
See CONTRIBUTING.md for more information.

View File

@@ -96,39 +96,14 @@ func (c *Certificate) ExcessBytes() []byte {
	return nil
}

-// Data returns the payload of a Certificate, payload is trimmed to the specified length
-func (c *Certificate) Data() []byte {
-	length := c.Length()
-	if length == 0 {
-		return []byte{}
-	}
-	if length > len(c.payload) {
-		log.WithFields(logrus.Fields{
-			"specified_length": length,
-			"actual_length":    len(c.payload),
-		}).Warn("Certificate payload shorter than specified length")
-		return c.payload
-	}
-	return c.payload[:length]
-}
-
-// Bytes returns the entire certificate in []byte form
+// Bytes returns the entire certificate in []byte form, trims payload to specified length.
func (c *Certificate) Bytes() []byte {
-	if c.kind.Int() == CERT_NULL {
-		return []byte{0, 0, 0}
-	}
	bytes := c.kind.Bytes()
	bytes = append(bytes, c.len.Bytes()...)
	bytes = append(bytes, c.Data()...)
	log.WithFields(logrus.Fields{
		"bytes_length": len(bytes),
-		"cert_type":    c.kind.Int(),
	}).Debug("Generated bytes for certificate")
	return bytes
}
@@ -155,6 +130,21 @@ func (c *Certificate) Length() (length int) {
	return
}

+// Data returns the payload of a Certificate, payload is trimmed to the specified length.
+func (c *Certificate) Data() (data []byte) {
+	lastElement := c.Length()
+	if lastElement > len(c.payload) {
+		data = c.payload
+		log.Warn("Certificate payload shorter than specified length")
+	} else {
+		data = c.payload[0:lastElement]
+	}
+	log.WithFields(logrus.Fields{
+		"data_length": len(data),
+	}).Debug("Retrieved certificate data")
+	return
+}

// readCertificate creates a new Certficiate from []byte
// returns err if the certificate is too short or if the payload doesn't match specified length.
func readCertificate(data []byte) (certificate Certificate, err error) {
@@ -166,67 +156,44 @@ func readCertificate(data []byte) (certificate Certificate, err error) {
		log.WithFields(logrus.Fields{
			"at":                       "(Certificate) NewCertificate",
			"certificate_bytes_length": len(data),
-			"reason":                   "too short (len < CERT_MIN_SIZE)",
+			"reason":                   "too short (len < CERT_MIN_SIZE)" + fmt.Sprintf("%d", certificate.kind.Int()),
		}).Error("invalid certificate, empty")
		err = fmt.Errorf("error parsing certificate: certificate is empty")
		return
	case 1, 2:
-		certificate.kind = Integer(data[0:1])
+		certificate.kind = Integer(data[0 : len(data)-1])
		certificate.len = Integer([]byte{0})
		log.WithFields(logrus.Fields{
			"at":                       "(Certificate) NewCertificate",
			"certificate_bytes_length": len(data),
-			"reason":                   "too short (len < CERT_MIN_SIZE)",
+			"reason":                   "too short (len < CERT_MIN_SIZE)" + fmt.Sprintf("%d", certificate.kind.Int()),
		}).Error("invalid certificate, too short")
		err = fmt.Errorf("error parsing certificate: certificate is too short")
		return
	default:
		certificate.kind = Integer(data[0:1])
		certificate.len = Integer(data[1:3])
-		// Validate certificate type
-		if certificate.kind.Int() < CERT_NULL || certificate.kind.Int() > CERT_KEY {
-			log.WithFields(logrus.Fields{
-				"at":   "(Certificate) NewCertificate",
-				"type": certificate.kind.Int(),
-			}).Error("invalid certificate type")
-			err = fmt.Errorf("error parsing certificate: invalid type: %d", certificate.kind.Int())
-			return
-		}
-		// Handle NULL certificates
-		if certificate.kind.Int() == CERT_NULL && certificate.len.Int() != 0 {
-			log.WithFields(logrus.Fields{
-				"at":     "(Certificate) NewCertificate",
-				"length": certificate.len.Int(),
-			}).Error("NULL certificate must have zero length")
-			err = fmt.Errorf("error parsing certificate: NULL certificate must have zero length")
-			return
-		}
-		// Validate payload length
-		expectedLength := certificate.len.Int()
-		actualLength := len(data) - CERT_MIN_SIZE
-		if expectedLength > actualLength {
-			log.WithFields(logrus.Fields{
-				"at":              "(Certificate) NewCertificate",
-				"expected_length": expectedLength,
-				"actual_length":   actualLength,
-			}).Error("certificate data shorter than specified length")
-			err = fmt.Errorf("error parsing certificate: data shorter than specified length")
-			return
-		}
+		payloadLength := len(data) - CERT_MIN_SIZE
		certificate.payload = data[CERT_MIN_SIZE:]
+		if certificate.len.Int() > len(data)-CERT_MIN_SIZE {
+			err = fmt.Errorf("certificate parsing warning: certificate data is shorter than specified by length")
+			log.WithFields(logrus.Fields{
+				"at":                         "(Certificate) NewCertificate",
+				"certificate_bytes_length":   certificate.len.Int(),
+				"certificate_payload_length": payloadLength,
+				"data_bytes:":                string(data),
+				"kind_bytes":                 data[0:1],
+				"len_bytes":                  data[1:3],
+				"reason":                     err.Error(),
+			}).Error("invalid certificate, shorter than specified by length")
+			return
+		}
+		log.WithFields(logrus.Fields{
+			"type":   certificate.kind.Int(),
+			"length": certificate.len.Int(),
+		}).Debug("Successfully created new certificate")
+		return
	}
-	log.WithFields(logrus.Fields{
-		"type":   certificate.kind.Int(),
-		"length": certificate.len.Int(),
-	}).Debug("Successfully parsed certificate")
-	return
}

// ReadCertificate creates a Certificate from []byte and returns any ExcessBytes at the end of the input.

View File

@@ -38,8 +38,7 @@ func (i Date) Bytes() []byte {
// Int returns the Date as a Go integer.
func (i Date) Int() int {
-	val, _ := intFromBytes(i.Bytes())
-	return val
+	return intFromBytes(i.Bytes())
}
// Time takes the value stored in date as an 8 byte big-endian integer representing the // Time takes the value stored in date as an 8 byte big-endian integer representing the

View File

@@ -2,8 +2,6 @@ package data
import (
	"crypto/sha256"
-	"crypto/subtle"
-	"errors"
	"io"
)
@@ -12,68 +10,38 @@ import (
Accurate for version 0.9.49

Description
-Represents the SHA256 of some data. Used throughout I2P for data verification
-and identity representation. Must be compared using constant-time operations
-to prevent timing attacks.
+Represents the SHA256 of some data.

Contents
-32 bytes representing a SHA256 hash value
+32 bytes
+
+[I2P Hash]:
*/

-var (
-	ErrInvalidHashSize = errors.New("invalid hash size")
-	ErrNilReader       = errors.New("nil reader")
-)
-
-// Hash is the representation of an I2P Hash.
-// It is always exactly 32 bytes containing a SHA256 sum.
+// Hash is the represenation of an I2P Hash.
//
// https://geti2p.net/spec/common-structures#hash
type Hash [32]byte

-// Bytes returns a copy of the Hash as a 32-byte array.
-// This prevents modification of the original hash value.
func (h Hash) Bytes() [32]byte {
	return h
}

-// Equal compares two hashes in constant time.
-// Returns true if the hashes are identical.
-func (h Hash) Equal(other Hash) bool {
-	return subtle.ConstantTimeCompare(h[:], other[:]) == 1
-}
-
-// IsZero returns true if the hash is all zeros.
-func (h Hash) IsZero() bool {
-	var zero Hash
-	return h.Equal(zero)
-}
-
// HashData returns the SHA256 sum of a []byte input as Hash.
-// Never returns an error as SHA256 operates on any input length.
-func HashData(data []byte) Hash {
-	if data == nil {
-		data = []byte{} // Handle nil input gracefully
-	}
-	return sha256.Sum256(data)
+func HashData(data []byte) (h Hash) {
+	// log.Println("Hashing Data:", data)
+	h = sha256.Sum256(data)
+	return
}

// HashReader returns the SHA256 sum from all data read from an io.Reader.
-// Returns an error if one occurs while reading from reader or if reader is nil.
-func HashReader(r io.Reader) (Hash, error) {
-	var h Hash
-	if r == nil {
-		return h, ErrNilReader
-	}
+// return error if one occurs while reading from reader
+func HashReader(r io.Reader) (h Hash, err error) {
	sha := sha256.New()
-	_, err := io.Copy(sha, r)
-	if err != nil {
-		return h, err
+	_, err = io.Copy(sha, r)
+	if err == nil {
+		d := sha.Sum(nil)
+		copy(h[:], d)
	}
-	sum := sha.Sum(nil)
-	copy(h[:], sum)
-	return h, nil
+	return
}
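
For orientation, here is a minimal usage sketch of the hash helpers as they stand after this change. It is illustrative only and not part of the diff; the import path is the lib/common/data package already referenced elsewhere in this changeset.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/go-i2p/go-i2p/lib/common/data"
)

func main() {
	// HashData returns the SHA256 digest of a byte slice as a data.Hash ([32]byte).
	h := data.HashData([]byte("example payload"))
	fmt.Printf("%x\n", h.Bytes())

	// HashReader hashes everything read from an io.Reader and reports read errors.
	hr, err := data.HashReader(strings.NewReader("example payload"))
	if err != nil {
		panic(err)
	}
	fmt.Println(h == hr) // true: same input, same digest
}
```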

View File

@@ -1,65 +0,0 @@
package data
import (
"io"
"strings"
"testing"
)
func TestHash(t *testing.T) {
tests := []struct {
name string
data []byte
want Hash
}{
{
name: "Empty input",
data: []byte{},
want: HashData([]byte{}),
},
{
name: "Nil input",
data: nil,
want: HashData([]byte{}),
},
// Add more test cases
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := HashData(tt.data)
if !got.Equal(tt.want) {
t.Errorf("HashData() = %v, want %v", got, tt.want)
}
})
}
}
func TestHashReader(t *testing.T) {
tests := []struct {
name string
reader io.Reader
wantErr bool
}{
{
name: "Nil reader",
reader: nil,
wantErr: true,
},
{
name: "Empty reader",
reader: strings.NewReader(""),
wantErr: false,
},
// Add more test cases
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, err := HashReader(tt.reader)
if (err != nil) != tt.wantErr {
t.Errorf("HashReader() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}

View File

@@ -2,15 +2,13 @@ package data
import (
	"encoding/binary"
-	"errors"
-	"math"
)

// MAX_INTEGER_SIZE is the maximum length of an I2P integer in bytes.
const MAX_INTEGER_SIZE = 8

/*
-[I2P Integer]
+[I2P Hash]
Accurate for version 0.9.49
Description
@@ -20,112 +18,68 @@ Contents
1 to 8 bytes in network byte order (big endian) representing an unsigned integer.
*/

-var (
-	// ErrInvalidSize indicates the requested integer size is invalid (<=0 or >MAX_INTEGER_SIZE)
-	ErrInvalidSize = errors.New("invalid integer size")
-	// ErrInsufficientData indicates there isn't enough data to read the requested size
-	ErrInsufficientData = errors.New("insufficient data")
-	// ErrNegativeValue indicates an attempt to create an Integer from a negative value
-	ErrNegativeValue = errors.New("negative values not allowed")
-	// ErrIntegerOverflow indicates the value exceeds the maximum allowed size
-	ErrIntegerOverflow = errors.New("integer overflow")
-)
-
-// Integer is the representation of an I2P Integer.
-// It contains 1 to 8 bytes in network byte order (big endian)
-// representing an unsigned integer value.
+// Integer is the represenation of an I2P Integer.
+//
+// https://geti2p.net/spec/common-structures#integer
type Integer []byte

// Bytes returns the raw []byte content of an Integer.
-// This represents the big-endian encoded form of the integer.
func (i Integer) Bytes() []byte {
-	return i
+	return i[:]
}

-// Int returns the Integer as a Go integer.
-// Returns an error if the value would overflow on the current platform
-// or if the encoding is invalid.
+// Int returns the Date as a Go integer
func (i Integer) Int() int {
-	val, _ := intFromBytes(i)
-	return val
+	return intFromBytes(i.Bytes())
}

// ReadInteger returns an Integer from a []byte of specified length.
// The remaining bytes after the specified length are also returned.
-// Returns an error if size is invalid or there isn't enough data.
-func ReadInteger(bytes []byte, size int) (Integer, []byte, error) {
-	if size <= 0 {
-		return nil, bytes, ErrInvalidSize
-	}
-	if size > len(bytes) {
-		return nil, bytes, ErrInsufficientData
-	}
-	return Integer(bytes[:size]), bytes[size:], nil
+func ReadInteger(bytes []byte, size int) (Integer, []byte) {
+	if len(bytes) < size {
+		return bytes[:size], bytes[len(bytes):]
+	}
+	return bytes[:size], bytes[size:]
}

// NewInteger creates a new Integer from []byte using ReadInteger.
// Limits the length of the created Integer to MAX_INTEGER_SIZE.
-// Returns a pointer to Integer and the remaining bytes.
-// Returns an error if size is invalid or there isn't enough data.
-func NewInteger(bytes []byte, size int) (*Integer, []byte, error) {
-	if size <= 0 || size > MAX_INTEGER_SIZE {
-		return nil, bytes, ErrInvalidSize
-	}
-	if len(bytes) < size {
-		return nil, bytes, ErrInsufficientData
-	}
-	integer, remainder, err := ReadInteger(bytes, size)
-	if err != nil {
-		return nil, bytes, err
-	}
-	return &integer, remainder, nil
+// Returns a pointer to Integer unlike ReadInteger.
+func NewInteger(bytes []byte, size int) (integer *Integer, remainder []byte, err error) {
+	integerSize := MAX_INTEGER_SIZE
+	if size < MAX_INTEGER_SIZE {
+		integerSize = size
+	}
+	intBytes := bytes[:integerSize]
+	remainder = bytes[integerSize:]
+	i, _ := ReadInteger(intBytes, integerSize)
+	integer = &i
+	return
}

// NewIntegerFromInt creates a new Integer from a Go integer of a specified []byte length.
-// The value must be non-negative and fit within the specified number of bytes.
-// Returns an error if the size is invalid or the value cannot be represented.
-func NewIntegerFromInt(value int, size int) (*Integer, error) {
-	if size <= 0 || size > MAX_INTEGER_SIZE {
-		return nil, ErrInvalidSize
-	}
-	if value < 0 {
-		return nil, ErrNegativeValue
-	}
-	// Check if value fits in specified size
-	maxVal := int(math.Pow(2, float64(size*8))) - 1
-	if value > maxVal {
-		return nil, ErrIntegerOverflow
-	}
-	buf := make([]byte, MAX_INTEGER_SIZE)
-	binary.BigEndian.PutUint64(buf, uint64(value))
-	data := buf[MAX_INTEGER_SIZE-size:]
-	integer := Integer(data)
-	return &integer, nil
+func NewIntegerFromInt(value int, size int) (integer *Integer, err error) {
+	bytes := make([]byte, MAX_INTEGER_SIZE)
+	binary.BigEndian.PutUint64(bytes, uint64(value))
+	integerSize := MAX_INTEGER_SIZE
+	if size < MAX_INTEGER_SIZE {
+		integerSize = size
+	}
+	objinteger, _, err := NewInteger(bytes[MAX_INTEGER_SIZE-integerSize:], integerSize)
+	integer = objinteger
+	return
}

-// intFromBytes interprets a slice of bytes from length 0 to length 8 as a big-endian
-// integer and returns an int representation.
-// Returns an error if the value would overflow on the current platform
-// or if the input is invalid.
-func intFromBytes(number []byte) (int, error) {
-	if len(number) == 0 {
-		return 0, nil
-	}
-	if len(number) > MAX_INTEGER_SIZE {
-		return 0, ErrInvalidSize
-	}
-	padded := make([]byte, MAX_INTEGER_SIZE)
-	copy(padded[MAX_INTEGER_SIZE-len(number):], number)
-	val := int64(binary.BigEndian.Uint64(padded))
-	if val > math.MaxInt32 || val < math.MinInt32 {
-		return 0, ErrIntegerOverflow
-	}
-	return int(val), nil
-}
+// Interpret a slice of bytes from length 0 to length 8 as a big-endian
+// integer and return an int representation.
+func intFromBytes(number []byte) (value int) {
+	num_len := len(number)
+	if num_len < MAX_INTEGER_SIZE {
+		number = append(
+			make([]byte, MAX_INTEGER_SIZE-num_len),
+			number...,
+		)
+	}
+	value = int(binary.BigEndian.Uint64(number))
+	return
+}
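
A similar sketch for the Integer helpers after this change (again hypothetical, same import-path assumption): ReadInteger now returns only the value and the remainder, while NewIntegerFromInt still reports an error.

```go
package main

import (
	"fmt"

	"github.com/go-i2p/go-i2p/lib/common/data"
)

func main() {
	// Encode 65537 into a 4-byte big-endian I2P Integer.
	i, err := data.NewIntegerFromInt(65537, 4)
	if err != nil {
		panic(err)
	}
	fmt.Println(i.Int(), i.Bytes()) // 65537 [0 1 0 1]

	// Read a 2-byte Integer from the front of a buffer; the rest is returned as-is.
	val, rest := data.ReadInteger([]byte{0x00, 0x02, 0xAA}, 2)
	fmt.Println(val.Int(), rest) // 2 [170]
}
```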

View File

@@ -30,32 +30,3 @@ func TestIsZeroWithNoData(t *testing.T) {
	assert.Equal(integer.Int(), 0, "Integer() did not correctly parse zero length byte slice")
}
func TestIntegerEdgeCases(t *testing.T) {
tests := []struct {
name string
input []byte
size int
wantErr bool
wantInt int
}{
{"empty input", []byte{}, 1, true, 0},
{"zero size", []byte{1}, 0, true, 0},
{"oversized", []byte{1}, 9, true, 0},
{"valid small", []byte{42}, 1, false, 42},
{"valid max", []byte{1, 2, 3, 4, 5, 6, 7, 8}, 8, false, 72623859790382856},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
i, _, err := NewInteger(tt.input, tt.size)
if (err != nil) != tt.wantErr {
t.Errorf("NewInteger() error = %v, wantErr %v", err, tt.wantErr)
return
}
if err == nil && i.Int() != tt.wantInt {
t.Errorf("Integer.Int() = %v, want %v", i.Int(), tt.wantInt)
}
})
}
}

View File

@@ -2,7 +2,6 @@ package data
import (
	"errors"
-	"fmt"

	"github.com/sirupsen/logrus"
)
@@ -98,20 +97,34 @@ func (mapping *Mapping) HasDuplicateKeys() bool {
}

// GoMapToMapping converts a Go map of unformatted strings to *Mapping.
-func GoMapToMapping(gomap map[string]string) (*Mapping, error) {
+func GoMapToMapping(gomap map[string]string) (mapping *Mapping, err error) {
+	log.WithFields(logrus.Fields{
+		"input_map_size": len(gomap),
+	}).Debug("Converting Go map to Mapping")
	map_vals := MappingValues{}
	for k, v := range gomap {
-		key_str, err := ToI2PString(k)
-		if err != nil {
-			return nil, fmt.Errorf("key conversion error: %w", err)
+		key_str, kerr := ToI2PString(k)
+		if kerr != nil {
+			log.WithError(kerr).Error("Failed to convert key to I2PString")
+			err = kerr
+			return
		}
-		val_str, err := ToI2PString(v)
-		if err != nil {
-			return nil, fmt.Errorf("value conversion error: %w", err)
+		val_str, verr := ToI2PString(v)
+		if verr != nil {
+			log.WithError(verr).Error("Failed to convert value to I2PString")
+			err = verr
+			return
		}
-		map_vals = append(map_vals, [2]I2PString{key_str, val_str})
+		map_vals = append(
+			map_vals,
+			[2]I2PString{key_str, val_str},
+		)
	}
-	return ValuesToMapping(map_vals), nil
+	mapping = ValuesToMapping(map_vals)
+	log.WithFields(logrus.Fields{
+		"mapping_size": len(map_vals),
+	}).Debug("Successfully converted Go map to Mapping")
+	return
}

// Check if the string parsing error indicates that the Mapping
@@ -139,37 +152,10 @@ func beginsWith(bytes []byte, chr byte) bool {
	return result
}

-func (mapping *Mapping) addValue(key, value I2PString) error {
-	for _, pair := range *mapping.vals {
-		existingKey, _ := pair[0].Data()
-		newKey, _ := key.Data()
-		if existingKey == newKey {
-			return fmt.Errorf("duplicate key: %s", newKey)
-		}
-	}
-	*mapping.vals = append(*mapping.vals, [2]I2PString{key, value})
-	return nil
-}
// ReadMapping returns Mapping from a []byte.
// The remaining bytes after the specified length are also returned.
// Returns a list of errors that occurred during parsing.
-const MaxMappingSize = 65535 // Match Java I2P's maximum mapping size
func ReadMapping(bytes []byte) (mapping Mapping, remainder []byte, err []error) {
-	if len(bytes) < 3 {
-		err = append(err, errors.New("mapping data too short"))
-		return
-	}
-	size, remainder, e := NewInteger(bytes, 2)
-	if e != nil {
-		log.WithError(e).Error("Failed to read Mapping size")
-		err = append(err, e)
-	}
-	if size.Int() > MaxMappingSize {
-		err = append(err, fmt.Errorf("mapping size %d exceeds maximum %d", size.Int(), MaxMappingSize))
-		return
-	}
	log.WithFields(logrus.Fields{
		"input_length": len(bytes),
	}).Debug("Reading Mapping from bytes")
@@ -182,16 +168,16 @@ func ReadMapping(bytes []byte) (mapping Mapping, remainder []byte, err []error)
		err = append(err, e)
		return
	}
+	size, remainder, e := NewInteger(bytes, 2)
+	if e != nil {
+		log.WithError(e).Error("Failed to read Mapping size")
+		err = append(err, e)
+	}
	if size.Int() == 0 {
		log.Warn("Mapping size is zero")
		return
	}
	mapping.size = size
-	if mapping.size.Int() > len(remainder) {
-		err = append(err, fmt.Errorf("mapping size %d exceeds available data length %d",
-			mapping.size.Int(), len(remainder)))
-		return
-	}
	map_bytes := remainder[:mapping.size.Int()]
	remainder = remainder[mapping.size.Int():]
	if len(remainder) == 0 {
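
A short sketch of the surviving GoMapToMapping path (hypothetical usage, same import-path assumption; HasDuplicateKeys is the method shown in the hunk context above):

```go
package main

import (
	"fmt"

	"github.com/go-i2p/go-i2p/lib/common/data"
)

func main() {
	// Convert a plain Go map into an I2P Mapping; keys and values become I2PStrings.
	m, err := data.GoMapToMapping(map[string]string{"host": "example.i2p", "port": "4567"})
	if err != nil {
		panic(err)
	}
	// A freshly built Mapping should contain no duplicate keys.
	fmt.Println(m.HasDuplicateKeys()) // false
}
```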

View File

@@ -1,7 +1,6 @@
package data

import (
-	"bytes"
	"errors"
	"sort"
@@ -12,37 +11,20 @@ import (
type MappingValues [][2]I2PString

func (m MappingValues) Get(key I2PString) I2PString {
-	if key == nil {
-		return nil
-	}
-	keyBytes, err := key.Data()
-	if err != nil {
-		return nil
-	}
+	keyBytes, _ := key.Data()
	log.WithFields(logrus.Fields{
		"key": string(keyBytes),
	}).Debug("Searching for key in MappingValues")
	for _, pair := range m {
-		if pair[0] == nil {
-			continue
-		}
-		kb, err := pair[0].Data()
-		if err != nil {
-			continue
-		}
+		kb, _ := pair[0][0:].Data()
		if kb == keyBytes {
-			data, _ := pair[1].Data()
			log.WithFields(logrus.Fields{
				"key":   string(keyBytes),
-				"value": string(data),
+				"value": string(pair[1][1:]),
			}).Debug("Found matching key in MappingValues")
			return pair[1]
		}
	}
	log.WithFields(logrus.Fields{
		"key": string(keyBytes),
	}).Debug("Key not found in MappingValues")
@@ -82,15 +64,10 @@ func ValuesToMapping(values MappingValues) *Mapping {
// In practice routers do not seem to allow duplicate keys.
func mappingOrder(values MappingValues) {
	sort.SliceStable(values, func(i, j int) bool {
-		data1, err1 := values[i][0].Data()
-		data2, err2 := values[j][0].Data()
-		// Handle error cases by treating them as "less than"
-		if err1 != nil || err2 != nil {
-			return err1 == nil
-		}
-		return bytes.Compare([]byte(data1), []byte(data2)) < 0
+		// Lexographic sort on keys only
+		data1, _ := values[i][0].Data()
+		data2, _ := values[j][0].Data()
+		return data1 < data2
	})
}
@@ -178,7 +155,7 @@ func ReadMappingValues(remainder []byte, map_length Integer) (values *MappingVal
"reason": "duplicate key in mapping", "reason": "duplicate key in mapping",
"key": string(key_str), "key": string(key_str),
}).Error("mapping format violation") }).Error("mapping format violation")
log.Printf("DUPE: %s", key_str) log.Warnf("DUPE: %s", key_str)
errs = append(errs, errors.New("mapping format violation, duplicate key in mapping")) errs = append(errs, errors.New("mapping format violation, duplicate key in mapping"))
// Based on other implementations this does not seem to happen often? // Based on other implementations this does not seem to happen often?
// Java throws an exception in this case, the base object is a Hashmap so the value is overwritten and an exception is thrown. // Java throws an exception in this case, the base object is a Hashmap so the value is overwritten and an exception is thrown.
@@ -193,7 +170,7 @@ func ReadMappingValues(remainder []byte, map_length Integer) (values *MappingVal
"value:": string(remainder), "value:": string(remainder),
}).Warn("mapping format violation") }).Warn("mapping format violation")
errs = append(errs, errors.New("mapping format violation, expected =")) errs = append(errs, errors.New("mapping format violation, expected ="))
log.Printf("ERRVAL: %s", remainder) log.Warnf("ERRVAL: %s", remainder)
break break
} else { } else {
remainder = remainder[1:] remainder = remainder[1:]
@@ -240,6 +217,6 @@ func ReadMappingValues(remainder []byte, map_length Integer) (values *MappingVal
"remainder_length": len(remainder_bytes), "remainder_length": len(remainder_bytes),
"error_count": len(errs), "error_count": len(errs),
}).Debug("Finished reading MappingValues") }).Debug("Finished reading MappingValues")
remainder_bytes = remainder
return return
} }

View File

@@ -2,7 +2,6 @@ package data
import (
	"fmt"
-	"reflect"
	"testing"
)
@@ -46,41 +45,3 @@ func TestMappingOrderSortsValuesThenKeys(t *testing.T) {
		}
	}
}
func TestMappingValuesEdgeCases(t *testing.T) {
k1, _ := ToI2PString("test")
tests := []struct {
name string
mv MappingValues
key I2PString
want I2PString
}{
{
name: "nil key",
mv: MappingValues{},
key: nil,
want: nil,
},
{
name: "empty mapping",
mv: MappingValues{},
key: k1,
want: nil,
},
{
name: "nil value in pair",
mv: MappingValues{{k1, nil}},
key: k1,
want: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := tt.mv.Get(tt.key)
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("MappingValues.Get() = %v, want %v", got, tt.want)
}
})
}
}

View File

@@ -111,46 +111,46 @@ func (key_certificate KeyCertificate) Data() ([]byte, error) {
}

// SigningPublicKeyType returns the signingPublicKey type as a Go integer.
-func (key_certificate KeyCertificate) SigningPublicKeyType() int {
-	spk_type := key_certificate.spkType.Int()
+func (key_certificate KeyCertificate) SigningPublicKeyType() (signing_pubkey_type int) {
+	signing_pubkey_type = key_certificate.spkType.Int()
	log.WithFields(logrus.Fields{
-		"signing_pubkey_type": spk_type,
+		"signing_pubkey_type": signing_pubkey_type,
	}).Debug("Retrieved signingPublicKey type")
-	return spk_type
+	return key_certificate.spkType.Int()
}

-func (key_certificate KeyCertificate) CryptoSize() int {
-	switch key_certificate.PublicKeyType() {
-	case KEYCERT_CRYPTO_ELG:
-		return KEYCERT_CRYPTO_ELG_SIZE
-	case KEYCERT_CRYPTO_P256:
-		return KEYCERT_CRYPTO_P256_SIZE
-	case KEYCERT_CRYPTO_P384:
-		return KEYCERT_CRYPTO_P384_SIZE
-	case KEYCERT_CRYPTO_P521:
-		return KEYCERT_CRYPTO_P521_SIZE
-	case KEYCERT_CRYPTO_X25519:
-		return KEYCERT_CRYPTO_X25519_SIZE
-	default:
-		return 0
-	}
+// PublicKeyType returns the publicKey type as a Go integer.
+func (key_certificate KeyCertificate) PublicKeyType() (pubkey_type int) {
+	pubkey_type = key_certificate.cpkType.Int()
+	log.WithFields(logrus.Fields{
+		"pubkey_type": pubkey_type,
+	}).Debug("Retrieved publicKey type")
+	return key_certificate.cpkType.Int()
}

+// ConstructPublicKey returns a publicKey constructed using any excess data that may be stored in the KeyCertififcate.
+// Returns enr errors encountered while parsing.
func (key_certificate KeyCertificate) ConstructPublicKey(data []byte) (public_key crypto.PublicKey, err error) {
	log.WithFields(logrus.Fields{
		"input_length": len(data),
	}).Debug("Constructing publicKey from keyCertificate")
	key_type := key_certificate.PublicKeyType()
-	data_len := len(data)
-	if data_len < key_certificate.CryptoSize() {
-		return nil, errors.New("error constructing public key: not enough data")
-	}
-	// Implementation missing here - needs to construct appropriate key type
+	if err != nil {
+		return
+	}
+	data_len := len(data)
+	if data_len < key_certificate.CryptoSize() {
+		log.WithFields(logrus.Fields{
+			"at":           "(keyCertificate) ConstructPublicKey",
+			"data_len":     data_len,
+			"required_len": KEYCERT_PUBKEY_SIZE,
+			"reason":       "not enough data",
+		}).Error("error constructing public key")
+		err = errors.New("error constructing public key: not enough data")
+		return
+	}
	switch key_type {
	case KEYCERT_CRYPTO_ELG:
		var elg_key crypto.ElgPublicKey
		copy(elg_key[:], data[KEYCERT_PUBKEY_SIZE-KEYCERT_CRYPTO_ELG_SIZE:KEYCERT_PUBKEY_SIZE])
		public_key = elg_key
@@ -160,25 +160,13 @@ func (key_certificate KeyCertificate) ConstructPublicKey(data []byte) (public_ke
		copy(ed25519_key[:], data[KEYCERT_PUBKEY_SIZE-KEYCERT_CRYPTO_ELG_SIZE:KEYCERT_PUBKEY_SIZE])
		public_key = ed25519_key
		log.Debug("Constructed Ed25519PublicKey")
-	case KEYCERT_CRYPTO_P256:
-		//return crypto.CreatePublicKey(data[:KEYCERT_CRYPTO_P256_SIZE])
-	case KEYCERT_CRYPTO_P384:
-		//return crypto.CreatePublicKey(data[:KEYCERT_CRYPTO_P384_SIZE])
-	case KEYCERT_CRYPTO_P521:
-		//return crypto.CreatePublicKey(data[:KEYCERT_CRYPTO_P521_SIZE])
-	default:
-		return nil, errors.New("error constructing public key: unknown key type")
-	}
-	return nil, errors.New("error constructing public key: unknown key type")
-}
-
-// PublicKeyType returns the publicKey type as a Go integer.
-func (key_certificate KeyCertificate) PublicKeyType() int {
-	pk_type := key_certificate.cpkType.Int()
-	log.WithFields(logrus.Fields{
-		"pubkey_type": pk_type,
-	}).Debug("Retrieved publicKey type")
-	return pk_type
+	default:
+		log.WithFields(logrus.Fields{
+			"key_type": key_type,
+		}).Warn("Unknown public key type")
+	}
+	return
}

// ConstructSigningPublicKey returns a SingingPublicKey constructed using any excess data that may be stored in the KeyCertificate.
@@ -187,7 +175,7 @@ func (key_certificate KeyCertificate) ConstructSigningPublicKey(data []byte) (si
	log.WithFields(logrus.Fields{
		"input_length": len(data),
	}).Debug("Constructing signingPublicKey from keyCertificate")
-	signing_key_type := key_certificate.PublicKeyType()
+	signing_key_type := key_certificate.SigningPublicKeyType()
	if err != nil {
		return
	}
@@ -209,51 +197,54 @@ func (key_certificate KeyCertificate) ConstructSigningPublicKey(data []byte) (si
		signing_public_key = dsa_key
		log.Debug("Constructed DSAPublicKey")
	case KEYCERT_SIGN_P256:
-		var ec_key crypto.ECP256PublicKey
-		copy(ec_key[:], data[KEYCERT_SPK_SIZE-KEYCERT_SIGN_P256_SIZE:KEYCERT_SPK_SIZE])
-		signing_public_key = ec_key
-		log.Debug("Constructed ECP256PublicKey")
+		var ec_p256_key crypto.ECP256PublicKey
+		copy(ec_p256_key[:], data[KEYCERT_SPK_SIZE-KEYCERT_SIGN_P256_SIZE:KEYCERT_SPK_SIZE])
+		signing_public_key = ec_p256_key
+		log.Debug("Constructed P256PublicKey")
	case KEYCERT_SIGN_P384:
-		var ec_key crypto.ECP384PublicKey
-		copy(ec_key[:], data[KEYCERT_SPK_SIZE-KEYCERT_SIGN_P384_SIZE:KEYCERT_SPK_SIZE])
-		signing_public_key = ec_key
-		log.Debug("Constructed ECP384PublicKey")
+		var ec_p384_key crypto.ECP384PublicKey
+		copy(ec_p384_key[:], data[KEYCERT_SPK_SIZE-KEYCERT_SIGN_P384_SIZE:KEYCERT_SPK_SIZE])
+		signing_public_key = ec_p384_key
+		log.Debug("Constructed P384PublicKey")
	case KEYCERT_SIGN_P521:
-		var ec_key crypto.ECP521PublicKey
-		extra := KEYCERT_SIGN_P521_SIZE - KEYCERT_SPK_SIZE
-		copy(ec_key[:], data)
-		copy(ec_key[KEYCERT_SPK_SIZE:], key_certificate.Certificate.RawBytes()[4:4+extra])
-		signing_public_key = ec_key
-		log.Debug("Constructed ECP521PublicKey")
+		/*var ec_p521_key crypto.ECP521PublicKey
+		copy(ec_p521_key[:], data[KEYCERT_SPK_SIZE-KEYCERT_SIGN_P521_SIZE:KEYCERT_SPK_SIZE])
+		signing_public_key = ec_p521_key
+		log.Debug("Constructed P521PublicKey")*/
+		panic("unimplemented P521SigningPublicKey")
	case KEYCERT_SIGN_RSA2048:
-		// var rsa_key crypto.RSA2048PublicKey
-		// extra := KEYCERT_SIGN_RSA2048_SIZE - 128
-		// copy(rsa_key[:], data)
-		// copy(rsa_key[128:], key_certificate[4:4+extra])
-		// signing_public_key = rsa_key
-		log.WithFields(logrus.Fields{
-			"signing_key_type": signing_key_type,
-		}).Warn("Signing key type KEYCERT_SIGN_RSA2048 not implemented")
+		/*var rsa2048_key crypto.RSA2048PublicKey
+		copy(rsa2048_key[:], data[KEYCERT_SPK_SIZE-KEYCERT_SIGN_RSA2048_SIZE:KEYCERT_SPK_SIZE])
+		signing_public_key = rsa2048_key
+		log.Debug("Constructed RSA2048PublicKey")*/
+		panic("unimplemented RSA2048SigningPublicKey")
	case KEYCERT_SIGN_RSA3072:
-		log.WithFields(logrus.Fields{
-			"signing_key_type": signing_key_type,
-		}).Warn("Signing key type KEYCERT_SIGN_RSA3072 not implemented")
+		/*var rsa3072_key crypto.RSA3072PublicKey
+		copy(rsa3072_key[:], data[KEYCERT_SPK_SIZE-KEYCERT_SIGN_RSA3072_SIZE:KEYCERT_SPK_SIZE])
+		signing_public_key = rsa3072_key
+		log.Debug("Constructed RSA3072PublicKey")*/
+		panic("unimplemented RSA3072SigningPublicKey")
	case KEYCERT_SIGN_RSA4096:
-		log.WithFields(logrus.Fields{
-			"signing_key_type": signing_key_type,
-		}).Warn("Signing key type KEYCERT_SIGN_RSA4096 not implemented")
+		/*var rsa4096_key crypto.RSA4096PublicKey
+		copy(rsa4096_key[:], data[KEYCERT_SPK_SIZE-KEYCERT_SIGN_RSA4096_SIZE:KEYCERT_SPK_SIZE])
+		signing_public_key = rsa4096_key
+		log.Debug("Constructed RSA4096PublicKey")*/
+		panic("unimplemented RSA4096SigningPublicKey")
	case KEYCERT_SIGN_ED25519:
-		log.WithFields(logrus.Fields{
-			"signing_key_type": signing_key_type,
-		}).Warn("Signing key type KEYCERT_SIGN_ED25519 not implemented")
+		var ed25519_key crypto.Ed25519PublicKey
+		copy(ed25519_key[:], data[KEYCERT_SPK_SIZE-KEYCERT_SIGN_ED25519_SIZE:KEYCERT_SPK_SIZE])
+		signing_public_key = ed25519_key
+		log.Debug("Constructed Ed25519PublicKey")
	case KEYCERT_SIGN_ED25519PH:
-		log.WithFields(logrus.Fields{
-			"signing_key_type": signing_key_type,
-		}).Warn("Signing key type KEYCERT_SIGN_ED25519PH not implemented")
+		var ed25519ph_key crypto.Ed25519PublicKey
+		copy(ed25519ph_key[:], data[KEYCERT_SPK_SIZE-KEYCERT_SIGN_ED25519PH_SIZE:KEYCERT_SPK_SIZE])
+		signing_public_key = ed25519ph_key
+		log.Debug("Constructed Ed25519PHPublicKey")
	default:
		log.WithFields(logrus.Fields{
			"signing_key_type": signing_key_type,
		}).Warn("Unknown signing key type")
+		panic(err)
	}
	return
@@ -281,6 +272,24 @@ func (key_certificate KeyCertificate) SignatureSize() (size int) {
	return sizes[int(key_type)]
}

+// CryptoSize return the size of a Public Key corresponding to the Key Certificate's publicKey type.
+func (key_certificate KeyCertificate) CryptoSize() (size int) {
+	sizes := map[int]int{
+		KEYCERT_CRYPTO_ELG:    KEYCERT_CRYPTO_ELG_SIZE,
+		KEYCERT_CRYPTO_P256:   KEYCERT_CRYPTO_P256_SIZE,
+		KEYCERT_CRYPTO_P384:   KEYCERT_CRYPTO_P384_SIZE,
+		KEYCERT_CRYPTO_P521:   KEYCERT_CRYPTO_P521_SIZE,
+		KEYCERT_CRYPTO_X25519: KEYCERT_CRYPTO_X25519_SIZE,
+	}
+	key_type := key_certificate.PublicKeyType()
+	size = sizes[int(key_type)]
+	log.WithFields(logrus.Fields{
+		"key_type":    key_type,
+		"crypto_size": size,
+	}).Debug("Retrieved crypto size")
+	return sizes[int(key_type)]
+}

// NewKeyCertificate creates a new *KeyCertificate from []byte using ReadCertificate.
// The remaining bytes after the specified length are also returned.
// Returns a list of errors that occurred during parsing.
@@ -295,10 +304,6 @@ func NewKeyCertificate(bytes []byte) (key_certificate *KeyCertificate, remainder
		log.WithError(err).Error("Failed to read Certificate")
		return
	}
-	if certificate.Type() != 5 { // Key certificate type must be 5
-		return nil, nil, errors.New("error parsing key certificate: invalid certificate type")
-	}
	if len(bytes) < KEYCERT_MIN_SIZE {
		log.WithError(err).Error("keyCertificate data too short")
		err = errors.New("error parsing key certificate: not enough data")

View File

@@ -123,7 +123,7 @@ func TestConstructSigningPublicKeyWithP521(t *testing.T) {
	assert := assert.New(t)
	key_cert, _, err := NewKeyCertificate([]byte{0x05, 0x00, 0x08, 0x00, 0x03, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00})
-	data := make([]byte, 128)
+	data := make([]byte, 132)
	spk, err := key_cert.ConstructSigningPublicKey(data)
	assert.Nil(err, "ConstructSigningPublicKey() with P521 returned err on valid data")

View File

@@ -0,0 +1,197 @@
package router_info
import (
"fmt"
"io"
"os"
"path/filepath"
"strings"
"testing"
"time"
)
func consolidateNetDb(sourcePath string, destPath string) error {
// Create destination directory if it doesn't exist
if err := os.MkdirAll(destPath, 0o755); err != nil {
return fmt.Errorf("failed to create destination directory: %v", err)
}
// Walk through all subdirectories
return filepath.Walk(sourcePath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return fmt.Errorf("error accessing path %q: %v", path, err)
}
// Skip if it's a directory
if info.IsDir() {
return nil
}
// Check if this is a routerInfo file
if strings.HasPrefix(info.Name(), "routerInfo-") && strings.HasSuffix(info.Name(), ".dat") {
// Create source file path
srcFile := path
// Create destination file path
dstFile := filepath.Join(destPath, info.Name())
// Copy the file
if err := copyFile(srcFile, dstFile); err != nil {
return fmt.Errorf("failed to copy %s: %v", info.Name(), err)
}
}
return nil
})
}
func copyFile(src, dst string) error {
sourceFile, err := os.Open(src)
if err != nil {
return err
}
defer sourceFile.Close()
destFile, err := os.Create(dst)
if err != nil {
return err
}
defer destFile.Close()
_, err = io.Copy(destFile, sourceFile)
return err
}
func consolidateAllNetDbs(tempDir string) error {
// Common paths for I2P and I2Pd netDb
i2pPath := filepath.Join(os.Getenv("HOME"), ".i2p/netDb")
i2pdPath := filepath.Join(os.Getenv("HOME"), ".i2pd/netDb")
// Create the temp directory
if err := os.MkdirAll(tempDir, 0o755); err != nil {
return fmt.Errorf("failed to create temp directory: %v", err)
}
// Try to consolidate I2P netDb
if _, err := os.Stat(i2pPath); err == nil {
if err := consolidateNetDb(i2pPath, tempDir); err != nil {
fmt.Printf("Warning: Error processing I2P netDb: %v\n", err)
}
}
// Try to consolidate I2Pd netDb
if _, err := os.Stat(i2pdPath); err == nil {
if err := consolidateNetDb(i2pdPath, tempDir); err != nil {
fmt.Printf("Warning: Error processing I2Pd netDb: %v\n", err)
}
}
return nil
}
func cleanupTempDir(path string) error {
if err := os.RemoveAll(path); err != nil {
return fmt.Errorf("failed to cleanup temporary directory %s: %v", path, err)
}
return nil
}
func createTempNetDbDir() (string, error) {
// Get system's temp directory in a platform-independent way
baseDir := os.TempDir()
// Create unique directory name with timestamp
timestamp := time.Now().Unix()
dirName := fmt.Sprintf("go-i2p-testfiles-%d", timestamp)
// Join paths in a platform-independent way
tempDir := filepath.Join(baseDir, dirName)
// Create the directory with appropriate permissions
err := os.MkdirAll(tempDir, 0o755)
if err != nil {
return "", fmt.Errorf("failed to create temporary directory: %v", err)
}
return tempDir, nil
}
func Test10K(t *testing.T) {
i2pPath := filepath.Join(os.Getenv("HOME"), ".i2p/netDb")
i2pdPath := filepath.Join(os.Getenv("HOME"), ".i2pd/netDb")
// Skip if neither directory exists
if _, err := os.Stat(i2pPath); os.IsNotExist(err) {
if _, err := os.Stat(i2pdPath); os.IsNotExist(err) {
t.Skip("Neither .i2p nor .i2pd netDb directories exist, so we will skip.")
}
}
tempDir, err := createTempNetDbDir()
if err != nil {
t.Fatalf("Failed to create temp directory: %v", err)
}
// defer cleanupTempDir(tempDir)
if err := consolidateAllNetDbs(tempDir); err != nil {
t.Fatalf("Failed to consolidate netDbs: %v", err)
}
time.Sleep(1 * time.Second)
targetDir, err := createTempNetDbDir()
if err != nil {
panic(err)
}
// Read and process all router info files
files, err := os.ReadDir(tempDir)
if err != nil {
t.Fatalf("Failed to read temp directory: %v", err)
}
for _, file := range files {
if !file.IsDir() && strings.HasPrefix(file.Name(), "routerInfo-") {
// Read the router info file
log.Println("RI LOAD: ", file.Name())
data, err := os.ReadFile(filepath.Join(tempDir, file.Name()))
if err != nil {
t.Logf("Failed to read file %s: %v", file.Name(), err)
continue
}
// Parse the router info
// fmt.Printf("data: %s\n", string(data))
routerInfo, _, err := ReadRouterInfo(data)
if err != nil {
t.Logf("Failed to parse router info from %s: %v", file.Name(), err)
continue
}
// Write the router info to the target directory
routerBytes, err := routerInfo.Bytes()
if err != nil {
t.Logf("Failed to serialize router info %s: %v", file.Name(), err)
continue
}
err = os.WriteFile(filepath.Join(targetDir, file.Name()), routerBytes, 0o644)
if err != nil {
t.Logf("Failed to write router info %s: %v", file.Name(), err)
continue
}
}
}
// Cleanup both directories
if err := cleanupTempDir(tempDir); err != nil {
log.WithError(err).Error("Failed to cleanup temp directory")
t.Errorf("Failed to cleanup temp directory: %v", err)
} else {
log.Debug("Successfully cleaned up temp directory")
}
if err := cleanupTempDir(targetDir); err != nil {
log.WithError(err).Error("Failed to cleanup target directory")
t.Errorf("Failed to cleanup target directory: %v", err)
} else {
log.Debug("Successfully cleaned up target directory")
}
}

View File

@@ -4,11 +4,12 @@ package router_info
import (
	"encoding/binary"
	"errors"
-	"github.com/go-i2p/go-i2p/lib/common/certificate"
	"strconv"
	"strings"
	"time"

+	"github.com/go-i2p/go-i2p/lib/common/certificate"
	"github.com/go-i2p/go-i2p/lib/crypto"
	"github.com/go-i2p/go-i2p/lib/util/logger"

View File

@@ -4,10 +4,11 @@ import (
"bytes" "bytes"
"crypto/rand" "crypto/rand"
"encoding/binary" "encoding/binary"
"github.com/go-i2p/go-i2p/lib/common/signature"
"testing" "testing"
"time" "time"
"github.com/go-i2p/go-i2p/lib/common/signature"
"github.com/go-i2p/go-i2p/lib/common/certificate" "github.com/go-i2p/go-i2p/lib/common/certificate"
"github.com/go-i2p/go-i2p/lib/common/data" "github.com/go-i2p/go-i2p/lib/common/data"
"github.com/go-i2p/go-i2p/lib/common/router_address" "github.com/go-i2p/go-i2p/lib/common/router_address"

View File

@@ -5,12 +5,57 @@ type (
	RSA2048PrivateKey [512]byte
)
// Bytes implements SigningPublicKey.
func (r RSA2048PublicKey) Bytes() []byte {
panic("unimplemented")
}
// Len implements SigningPublicKey.
func (r RSA2048PublicKey) Len() int {
panic("unimplemented")
}
// NewVerifier implements SigningPublicKey.
func (r RSA2048PublicKey) NewVerifier() (Verifier, error) {
panic("unimplemented")
}
type (
	RSA3072PublicKey  [384]byte
	RSA3072PrivateKey [786]byte
)
// Bytes implements SigningPublicKey.
func (r RSA3072PublicKey) Bytes() []byte {
panic("unimplemented")
}
// Len implements SigningPublicKey.
func (r RSA3072PublicKey) Len() int {
panic("unimplemented")
}
// NewVerifier implements SigningPublicKey.
func (r RSA3072PublicKey) NewVerifier() (Verifier, error) {
panic("unimplemented")
}
type (
	RSA4096PublicKey  [512]byte
	RSA4096PrivateKey [1024]byte
)
// Bytes implements SigningPublicKey.
func (r RSA4096PublicKey) Bytes() []byte {
panic("unimplemented")
}
// Len implements SigningPublicKey.
func (r RSA4096PublicKey) Len() int {
panic("unimplemented")
}
// NewVerifier implements SigningPublicKey.
func (r RSA4096PublicKey) NewVerifier() (Verifier, error) {
panic("unimplemented")
}

View File

@@ -76,7 +76,7 @@ func (r Reseed) SingleReseed(uri string) ([]router_info.RouterInfo, error) {
log.WithError(err).Error("Failed to read SU3 file signature") log.WithError(err).Error("Failed to read SU3 file signature")
return nil, err return nil, err
} }
log.Println("warning: this doesn't validate the signature yet", signature) log.Debug("warning: this doesn't validate the signature yet", signature)
log.Warn("Doesn't validate the signature yet", logrus.Fields{"signature": signature}) log.Warn("Doesn't validate the signature yet", logrus.Fields{"signature": signature})
} }
zip := filepath.Join(config.RouterConfigProperties.NetDb.Path, "reseed.zip") zip := filepath.Join(config.RouterConfigProperties.NetDb.Path, "reseed.zip")

View File

@@ -152,7 +152,7 @@ func (db *StdNetDB) RecalculateSize() (err error) {
	}
	if db.CheckFilePathValid(fname) {
		log.WithField("file_name", fname).Debug("Reading RouterInfo file")
-		log.Println("Reading in file:", fname)
+		log.Debug("Reading in file:", fname)
		b, err := os.ReadFile(fname)
		if err != nil {
			log.WithError(err).Error("Failed to read RouterInfo file")
@@ -165,9 +165,9 @@ func (db *StdNetDB) RecalculateSize() (err error) {
		}
		ih := ri.IdentHash().Bytes()
		log.WithError(err).Error("Failed to parse RouterInfo")
-		log.Printf("Read in IdentHash: %s", base32.EncodeToString(ih[:]))
+		log.Debugf("Read in IdentHash: %s", base32.EncodeToString(ih[:]))
		for _, addr := range ri.RouterAddresses() {
-			log.Println(string(addr.Bytes()))
+			log.Debug(string(addr.Bytes()))
			log.WithField("address", string(addr.Bytes())).Debug("RouterInfo address")
		}
		if ent, ok := db.RouterInfos[ih]; !ok {
@@ -177,13 +177,13 @@ func (db *StdNetDB) RecalculateSize() (err error) {
			}
		} else {
			log.Debug("RouterInfo already in memory cache")
-			log.Println("entry previously found in table", ent, fname)
+			log.Debug("entry previously found in table", ent, fname)
		}
		ri = router_info.RouterInfo{}
		count++
	} else {
		log.WithField("file_path", fname).Warn("Invalid file path")
-		log.Println("Invalid path error")
+		log.Debug("Invalid path error")
	}
	return err
})

View File

@@ -36,7 +36,7 @@ func (c *NoiseSession) RunIncomingHandshake() error {
	}
	log.Debug("Handshake message written successfully")
	log.WithField("state", state).Debug("Handshake state after message write")
-	log.Println(state)
+	log.Debug(state)
	c.handshakeComplete = true
	log.Debug("Incoming handshake completed successfully")
	return nil

View File

@@ -39,7 +39,7 @@ func (c *NoiseSession) RunOutgoingHandshake() error {
	}
	log.Debug("Handshake message written successfully")
	log.WithField("state", state).Debug("Handshake state after message write")
-	log.Println(state)
+	log.Debug(state)
	c.handshakeComplete = true
	log.Debug("Outgoing handshake completed successfully")
	return nil

View File

@@ -1,7 +1,7 @@
package logger

import (
-	"io/ioutil"
+	"io"
	"os"
	"strings"
	"sync"
@@ -10,18 +10,96 @@ import (
)

var (
-	log  *logrus.Logger
+	log      *Logger
	once sync.Once
+	failFast string
)
// Logger wraps logrus.Logger and adds the ability to make all warnings fatal
type Logger struct {
*logrus.Logger
}
// Entry wraps logrus.Entry and enables it to use our Logger
type Entry struct {
Logger
entry *logrus.Entry
}
// Warn wraps logrus.Warn and logs a fatal error if failFast is set
func (l *Logger) Warn(args ...interface{}) {
warnFatal(args)
l.Logger.Warn(args...)
}
// Warnf wraps logrus.Warnf and logs a fatal error if failFast is set
func (l *Logger) Warnf(format string, args ...interface{}) {
warnFatalf(format, args...)
l.Logger.Warnf(format, args...)
}
// Error wraps logrus.Error and logs a fatal error if failFast is set
func (l *Logger) Error(args ...interface{}) {
warnFatal(args)
l.Logger.Error(args...)
}
// Errorf wraps logrus.Errorf and logs a fatal error if failFast is set
func (l *Logger) Errorf(format string, args ...interface{}) {
warnFatalf(format, args...)
l.Logger.Errorf(format, args...)
}
// WithField wraps logrus.WithField and returns an Entry
func (l *Logger) WithField(key string, value interface{}) *Entry {
entry := l.Logger.WithField(key, value)
return &Entry{*l, entry}
}
// WithFields wraps logrus.WithFields and returns an Entry
func (l *Logger) WithFields(fields logrus.Fields) *Entry {
entry := l.Logger.WithFields(fields)
return &Entry{*l, entry}
}
// WithError wraps logrus.WithError and returns an Entry
func (l *Logger) WithError(err error) *Entry {
entry := l.Logger.WithError(err)
return &Entry{*l, entry}
}
func warnFatal(args ...interface{}) {
if failFast != "" {
log.Fatal(args)
}
}
func warnFatalf(format string, args ...interface{}) {
if failFast != "" {
log.Fatalf(format, args...)
}
}
func warnFail() {
if failFast != "" {
log.Error("FATAL ERROR")
}
}
+// InitializeGoI2PLogger sets up all the necessary logging
func InitializeGoI2PLogger() {
	once.Do(func() {
-		log = logrus.New()
+		log = &Logger{}
+		log.Logger = logrus.New()
		// We do not want to log by default
-		log.SetOutput(ioutil.Discard)
+		log.SetOutput(io.Discard)
		log.SetLevel(logrus.PanicLevel)
		// Check if DEBUG_I2P is set
		if logLevel := os.Getenv("DEBUG_I2P"); logLevel != "" {
+			failFast = os.Getenv("WARNFAIL_I2P")
+			if failFast != "" && logLevel == "" {
+				logLevel = "debug"
+			}
			log.SetOutput(os.Stdout)
			switch strings.ToLower(logLevel) {
			case "debug":
@@ -38,8 +116,8 @@ func InitializeGoI2PLogger() {
	})
}

-// GetGoI2PLogger returns the initialized logger
-func GetGoI2PLogger() *logrus.Logger {
+// GetGoI2PLogger returns the initialized Logger
+func GetGoI2PLogger() *Logger {
	if log == nil {
		InitializeGoI2PLogger()
	}
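
Taken together with the README section above, the intended use looks roughly like the hypothetical consumer below (illustrative only; the import path matches the one already used by router_info in this changeset).

```go
package main

import "github.com/go-i2p/go-i2p/lib/util/logger"

func main() {
	// With DEBUG_I2P unset the logger stays silent (output discarded at PanicLevel);
	// with DEBUG_I2P=debug|warn|error it logs to stdout at that level.
	log := logger.GetGoI2PLogger()

	log.Debug("visible only when DEBUG_I2P is set to debug")

	// Per the README, setting WARNFAIL_I2P to any non-empty value escalates every
	// Warn or Error to a fatal log, stopping the program immediately.
	log.Warn("this becomes fatal when WARNFAIL_I2P is set")
}
```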