提交 ac42bb79 作者: Richard Littauer

Capitalized DHT

License: MIT
Signed-off-by: Richard Littauer <richard.littauer@gmail.com>
上级 f8eefba0
...@@ -158,7 +158,7 @@ USAGE: ...@@ -158,7 +158,7 @@ USAGE:
id Show info about ipfs peers id Show info about ipfs peers
bootstrap Add or remove bootstrap peers bootstrap Add or remove bootstrap peers
swarm Manage connections to the p2p network swarm Manage connections to the p2p network
dht Query the dht for values or peers dht Query the DHT for values or peers
ping Measure the latency of a connection ping Measure the latency of a connection
diag Print diagnostics diag Print diagnostics
......
...@@ -319,7 +319,7 @@ var getValueDhtCmd = &cmds.Command{ ...@@ -319,7 +319,7 @@ var getValueDhtCmd = &cmds.Command{
Helptext: cmds.HelpText{ Helptext: cmds.HelpText{
Tagline: "Run a 'GetValue' query through the DHT.", Tagline: "Run a 'GetValue' query through the DHT.",
ShortDescription: ` ShortDescription: `
GetValue will return the value stored in the dht at the given key. GetValue will return the value stored in the DHT at the given key.
`, `,
}, },
...@@ -422,7 +422,7 @@ var putValueDhtCmd = &cmds.Command{ ...@@ -422,7 +422,7 @@ var putValueDhtCmd = &cmds.Command{
Helptext: cmds.HelpText{ Helptext: cmds.HelpText{
Tagline: "Run a 'PutValue' query through the DHT.", Tagline: "Run a 'PutValue' query through the DHT.",
ShortDescription: ` ShortDescription: `
PutValue will store the given key value pair in the dht. PutValue will store the given key value pair in the DHT.
`, `,
}, },
......
...@@ -59,7 +59,7 @@ NETWORK COMMANDS ...@@ -59,7 +59,7 @@ NETWORK COMMANDS
id Show info about ipfs peers id Show info about ipfs peers
bootstrap Add or remove bootstrap peers bootstrap Add or remove bootstrap peers
swarm Manage connections to the p2p network swarm Manage connections to the p2p network
dht Query the dht for values or peers dht Query the DHT for values or peers
ping Measure the latency of a connection ping Measure the latency of a connection
diag Print diagnostics diag Print diagnostics
......
...@@ -16,7 +16,7 @@ There are multiple subpackages: ...@@ -16,7 +16,7 @@ There are multiple subpackages:
- `path` - path resolution over merkledag data structure - `path` - path resolution over merkledag data structure
- `peer` - identity + addresses of local and remote peers - `peer` - identity + addresses of local and remote peers
- `routing` - the routing system - `routing` - the routing system
- `routing/dht` - the dht default routing system implementation - `routing/dht` - the DHT default routing system implementation
- `swarm` - connection multiplexing, many peers and many transports - `swarm` - connection multiplexing, many peers and many transports
- `util` - various utilities - `util` - various utilities
......
...@@ -70,7 +70,7 @@ func (dht *IpfsDHT) handleNewMessage(s inet.Stream) { ...@@ -70,7 +70,7 @@ func (dht *IpfsDHT) handleNewMessage(s inet.Stream) {
// measure the RTT for latency measurements. // measure the RTT for latency measurements.
func (dht *IpfsDHT) sendRequest(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) { func (dht *IpfsDHT) sendRequest(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
log.Debugf("%s dht starting stream", dht.self) log.Debugf("%s DHT starting stream", dht.self)
s, err := dht.host.NewStream(ctx, ProtocolDHT, p) s, err := dht.host.NewStream(ctx, ProtocolDHT, p)
if err != nil { if err != nil {
return nil, err return nil, err
...@@ -108,7 +108,7 @@ func (dht *IpfsDHT) sendRequest(ctx context.Context, p peer.ID, pmes *pb.Message ...@@ -108,7 +108,7 @@ func (dht *IpfsDHT) sendRequest(ctx context.Context, p peer.ID, pmes *pb.Message
// sendMessage sends out a message // sendMessage sends out a message
func (dht *IpfsDHT) sendMessage(ctx context.Context, p peer.ID, pmes *pb.Message) error { func (dht *IpfsDHT) sendMessage(ctx context.Context, p peer.ID, pmes *pb.Message) error {
log.Debugf("%s dht starting stream", dht.self) log.Debugf("%s DHT starting stream", dht.self)
s, err := dht.host.NewStream(ctx, ProtocolDHT, p) s, err := dht.host.NewStream(ctx, ProtocolDHT, p)
if err != nil { if err != nil {
return err return err
......
...@@ -93,7 +93,7 @@ func connect(t *testing.T, ctx context.Context, a, b *IpfsDHT) { ...@@ -93,7 +93,7 @@ func connect(t *testing.T, ctx context.Context, a, b *IpfsDHT) {
func bootstrap(t *testing.T, ctx context.Context, dhts []*IpfsDHT) { func bootstrap(t *testing.T, ctx context.Context, dhts []*IpfsDHT) {
ctx, cancel := context.WithCancel(ctx) ctx, cancel := context.WithCancel(ctx)
log.Debugf("bootstrapping dhts...") log.Debugf("Bootstrapping DHTs...")
// tried async. sequential fares much better. compare: // tried async. sequential fares much better. compare:
// 100 async https://gist.github.com/jbenet/56d12f0578d5f34810b2 // 100 async https://gist.github.com/jbenet/56d12f0578d5f34810b2
...@@ -391,7 +391,7 @@ func TestPeriodicBootstrap(t *testing.T) { ...@@ -391,7 +391,7 @@ func TestPeriodicBootstrap(t *testing.T) {
connect(t, ctx, dhts[i], dhts[(i+1)%len(dhts)]) connect(t, ctx, dhts[i], dhts[(i+1)%len(dhts)])
} }
t.Logf("dhts are now connected to 1-2 others.", nDHTs) t.Logf("DHTs are now connected to 1-2 others.", nDHTs)
for _, dht := range dhts { for _, dht := range dhts {
rtlen := dht.routingTable.Size() rtlen := dht.routingTable.Size()
if rtlen > 2 { if rtlen > 2 {
......
...@@ -106,7 +106,7 @@ func (dht *IpfsDHT) checkLocalDatastore(k key.Key) (*pb.Record, error) { ...@@ -106,7 +106,7 @@ func (dht *IpfsDHT) checkLocalDatastore(k key.Key) (*pb.Record, error) {
rec := new(pb.Record) rec := new(pb.Record)
err = proto.Unmarshal(byts, rec) err = proto.Unmarshal(byts, rec)
if err != nil { if err != nil {
log.Debug("Failed to unmarshal dht record from datastore") log.Debug("Failed to unmarshal DHT record from datastore.")
return nil, err return nil, err
} }
......
...@@ -42,7 +42,7 @@ func (dht *IpfsDHT) GetPublicKey(ctx context.Context, p peer.ID) (ci.PubKey, err ...@@ -42,7 +42,7 @@ func (dht *IpfsDHT) GetPublicKey(ctx context.Context, p peer.ID) (ci.PubKey, err
} }
// last ditch effort: let's try the dht. // last ditch effort: let's try the dht.
log.Debugf("pk for %s not in peerstore, and peer failed. trying dht.", p) log.Debugf("pk for %s not in peerstore, and peer failed. Trying DHT.", p)
pkkey := routing.KeyForPublicKey(p) pkkey := routing.KeyForPublicKey(p)
val, err := dht.GetValue(ctxT, pkkey) val, err := dht.GetValue(ctxT, pkkey)
...@@ -77,14 +77,14 @@ func (dht *IpfsDHT) getPublicKeyFromNode(ctx context.Context, p peer.ID) (ci.Pub ...@@ -77,14 +77,14 @@ func (dht *IpfsDHT) getPublicKeyFromNode(ctx context.Context, p peer.ID) (ci.Pub
// node doesn't have key :( // node doesn't have key :(
record := pmes.GetRecord() record := pmes.GetRecord()
if record == nil { if record == nil {
return nil, fmt.Errorf("node not responding with its public key: %s", p) return nil, fmt.Errorf("Node not responding with its public key: %s", p)
} }
// Success! We were given the value. we don't need to check // Success! We were given the value. we don't need to check
// validity because a) we can't. b) we know the hash of the // validity because a) we can't. b) we know the hash of the
// key we're looking for. // key we're looking for.
val := record.GetValue() val := record.GetValue()
log.Debug("dht got a value from other peer.") log.Debug("DHT got a value from other peer.")
pk, err = ci.UnmarshalPublicKey(val) pk, err = ci.UnmarshalPublicKey(val)
if err != nil { if err != nil {
...@@ -100,7 +100,7 @@ func (dht *IpfsDHT) getPublicKeyFromNode(ctx context.Context, p peer.ID) (ci.Pub ...@@ -100,7 +100,7 @@ func (dht *IpfsDHT) getPublicKeyFromNode(ctx context.Context, p peer.ID) (ci.Pub
} }
// ok! it's valid. we got it! // ok! it's valid. we got it!
log.Debugf("dht got public key from node itself.") log.Debugf("DHT got public key from node itself.")
return pk, nil return pk, nil
} }
......
Markdown 格式
0%
您添加了 0 人到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论