Skip to content
项目
群组
代码片段
帮助
当前项目
正在载入...
登录 / 注册
切换导航面板
G
go-ipfs
概览
概览
详情
活动
周期分析
版本库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
统计图
问题
0
议题
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
CI / CD
CI / CD
流水线
作业
日程表
图表
维基
Wiki
代码片段
代码片段
成员
成员
折叠边栏
关闭边栏
活动
图像
聊天
创建新问题
作业
提交
问题看板
Open sidebar
jihao
go-ipfs
Commits
d7eb57f4
提交
d7eb57f4
authored
2月 18, 2015
作者:
Jeromy
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
add worker to bitswap for reproviding new blocks
上级
d8f9f524
显示空白字符变更
内嵌
并排
正在显示
3 个修改的文件
包含
156 行增加
和
91 行删除
+156
-91
blocks.go
blocks/blocks.go
+6
-0
bitswap.go
exchange/bitswap/bitswap.go
+17
-91
workers.go
exchange/bitswap/workers.go
+133
-0
没有找到文件。
blocks/blocks.go
浏览文件 @
d7eb57f4
...
...
@@ -42,3 +42,9 @@ func (b *Block) Key() u.Key {
// String implements fmt.Stringer, identifying the block by its key.
func (b *Block) String() string {
	return fmt.Sprintf("[Block %s]", b.Key())
}
// Loggable returns structured-logging metadata for the block: a single
// "block" entry holding the block's key rendered as a string.
func (b *Block) Loggable() map[string]interface{} {
	return map[string]interface{}{
		"block": b.Key().String(),
	}
}
exchange/bitswap/bitswap.go
浏览文件 @
d7eb57f4
...
...
@@ -8,7 +8,6 @@ import (
"time"
context
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
inflect
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/inflect"
process
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess"
blocks
"github.com/jbenet/go-ipfs/blocks"
...
...
@@ -37,9 +36,13 @@ const (
maxProvidersPerRequest
=
3
providerRequestTimeout
=
time
.
Second
*
10
hasBlockTimeout
=
time
.
Second
*
15
provideTimeout
=
time
.
Second
*
15
sizeBatchRequestChan
=
32
// kMaxPriority is the max priority as defined by the bitswap protocol
kMaxPriority
=
math
.
MaxInt32
hasBlockBufferSize
=
256
provideWorkers
=
4
)
var
(
...
...
@@ -86,18 +89,12 @@ func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork,
wantlist
:
wantlist
.
NewThreadSafe
(),
batchRequests
:
make
(
chan
*
blockRequest
,
sizeBatchRequestChan
),
process
:
px
,
newBlocks
:
make
(
chan
*
blocks
.
Block
,
hasBlockBufferSize
),
}
network
.
SetDelegate
(
bs
)
px
.
Go
(
func
(
px
process
.
Process
)
{
bs
.
clientWorker
(
ctx
)
})
px
.
Go
(
func
(
px
process
.
Process
)
{
bs
.
taskWorker
(
ctx
)
})
px
.
Go
(
func
(
px
process
.
Process
)
{
bs
.
rebroadcastWorker
(
ctx
)
})
// Start up bitswaps async worker routines
bs
.
startWorkers
(
px
,
ctx
)
return
bs
}
...
...
@@ -126,6 +123,8 @@ type bitswap struct {
wantlist
*
wantlist
.
ThreadSafe
process
process
.
Process
newBlocks
chan
*
blocks
.
Block
}
type
blockRequest
struct
{
...
...
@@ -172,7 +171,6 @@ func (bs *bitswap) GetBlock(parent context.Context, k u.Key) (*blocks.Block, err
case
<-
parent
.
Done
()
:
return
nil
,
parent
.
Err
()
}
}
// GetBlocks returns a channel where the caller may receive blocks that
...
...
@@ -205,6 +203,7 @@ func (bs *bitswap) GetBlocks(ctx context.Context, keys []u.Key) (<-chan *blocks.
// HasBlock announces the existence of a block to this bitswap service. The
// service will potentially notify its peers.
func
(
bs
*
bitswap
)
HasBlock
(
ctx
context
.
Context
,
blk
*
blocks
.
Block
)
error
{
log
.
Event
(
ctx
,
"hasBlock"
,
blk
)
select
{
case
<-
bs
.
process
.
Closing
()
:
return
errors
.
New
(
"bitswap is closed"
)
...
...
@@ -215,7 +214,12 @@ func (bs *bitswap) HasBlock(ctx context.Context, blk *blocks.Block) error {
}
bs
.
wantlist
.
Remove
(
blk
.
Key
())
bs
.
notifications
.
Publish
(
blk
)
return
bs
.
network
.
Provide
(
ctx
,
blk
.
Key
())
select
{
case
bs
.
newBlocks
<-
blk
:
case
<-
ctx
.
Done
()
:
return
ctx
.
Err
()
}
return
nil
}
func
(
bs
*
bitswap
)
sendWantlistMsgToPeers
(
ctx
context
.
Context
,
m
bsmsg
.
BitSwapMessage
,
peers
<-
chan
peer
.
ID
)
error
{
...
...
@@ -310,6 +314,7 @@ func (bs *bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg
log
.
Debug
(
err
)
}
}
var
keys
[]
u
.
Key
for
_
,
block
:=
range
incoming
.
Blocks
()
{
keys
=
append
(
keys
,
block
.
Key
())
...
...
@@ -391,82 +396,3 @@ func (bs *bitswap) send(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage)
// Close shuts down bitswap by closing its process tree, which stops
// the worker goroutines registered on it.
func (bs *bitswap) Close() error {
	return bs.process.Close()
}
func
(
bs
*
bitswap
)
taskWorker
(
ctx
context
.
Context
)
{
defer
log
.
Info
(
"bitswap task worker shutting down..."
)
for
{
select
{
case
<-
ctx
.
Done
()
:
return
case
nextEnvelope
:=
<-
bs
.
engine
.
Outbox
()
:
select
{
case
<-
ctx
.
Done
()
:
return
case
envelope
,
ok
:=
<-
nextEnvelope
:
if
!
ok
{
continue
}
log
.
Event
(
ctx
,
"deliverBlocks"
,
envelope
.
Message
,
envelope
.
Peer
)
bs
.
send
(
ctx
,
envelope
.
Peer
,
envelope
.
Message
)
}
}
}
}
// TODO ensure only one active request per key
func
(
bs
*
bitswap
)
clientWorker
(
parent
context
.
Context
)
{
defer
log
.
Info
(
"bitswap client worker shutting down..."
)
for
{
select
{
case
req
:=
<-
bs
.
batchRequests
:
keys
:=
req
.
keys
if
len
(
keys
)
==
0
{
log
.
Warning
(
"Received batch request for zero blocks"
)
continue
}
for
i
,
k
:=
range
keys
{
bs
.
wantlist
.
Add
(
k
,
kMaxPriority
-
i
)
}
bs
.
wantNewBlocks
(
req
.
ctx
,
keys
)
// NB: Optimization. Assumes that providers of key[0] are likely to
// be able to provide for all keys. This currently holds true in most
// every situation. Later, this assumption may not hold as true.
child
,
_
:=
context
.
WithTimeout
(
req
.
ctx
,
providerRequestTimeout
)
providers
:=
bs
.
network
.
FindProvidersAsync
(
child
,
keys
[
0
],
maxProvidersPerRequest
)
err
:=
bs
.
sendWantlistToPeers
(
req
.
ctx
,
providers
)
if
err
!=
nil
{
log
.
Debugf
(
"error sending wantlist: %s"
,
err
)
}
case
<-
parent
.
Done
()
:
return
}
}
}
func
(
bs
*
bitswap
)
rebroadcastWorker
(
parent
context
.
Context
)
{
ctx
,
cancel
:=
context
.
WithCancel
(
parent
)
defer
cancel
()
broadcastSignal
:=
time
.
After
(
rebroadcastDelay
.
Get
())
for
{
select
{
case
<-
time
.
Tick
(
10
*
time
.
Second
)
:
n
:=
bs
.
wantlist
.
Len
()
if
n
>
0
{
log
.
Debug
(
n
,
inflect
.
FromNumber
(
"keys"
,
n
),
"in bitswap wantlist"
)
}
case
<-
broadcastSignal
:
// resend unfulfilled wantlist keys
entries
:=
bs
.
wantlist
.
Entries
()
if
len
(
entries
)
>
0
{
bs
.
sendWantlistToProviders
(
ctx
,
entries
)
}
broadcastSignal
=
time
.
After
(
rebroadcastDelay
.
Get
())
case
<-
parent
.
Done
()
:
return
}
}
}
exchange/bitswap/workers.go
0 → 100644
浏览文件 @
d7eb57f4
package
bitswap
import
(
"time"
context
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
inflect
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/briantigerchow/inflect"
process
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess"
)
func
(
bs
*
bitswap
)
startWorkers
(
px
process
.
Process
,
ctx
context
.
Context
)
{
// Start up a worker to handle block requests this node is making
px
.
Go
(
func
(
px
process
.
Process
)
{
bs
.
clientWorker
(
ctx
)
})
// Start up a worker to handle requests from other nodes for the data on this node
px
.
Go
(
func
(
px
process
.
Process
)
{
bs
.
taskWorker
(
ctx
)
})
// Start up a worker to manage periodically resending our wantlist out to peers
px
.
Go
(
func
(
px
process
.
Process
)
{
bs
.
rebroadcastWorker
(
ctx
)
})
// Spawn up multiple workers to handle incoming blocks
// consider increasing number if providing blocks bottlenecks
// file transfers
for
i
:=
0
;
i
<
provideWorkers
;
i
++
{
px
.
Go
(
func
(
px
process
.
Process
)
{
bs
.
blockReceiveWorker
(
ctx
)
})
}
}
func
(
bs
*
bitswap
)
taskWorker
(
ctx
context
.
Context
)
{
defer
log
.
Info
(
"bitswap task worker shutting down..."
)
for
{
select
{
case
nextEnvelope
:=
<-
bs
.
engine
.
Outbox
()
:
select
{
case
envelope
,
ok
:=
<-
nextEnvelope
:
if
!
ok
{
continue
}
log
.
Event
(
ctx
,
"deliverBlocks"
,
envelope
.
Message
,
envelope
.
Peer
)
bs
.
send
(
ctx
,
envelope
.
Peer
,
envelope
.
Message
)
case
<-
ctx
.
Done
()
:
return
}
case
<-
ctx
.
Done
()
:
return
}
}
}
func
(
bs
*
bitswap
)
blockReceiveWorker
(
ctx
context
.
Context
)
{
for
{
select
{
case
blk
,
ok
:=
<-
bs
.
newBlocks
:
if
!
ok
{
log
.
Debug
(
"newBlocks channel closed"
)
return
}
ctx
,
_
:=
context
.
WithTimeout
(
ctx
,
provideTimeout
)
err
:=
bs
.
network
.
Provide
(
ctx
,
blk
.
Key
())
if
err
!=
nil
{
log
.
Error
(
err
)
}
case
<-
ctx
.
Done
()
:
return
}
}
}
// TODO ensure only one active request per key
func
(
bs
*
bitswap
)
clientWorker
(
parent
context
.
Context
)
{
defer
log
.
Info
(
"bitswap client worker shutting down..."
)
for
{
select
{
case
req
:=
<-
bs
.
batchRequests
:
keys
:=
req
.
keys
if
len
(
keys
)
==
0
{
log
.
Warning
(
"Received batch request for zero blocks"
)
continue
}
for
i
,
k
:=
range
keys
{
bs
.
wantlist
.
Add
(
k
,
kMaxPriority
-
i
)
}
bs
.
wantNewBlocks
(
req
.
ctx
,
keys
)
// NB: Optimization. Assumes that providers of key[0] are likely to
// be able to provide for all keys. This currently holds true in most
// every situation. Later, this assumption may not hold as true.
child
,
_
:=
context
.
WithTimeout
(
req
.
ctx
,
providerRequestTimeout
)
providers
:=
bs
.
network
.
FindProvidersAsync
(
child
,
keys
[
0
],
maxProvidersPerRequest
)
err
:=
bs
.
sendWantlistToPeers
(
req
.
ctx
,
providers
)
if
err
!=
nil
{
log
.
Debugf
(
"error sending wantlist: %s"
,
err
)
}
case
<-
parent
.
Done
()
:
return
}
}
}
func
(
bs
*
bitswap
)
rebroadcastWorker
(
parent
context
.
Context
)
{
ctx
,
cancel
:=
context
.
WithCancel
(
parent
)
defer
cancel
()
broadcastSignal
:=
time
.
After
(
rebroadcastDelay
.
Get
())
for
{
select
{
case
<-
time
.
Tick
(
10
*
time
.
Second
)
:
n
:=
bs
.
wantlist
.
Len
()
if
n
>
0
{
log
.
Debug
(
n
,
inflect
.
FromNumber
(
"keys"
,
n
),
"in bitswap wantlist"
)
}
case
<-
broadcastSignal
:
// resend unfulfilled wantlist keys
entries
:=
bs
.
wantlist
.
Entries
()
if
len
(
entries
)
>
0
{
bs
.
sendWantlistToProviders
(
ctx
,
entries
)
}
broadcastSignal
=
time
.
After
(
rebroadcastDelay
.
Get
())
case
<-
parent
.
Done
()
:
return
}
}
}
编写
预览
Markdown
格式
0%
重试
或
添加新文件
添加附件
取消
您添加了
0
人
到此讨论。请谨慎行事。
请先完成此评论的编辑!
取消
请
注册
或者
登录
后发表评论