提交 27f34b43 作者: Juan Batiz-Benet 提交者: Jeromy

renamed {R,}Lock -> {Pin,GC}Lock

License: MIT
Signed-off-by: Juan Batiz-Benet <juan@benet.ai>
上级 7b675e87
......@@ -39,8 +39,16 @@ type Blockstore interface {
type GCBlockstore interface {
Blockstore
Lock() func()
RLock() func()
// GCLock locks the blockstore for garbage collection. No operations
// that expect to finish with a pin should occur simultaneously.
// Reading during GC is safe, and requires no lock.
GCLock() func()
// PinLock locks the blockstore for sequences of puts expected to finish
// with a pin (before GC). Multiple put->pin sequences can write through
// at the same time, but no GC should happen simultaneously.
// Reading during Pinning is safe, and requires no lock.
PinLock() func()
}
func NewBlockstore(d ds.ThreadSafeDatastore) *blockstore {
......@@ -183,12 +191,12 @@ func (bs *blockstore) AllKeysChan(ctx context.Context) (<-chan key.Key, error) {
return output, nil
}
func (bs *blockstore) Lock() func() {
func (bs *blockstore) GCLock() func() {
bs.lk.Lock()
return bs.lk.Unlock
}
func (bs *blockstore) RLock() func() {
func (bs *blockstore) PinLock() func() {
bs.lk.RLock()
return bs.lk.RUnlock
}
......@@ -59,10 +59,10 @@ func (w *writecache) AllKeysChan(ctx context.Context) (<-chan key.Key, error) {
return w.blockstore.AllKeysChan(ctx)
}
func (w *writecache) Lock() func() {
return w.blockstore.(GCBlockstore).Lock()
func (w *writecache) GCLock() func() {
return w.blockstore.(GCBlockstore).GCLock()
}
func (w *writecache) RLock() func() {
return w.blockstore.(GCBlockstore).RLock()
func (w *writecache) PinLock() func() {
return w.blockstore.(GCBlockstore).PinLock()
}
......@@ -50,7 +50,7 @@ on disk.
return
}
unlock := n.Blockstore.RLock()
unlock := n.Blockstore.PinLock()
defer unlock()
// set recursive flag
......
......@@ -23,7 +23,7 @@ var log = logging.Logger("coreunix")
// Add builds a merkledag from a reader, pinning all objects to the local
// datastore. Returns a key representing the root node.
func Add(n *core.IpfsNode, r io.Reader) (string, error) {
unlock := n.Blockstore.RLock()
unlock := n.Blockstore.PinLock()
defer unlock()
// TODO more attractive function signature importer.BuildDagFromReader
......@@ -46,7 +46,7 @@ func Add(n *core.IpfsNode, r io.Reader) (string, error) {
// AddR recursively adds files in |path|.
func AddR(n *core.IpfsNode, root string) (key string, err error) {
unlock := n.Blockstore.RLock()
unlock := n.Blockstore.PinLock()
defer unlock()
stat, err := os.Lstat(root)
......@@ -86,7 +86,7 @@ func AddWrapped(n *core.IpfsNode, r io.Reader, filename string) (string, *merkle
file := files.NewReaderFile(filename, filename, ioutil.NopCloser(r), nil)
dir := files.NewSliceFile("", "", []files.File{file})
unlock := n.Blockstore.RLock()
unlock := n.Blockstore.PinLock()
defer unlock()
dagnode, err := addDir(n, dir)
if err != nil {
......
......@@ -300,15 +300,13 @@ func TestCantGet(t *testing.T) {
func TestFetchGraph(t *testing.T) {
var dservs []DAGService
bsis := bstest.Mocks(t, 2)
bsis := bstest.Mocks(2)
for _, bsi := range bsis {
dservs = append(dservs, NewDAGService(bsi))
}
read := io.LimitReader(u.NewTimeSeededRand(), 1024*32)
spl := &chunk.SizeSplitter{512}
root, err := imp.BuildDagFromReader(read, dservs[0], spl, nil)
root, err := imp.BuildDagFromReader(dservs[0], chunk.NewSizeSplitter(read, 512), nil)
if err != nil {
t.Fatal(err)
}
......@@ -319,10 +317,7 @@ func TestFetchGraph(t *testing.T) {
}
// create an offline dagstore and ensure all blocks were fetched
bs, err := bserv.New(bsis[1].Blockstore, offline.Exchange(bsis[1].Blockstore))
if err != nil {
t.Fatal(err)
}
bs := bserv.New(bsis[1].Blockstore, offline.Exchange(bsis[1].Blockstore))
offline_ds := NewDAGService(bs)
ks := key.NewKeySet()
......@@ -334,14 +329,11 @@ func TestFetchGraph(t *testing.T) {
}
func TestEnumerateChildren(t *testing.T) {
bsi := bstest.Mocks(t, 1)
bsi := bstest.Mocks(1)
ds := NewDAGService(bsi[0])
spl := &chunk.SizeSplitter{512}
read := io.LimitReader(u.NewTimeSeededRand(), 1024*1024)
root, err := imp.BuildDagFromReader(read, ds, spl, nil)
root, err := imp.BuildDagFromReader(ds, chunk.NewSizeSplitter(read, 512), nil)
if err != nil {
t.Fatal(err)
}
......
......@@ -195,10 +195,7 @@ func TestDuplicateSemantics(t *testing.T) {
func TestFlush(t *testing.T) {
dstore := dssync.MutexWrap(ds.NewMapDatastore())
bstore := blockstore.NewBlockstore(dstore)
bserv, err := bs.New(bstore, offline.Exchange(bstore))
if err != nil {
t.Fatal(err)
}
bserv := bs.New(bstore, offline.Exchange(bstore))
dserv := mdag.NewDAGService(bserv)
p := NewPinner(dstore, dserv)
......
......@@ -27,10 +27,7 @@ func copyMap(m map[key.Key]uint16) map[key.Key]uint64 {
func TestMultisetRoundtrip(t *testing.T) {
dstore := dssync.MutexWrap(datastore.NewMapDatastore())
bstore := blockstore.NewBlockstore(dstore)
bserv, err := blockservice.New(bstore, offline.Exchange(bstore))
if err != nil {
t.Fatal(err)
}
bserv := blockservice.New(bstore, offline.Exchange(bstore))
dag := merkledag.NewDAGService(bserv)
fn := func(m map[key.Key]uint16) bool {
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论