提交 518adec0 作者: Kevin Atkinson

adder: add support for using CidV1

License: MIT
Signed-off-by: Kevin Atkinson <k@kevina.org>
上级 9b58fa77
...@@ -38,6 +38,7 @@ const ( ...@@ -38,6 +38,7 @@ const (
rawLeavesOptionName = "raw-leaves" rawLeavesOptionName = "raw-leaves"
noCopyOptionName = "nocopy" noCopyOptionName = "nocopy"
fstoreCacheOptionName = "fscache" fstoreCacheOptionName = "fscache"
cidVersionOptionName = "cid-version"
) )
const adderOutChanSize = 8 const adderOutChanSize = 8
...@@ -88,6 +89,7 @@ You can now refer to the added file in a gateway, like so: ...@@ -88,6 +89,7 @@ You can now refer to the added file in a gateway, like so:
cmds.BoolOption(rawLeavesOptionName, "Use raw blocks for leaf nodes. (experimental)"), cmds.BoolOption(rawLeavesOptionName, "Use raw blocks for leaf nodes. (experimental)"),
cmds.BoolOption(noCopyOptionName, "Add the file using filestore. (experimental)"), cmds.BoolOption(noCopyOptionName, "Add the file using filestore. (experimental)"),
cmds.BoolOption(fstoreCacheOptionName, "Check the filestore for pre-existing blocks. (experimental)"), cmds.BoolOption(fstoreCacheOptionName, "Check the filestore for pre-existing blocks. (experimental)"),
cmds.IntOption(cidVersionOptionName, "Cid version. Non-zero value will change default of 'raw-leaves' to true. (experimental)").Default(0),
}, },
PreRun: func(req cmds.Request) error { PreRun: func(req cmds.Request) error {
quiet, _, _ := req.Option(quietOptionName).Bool() quiet, _, _ := req.Option(quietOptionName).Bool()
...@@ -161,6 +163,7 @@ You can now refer to the added file in a gateway, like so: ...@@ -161,6 +163,7 @@ You can now refer to the added file in a gateway, like so:
rawblks, rbset, _ := req.Option(rawLeavesOptionName).Bool() rawblks, rbset, _ := req.Option(rawLeavesOptionName).Bool()
nocopy, _, _ := req.Option(noCopyOptionName).Bool() nocopy, _, _ := req.Option(noCopyOptionName).Bool()
fscache, _, _ := req.Option(fstoreCacheOptionName).Bool() fscache, _, _ := req.Option(fstoreCacheOptionName).Bool()
cidVer, _, _ := req.Option(cidVersionOptionName).Int()
if nocopy && !cfg.Experimental.FilestoreEnabled { if nocopy && !cfg.Experimental.FilestoreEnabled {
res.SetError(errors.New("filestore is not enabled, see https://git.io/vy4XN"), res.SetError(errors.New("filestore is not enabled, see https://git.io/vy4XN"),
...@@ -177,6 +180,16 @@ You can now refer to the added file in a gateway, like so: ...@@ -177,6 +180,16 @@ You can now refer to the added file in a gateway, like so:
return return
} }
if cidVer >= 1 && !rbset {
rawblks = true
}
prefix, err := dag.PrefixForCidVersion(cidVer)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
}
if hash { if hash {
nilnode, err := core.NewNode(n.Context(), &core.BuildCfg{ nilnode, err := core.NewNode(n.Context(), &core.BuildCfg{
//TODO: need this to be true or all files //TODO: need this to be true or all files
...@@ -223,6 +236,7 @@ You can now refer to the added file in a gateway, like so: ...@@ -223,6 +236,7 @@ You can now refer to the added file in a gateway, like so:
fileAdder.Silent = silent fileAdder.Silent = silent
fileAdder.RawLeaves = rawblks fileAdder.RawLeaves = rawblks
fileAdder.NoCopy = nocopy fileAdder.NoCopy = nocopy
fileAdder.Prefix = prefix
if hash { if hash {
md := dagtest.Mock() md := dagtest.Mock()
......
...@@ -69,13 +69,7 @@ type AddedObject struct { ...@@ -69,13 +69,7 @@ type AddedObject struct {
} }
func NewAdder(ctx context.Context, p pin.Pinner, bs bstore.GCBlockstore, ds dag.DAGService) (*Adder, error) { func NewAdder(ctx context.Context, p pin.Pinner, bs bstore.GCBlockstore, ds dag.DAGService) (*Adder, error) {
mr, err := mfs.NewRoot(ctx, ds, unixfs.EmptyDirNode(), nil)
if err != nil {
return nil, err
}
return &Adder{ return &Adder{
mr: mr,
ctx: ctx, ctx: ctx,
pinning: p, pinning: p,
blockstore: bs, blockstore: bs,
...@@ -87,7 +81,6 @@ func NewAdder(ctx context.Context, p pin.Pinner, bs bstore.GCBlockstore, ds dag. ...@@ -87,7 +81,6 @@ func NewAdder(ctx context.Context, p pin.Pinner, bs bstore.GCBlockstore, ds dag.
Wrap: false, Wrap: false,
Chunker: "", Chunker: "",
}, nil }, nil
} }
// Adder holds the switches passed to the `add` command. // Adder holds the switches passed to the `add` command.
...@@ -107,13 +100,28 @@ type Adder struct { ...@@ -107,13 +100,28 @@ type Adder struct {
NoCopy bool NoCopy bool
Chunker string Chunker string
root node.Node root node.Node
mr *mfs.Root mroot *mfs.Root
unlocker bs.Unlocker unlocker bs.Unlocker
tempRoot *cid.Cid tempRoot *cid.Cid
Prefix cid.Prefix
}
// mfsRoot lazily builds (and caches) the MFS root under which added
// files are assembled. The root directory node is stamped with the
// adder's CID prefix so that children created beneath it inherit the
// requested CID version.
func (adder *Adder) mfsRoot() (*mfs.Root, error) {
// Reuse a root that was already built here or injected via SetMfsRoot.
if adder.mroot != nil {
return adder.mroot, nil
}
rnode := unixfs.EmptyDirNode()
// Propagate the configured CID prefix to the root directory node.
rnode.SetPrefix(adder.Prefix)
mr, err := mfs.NewRoot(adder.ctx, adder.dagService, rnode, nil)
if err != nil {
return nil, err
}
// Cache so subsequent calls return the same root.
adder.mroot = mr
return adder.mroot, nil
} }
func (adder *Adder) SetMfsRoot(r *mfs.Root) { func (adder *Adder) SetMfsRoot(r *mfs.Root) {
adder.mr = r adder.mroot = r
} }
// Constructs a node from reader's data, and adds it. Doesn't pin. // Constructs a node from reader's data, and adds it. Doesn't pin.
...@@ -122,11 +130,13 @@ func (adder Adder) add(reader io.Reader) (node.Node, error) { ...@@ -122,11 +130,13 @@ func (adder Adder) add(reader io.Reader) (node.Node, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
params := ihelper.DagBuilderParams{ params := ihelper.DagBuilderParams{
Dagserv: adder.dagService, Dagserv: adder.dagService,
RawLeaves: adder.RawLeaves, RawLeaves: adder.RawLeaves,
Maxlinks: ihelper.DefaultLinksPerBlock, Maxlinks: ihelper.DefaultLinksPerBlock,
NoCopy: adder.NoCopy, NoCopy: adder.NoCopy,
Prefix: adder.Prefix,
} }
if adder.Trickle { if adder.Trickle {
...@@ -142,7 +152,11 @@ func (adder *Adder) RootNode() (node.Node, error) { ...@@ -142,7 +152,11 @@ func (adder *Adder) RootNode() (node.Node, error) {
return adder.root, nil return adder.root, nil
} }
root, err := adder.mr.GetValue().GetNode() mr, err := adder.mfsRoot()
if err != nil {
return nil, err
}
root, err := mr.GetValue().GetNode()
if err != nil { if err != nil {
return nil, err return nil, err
} }
...@@ -188,9 +202,13 @@ func (adder *Adder) PinRoot() error { ...@@ -188,9 +202,13 @@ func (adder *Adder) PinRoot() error {
} }
func (adder *Adder) Finalize() (node.Node, error) { func (adder *Adder) Finalize() (node.Node, error) {
root := adder.mr.GetValue() mr, err := adder.mfsRoot()
if err != nil {
return nil, err
}
root := mr.GetValue()
err := root.Flush() err = root.Flush()
if err != nil { if err != nil {
return nil, err return nil, err
} }
...@@ -203,7 +221,12 @@ func (adder *Adder) Finalize() (node.Node, error) { ...@@ -203,7 +221,12 @@ func (adder *Adder) Finalize() (node.Node, error) {
} }
name = children[0] name = children[0]
dir, ok := adder.mr.GetValue().(*mfs.Directory) mr, err := adder.mfsRoot()
if err != nil {
return nil, err
}
dir, ok := mr.GetValue().(*mfs.Directory)
if !ok { if !ok {
return nil, fmt.Errorf("root is not a directory") return nil, fmt.Errorf("root is not a directory")
} }
...@@ -219,7 +242,7 @@ func (adder *Adder) Finalize() (node.Node, error) { ...@@ -219,7 +242,7 @@ func (adder *Adder) Finalize() (node.Node, error) {
return nil, err return nil, err
} }
err = adder.mr.Close() err = mr.Close()
if err != nil { if err != nil {
return nil, err return nil, err
} }
...@@ -357,14 +380,18 @@ func (adder *Adder) addNode(node node.Node, path string) error { ...@@ -357,14 +380,18 @@ func (adder *Adder) addNode(node node.Node, path string) error {
node = pi.Node node = pi.Node
} }
mr, err := adder.mfsRoot()
if err != nil {
return err
}
dir := gopath.Dir(path) dir := gopath.Dir(path)
if dir != "." { if dir != "." {
if err := mfs.Mkdir(adder.mr, dir, true, false); err != nil { if err := mfs.Mkdir(mr, dir, true, false); err != nil {
return err return err
} }
} }
if err := mfs.PutNode(adder.mr, path, node); err != nil { if err := mfs.PutNode(mr, path, node); err != nil {
return err return err
} }
...@@ -406,6 +433,7 @@ func (adder *Adder) addFile(file files.File) error { ...@@ -406,6 +433,7 @@ func (adder *Adder) addFile(file files.File) error {
} }
dagnode := dag.NodeWithData(sdata) dagnode := dag.NodeWithData(sdata)
dagnode.SetPrefix(adder.Prefix)
_, err = adder.dagService.Add(dagnode) _, err = adder.dagService.Add(dagnode)
if err != nil { if err != nil {
return err return err
...@@ -439,7 +467,11 @@ func (adder *Adder) addFile(file files.File) error { ...@@ -439,7 +467,11 @@ func (adder *Adder) addFile(file files.File) error {
func (adder *Adder) addDir(dir files.File) error { func (adder *Adder) addDir(dir files.File) error {
log.Infof("adding directory: %s", dir.FileName()) log.Infof("adding directory: %s", dir.FileName())
err := mfs.Mkdir(adder.mr, dir.FileName(), true, false) mr, err := adder.mfsRoot()
if err != nil {
return err
}
err = mfs.Mkdir(mr, dir.FileName(), true, false)
if err != nil { if err != nil {
return err return err
} }
......
...@@ -13,7 +13,7 @@ func BalancedLayout(db *h.DagBuilderHelper) (node.Node, error) { ...@@ -13,7 +13,7 @@ func BalancedLayout(db *h.DagBuilderHelper) (node.Node, error) {
var root *h.UnixfsNode var root *h.UnixfsNode
for level := 0; !db.Done(); level++ { for level := 0; !db.Done(); level++ {
nroot := h.NewUnixfsNode() nroot := db.NewUnixfsNode()
db.SetPosInfo(nroot, 0) db.SetPosInfo(nroot, 0)
// add our old root as a child of the new root. // add our old root as a child of the new root.
...@@ -33,7 +33,7 @@ func BalancedLayout(db *h.DagBuilderHelper) (node.Node, error) { ...@@ -33,7 +33,7 @@ func BalancedLayout(db *h.DagBuilderHelper) (node.Node, error) {
} }
if root == nil { if root == nil {
root = h.NewUnixfsNode() root = db.NewUnixfsNode()
} }
out, err := db.Add(root) out, err := db.Add(root)
...@@ -72,7 +72,7 @@ func fillNodeRec(db *h.DagBuilderHelper, node *h.UnixfsNode, depth int, offset u ...@@ -72,7 +72,7 @@ func fillNodeRec(db *h.DagBuilderHelper, node *h.UnixfsNode, depth int, offset u
// while we have room AND we're not done // while we have room AND we're not done
for node.NumChildren() < db.Maxlinks() && !db.Done() { for node.NumChildren() < db.Maxlinks() && !db.Done() {
child := h.NewUnixfsNode() child := db.NewUnixfsNode()
db.SetPosInfo(child, offset) db.SetPosInfo(child, offset)
err := fillNodeRec(db, child, depth-1, offset) err := fillNodeRec(db, child, depth-1, offset)
......
...@@ -7,7 +7,9 @@ import ( ...@@ -7,7 +7,9 @@ import (
"github.com/ipfs/go-ipfs/commands/files" "github.com/ipfs/go-ipfs/commands/files"
"github.com/ipfs/go-ipfs/importer/chunk" "github.com/ipfs/go-ipfs/importer/chunk"
dag "github.com/ipfs/go-ipfs/merkledag" dag "github.com/ipfs/go-ipfs/merkledag"
ft "github.com/ipfs/go-ipfs/unixfs"
cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid"
node "gx/ipfs/Qmb3Hm9QDFmfYuET4pu7Kyg8JV78jFa1nvZx5vnCZsK4ck/go-ipld-format" node "gx/ipfs/Qmb3Hm9QDFmfYuET4pu7Kyg8JV78jFa1nvZx5vnCZsK4ck/go-ipld-format"
) )
...@@ -23,6 +25,7 @@ type DagBuilderHelper struct { ...@@ -23,6 +25,7 @@ type DagBuilderHelper struct {
batch *dag.Batch batch *dag.Batch
fullPath string fullPath string
stat os.FileInfo stat os.FileInfo
prefix cid.Prefix
} }
type DagBuilderParams struct { type DagBuilderParams struct {
...@@ -33,6 +36,9 @@ type DagBuilderParams struct { ...@@ -33,6 +36,9 @@ type DagBuilderParams struct {
// instead of using the unixfs TRaw type // instead of using the unixfs TRaw type
RawLeaves bool RawLeaves bool
// CID Prefix to use
Prefix cid.Prefix
// DAGService to write blocks to (required) // DAGService to write blocks to (required)
Dagserv dag.DAGService Dagserv dag.DAGService
...@@ -48,6 +54,7 @@ func (dbp *DagBuilderParams) New(spl chunk.Splitter) *DagBuilderHelper { ...@@ -48,6 +54,7 @@ func (dbp *DagBuilderParams) New(spl chunk.Splitter) *DagBuilderHelper {
dserv: dbp.Dagserv, dserv: dbp.Dagserv,
spl: spl, spl: spl,
rawLeaves: dbp.RawLeaves, rawLeaves: dbp.RawLeaves,
prefix: dbp.Prefix,
maxlinks: dbp.Maxlinks, maxlinks: dbp.Maxlinks,
batch: dbp.Dagserv.Batch(), batch: dbp.Dagserv.Batch(),
} }
...@@ -103,6 +110,26 @@ func (db *DagBuilderHelper) GetDagServ() dag.DAGService { ...@@ -103,6 +110,26 @@ func (db *DagBuilderHelper) GetDagServ() dag.DAGService {
return db.dserv return db.dserv
} }
// NewUnixfsNode returns a fresh UnixfsNode describing a regular file,
// pre-stamped with this builder's CID prefix.
func (db *DagBuilderHelper) NewUnixfsNode() *UnixfsNode {
	un := new(UnixfsNode)
	un.node = new(dag.ProtoNode)
	un.ufmt = &ft.FSNode{Type: ft.TFile}
	un.SetPrefix(db.prefix)
	return un
}
// NewUnixfsBlock returns a fresh UnixfsNode describing a raw data
// block, pre-stamped with this builder's CID prefix.
func (db *DagBuilderHelper) NewUnixfsBlock() *UnixfsNode {
	un := new(UnixfsNode)
	un.node = new(dag.ProtoNode)
	un.ufmt = &ft.FSNode{Type: ft.TRaw}
	un.SetPrefix(db.prefix)
	return un
}
// FillNodeLayer will add datanodes as children to the give node until // FillNodeLayer will add datanodes as children to the give node until
// at most db.indirSize ndoes are added // at most db.indirSize ndoes are added
// //
...@@ -143,7 +170,7 @@ func (db *DagBuilderHelper) GetNextDataNode() (*UnixfsNode, error) { ...@@ -143,7 +170,7 @@ func (db *DagBuilderHelper) GetNextDataNode() (*UnixfsNode, error) {
raw: true, raw: true,
}, nil }, nil
} else { } else {
blk := NewUnixfsBlock() blk := db.NewUnixfsBlock()
blk.SetData(data) blk.SetData(data)
return blk, nil return blk, nil
} }
......
...@@ -10,6 +10,7 @@ import ( ...@@ -10,6 +10,7 @@ import (
pi "github.com/ipfs/go-ipfs/thirdparty/posinfo" pi "github.com/ipfs/go-ipfs/thirdparty/posinfo"
ft "github.com/ipfs/go-ipfs/unixfs" ft "github.com/ipfs/go-ipfs/unixfs"
cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid"
node "gx/ipfs/Qmb3Hm9QDFmfYuET4pu7Kyg8JV78jFa1nvZx5vnCZsK4ck/go-ipld-format" node "gx/ipfs/Qmb3Hm9QDFmfYuET4pu7Kyg8JV78jFa1nvZx5vnCZsK4ck/go-ipld-format"
) )
...@@ -48,22 +49,6 @@ type UnixfsNode struct { ...@@ -48,22 +49,6 @@ type UnixfsNode struct {
posInfo *pi.PosInfo posInfo *pi.PosInfo
} }
// NewUnixfsNode creates a new Unixfs node to represent a file
func NewUnixfsNode() *UnixfsNode {
return &UnixfsNode{
node: new(dag.ProtoNode),
ufmt: &ft.FSNode{Type: ft.TFile},
}
}
// NewUnixfsBlock creates a new Unixfs node to represent a raw data block
func NewUnixfsBlock() *UnixfsNode {
return &UnixfsNode{
node: new(dag.ProtoNode),
ufmt: &ft.FSNode{Type: ft.TRaw},
}
}
// NewUnixfsNodeFromDag reconstructs a Unixfs node from a given dag node // NewUnixfsNodeFromDag reconstructs a Unixfs node from a given dag node
func NewUnixfsNodeFromDag(nd *dag.ProtoNode) (*UnixfsNode, error) { func NewUnixfsNodeFromDag(nd *dag.ProtoNode) (*UnixfsNode, error) {
mb, err := ft.FSNodeFromBytes(nd.Data()) mb, err := ft.FSNodeFromBytes(nd.Data())
...@@ -77,6 +62,10 @@ func NewUnixfsNodeFromDag(nd *dag.ProtoNode) (*UnixfsNode, error) { ...@@ -77,6 +62,10 @@ func NewUnixfsNodeFromDag(nd *dag.ProtoNode) (*UnixfsNode, error) {
}, nil }, nil
} }
// SetPrefix sets the CID prefix on the underlying dag node, controlling
// the CID version and hash used when the node is finalized.
func (n *UnixfsNode) SetPrefix(prefix cid.Prefix) {
n.node.SetPrefix(prefix)
}
func (n *UnixfsNode) NumChildren() int { func (n *UnixfsNode) NumChildren() int {
return n.ufmt.NumChildren() return n.ufmt.NumChildren()
} }
......
...@@ -18,13 +18,13 @@ import ( ...@@ -18,13 +18,13 @@ import (
const layerRepeat = 4 const layerRepeat = 4
func TrickleLayout(db *h.DagBuilderHelper) (node.Node, error) { func TrickleLayout(db *h.DagBuilderHelper) (node.Node, error) {
root := h.NewUnixfsNode() root := db.NewUnixfsNode()
if err := db.FillNodeLayer(root); err != nil { if err := db.FillNodeLayer(root); err != nil {
return nil, err return nil, err
} }
for level := 1; !db.Done(); level++ { for level := 1; !db.Done(); level++ {
for i := 0; i < layerRepeat && !db.Done(); i++ { for i := 0; i < layerRepeat && !db.Done(); i++ {
next := h.NewUnixfsNode() next := db.NewUnixfsNode()
if err := fillTrickleRec(db, next, level); err != nil { if err := fillTrickleRec(db, next, level); err != nil {
return nil, err return nil, err
} }
...@@ -54,7 +54,7 @@ func fillTrickleRec(db *h.DagBuilderHelper, node *h.UnixfsNode, depth int) error ...@@ -54,7 +54,7 @@ func fillTrickleRec(db *h.DagBuilderHelper, node *h.UnixfsNode, depth int) error
for i := 1; i < depth && !db.Done(); i++ { for i := 1; i < depth && !db.Done(); i++ {
for j := 0; j < layerRepeat && !db.Done(); j++ { for j := 0; j < layerRepeat && !db.Done(); j++ {
next := h.NewUnixfsNode() next := db.NewUnixfsNode()
if err := fillTrickleRec(db, next, i); err != nil { if err := fillTrickleRec(db, next, i); err != nil {
return err return err
} }
...@@ -117,7 +117,7 @@ func TrickleAppend(ctx context.Context, basen node.Node, db *h.DagBuilderHelper) ...@@ -117,7 +117,7 @@ func TrickleAppend(ctx context.Context, basen node.Node, db *h.DagBuilderHelper)
// Now, continue filling out tree like normal // Now, continue filling out tree like normal
for i := n; !db.Done(); i++ { for i := n; !db.Done(); i++ {
for j := 0; j < layerRepeat && !db.Done(); j++ { for j := 0; j < layerRepeat && !db.Done(); j++ {
next := h.NewUnixfsNode() next := db.NewUnixfsNode()
err := fillTrickleRec(db, next, i) err := fillTrickleRec(db, next, i)
if err != nil { if err != nil {
return nil, err return nil, err
...@@ -162,7 +162,7 @@ func appendFillLastChild(ctx context.Context, ufsn *h.UnixfsNode, depth int, lay ...@@ -162,7 +162,7 @@ func appendFillLastChild(ctx context.Context, ufsn *h.UnixfsNode, depth int, lay
// Partially filled depth layer // Partially filled depth layer
if layerFill != 0 { if layerFill != 0 {
for ; layerFill < layerRepeat && !db.Done(); layerFill++ { for ; layerFill < layerRepeat && !db.Done(); layerFill++ {
next := h.NewUnixfsNode() next := db.NewUnixfsNode()
err := fillTrickleRec(db, next, depth) err := fillTrickleRec(db, next, depth)
if err != nil { if err != nil {
return err return err
...@@ -211,7 +211,7 @@ func trickleAppendRec(ctx context.Context, ufsn *h.UnixfsNode, db *h.DagBuilderH ...@@ -211,7 +211,7 @@ func trickleAppendRec(ctx context.Context, ufsn *h.UnixfsNode, db *h.DagBuilderH
// Now, continue filling out tree like normal // Now, continue filling out tree like normal
for i := n; i < depth && !db.Done(); i++ { for i := n; i < depth && !db.Done(); i++ {
for j := 0; j < layerRepeat && !db.Done(); j++ { for j := 0; j < layerRepeat && !db.Done(); j++ {
next := h.NewUnixfsNode() next := db.NewUnixfsNode()
if err := fillTrickleRec(db, next, i); err != nil { if err := fillTrickleRec(db, next, i); err != nil {
return nil, err return nil, err
} }
......
...@@ -86,7 +86,7 @@ func (n *ProtoNode) EncodeProtobuf(force bool) ([]byte, error) { ...@@ -86,7 +86,7 @@ func (n *ProtoNode) EncodeProtobuf(force bool) ([]byte, error) {
if n.cached == nil { if n.cached == nil {
if n.Prefix.Codec == 0 { // unset if n.Prefix.Codec == 0 { // unset
n.Prefix = defaultCidPrefix n.Prefix = v0CidPrefix
} }
c, err := n.Prefix.Sum(n.encoded) c, err := n.Prefix.Sum(n.encoded)
if err != nil { if err != nil {
......
...@@ -28,13 +28,37 @@ type ProtoNode struct { ...@@ -28,13 +28,37 @@ type ProtoNode struct {
Prefix cid.Prefix Prefix cid.Prefix
} }
var defaultCidPrefix = cid.Prefix{ var v0CidPrefix = cid.Prefix{
Codec: cid.DagProtobuf, Codec: cid.DagProtobuf,
MhLength: -1, MhLength: -1,
MhType: mh.SHA2_256, MhType: mh.SHA2_256,
Version: 0, Version: 0,
} }
// v1CidPrefix is the prefix used for CIDv1 dag-protobuf nodes
// (SHA2-256, default digest length).
var v1CidPrefix = cid.Prefix{
Codec: cid.DagProtobuf,
MhLength: -1,
MhType: mh.SHA2_256,
Version: 1,
}
// PrefixForCidVersion returns the dag-protobuf CID prefix corresponding
// to the given CID version (0 or 1). It returns an error for any other
// version number.
func PrefixForCidVersion(version int) (cid.Prefix, error) {
switch version {
case 0:
return v0CidPrefix, nil
case 1:
return v1CidPrefix, nil
default:
return cid.Prefix{}, fmt.Errorf("unknown CID version: %d", version)
}
}
// SetPrefix sets the CID prefix used when computing this node's CID.
// The cached encoding and CID are cleared so they are recomputed with
// the new prefix on next use.
func (n *ProtoNode) SetPrefix(prefix cid.Prefix) {
n.Prefix = prefix
n.encoded = nil
n.cached = nil
}
type LinkSlice []*node.Link type LinkSlice []*node.Link
func (ls LinkSlice) Len() int { return len(ls) } func (ls LinkSlice) Len() int { return len(ls) }
...@@ -158,6 +182,9 @@ func (n *ProtoNode) Copy() node.Node { ...@@ -158,6 +182,9 @@ func (n *ProtoNode) Copy() node.Node {
nnode.links = make([]*node.Link, len(n.links)) nnode.links = make([]*node.Link, len(n.links))
copy(nnode.links, n.links) copy(nnode.links, n.links)
} }
nnode.Prefix = n.Prefix
return nnode return nnode
} }
...@@ -260,7 +287,7 @@ func (n *ProtoNode) Cid() *cid.Cid { ...@@ -260,7 +287,7 @@ func (n *ProtoNode) Cid() *cid.Cid {
} }
if n.Prefix.Codec == 0 { if n.Prefix.Codec == 0 {
n.Prefix = defaultCidPrefix n.Prefix = v0CidPrefix
} }
c, err := n.Prefix.Sum(n.RawData()) c, err := n.Prefix.Sum(n.RawData())
......
...@@ -15,6 +15,7 @@ import ( ...@@ -15,6 +15,7 @@ import (
uio "github.com/ipfs/go-ipfs/unixfs/io" uio "github.com/ipfs/go-ipfs/unixfs/io"
ufspb "github.com/ipfs/go-ipfs/unixfs/pb" ufspb "github.com/ipfs/go-ipfs/unixfs/pb"
cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid"
node "gx/ipfs/Qmb3Hm9QDFmfYuET4pu7Kyg8JV78jFa1nvZx5vnCZsK4ck/go-ipld-format" node "gx/ipfs/Qmb3Hm9QDFmfYuET4pu7Kyg8JV78jFa1nvZx5vnCZsK4ck/go-ipld-format"
) )
...@@ -57,6 +58,10 @@ func NewDirectory(ctx context.Context, name string, node node.Node, parent child ...@@ -57,6 +58,10 @@ func NewDirectory(ctx context.Context, name string, node node.Node, parent child
}, nil }, nil
} }
// SetPrefix sets the CID prefix on the directory's underlying unixfs
// directory builder, so nodes it produces use that prefix.
func (d *Directory) SetPrefix(prefix cid.Prefix) {
d.dirbuilder.SetPrefix(prefix)
}
// closeChild updates the child by the given name to the dag node 'nd' // closeChild updates the child by the given name to the dag node 'nd'
// and changes its own dag node // and changes its own dag node
func (d *Directory) closeChild(name string, nd node.Node, sync bool) error { func (d *Directory) closeChild(name string, nd node.Node, sync bool) error {
......
...@@ -134,6 +134,7 @@ func Mkdir(r *Root, pth string, mkparents bool, flush bool) error { ...@@ -134,6 +134,7 @@ func Mkdir(r *Root, pth string, mkparents bool, flush bool) error {
if err != nil { if err != nil {
return err return err
} }
mkd.SetPrefix(r.Prefix)
fsn = mkd fsn = mkd
} else if err != nil { } else if err != nil {
return err return err
...@@ -152,6 +153,7 @@ func Mkdir(r *Root, pth string, mkparents bool, flush bool) error { ...@@ -152,6 +153,7 @@ func Mkdir(r *Root, pth string, mkparents bool, flush bool) error {
return err return err
} }
} }
final.SetPrefix(r.Prefix)
if flush { if flush {
err := final.Flush() err := final.Flush()
......
...@@ -61,6 +61,9 @@ type Root struct { ...@@ -61,6 +61,9 @@ type Root struct {
dserv dag.DAGService dserv dag.DAGService
Type string Type string
// Prefix to use for any children created
Prefix cid.Prefix
} }
type PubFunc func(context.Context, *cid.Cid) error type PubFunc func(context.Context, *cid.Cid) error
......
...@@ -100,7 +100,7 @@ test_add_cat_5MB() { ...@@ -100,7 +100,7 @@ test_add_cat_5MB() {
test_cmp sha1_expected sha1_actual test_cmp sha1_expected sha1_actual
' '
test_expect_success "'ipfs add bigfile' succeeds" ' test_expect_success "'ipfs add $ADD_FLAGS bigfile' succeeds" '
ipfs add $ADD_FLAGS mountdir/bigfile >actual || ipfs add $ADD_FLAGS mountdir/bigfile >actual ||
test_fsh cat daemon_err test_fsh cat daemon_err
' '
...@@ -142,6 +142,9 @@ test_add_cat_raw() { ...@@ -142,6 +142,9 @@ test_add_cat_raw() {
} }
test_add_cat_expensive() { test_add_cat_expensive() {
ADD_FLAGS="$1"
HASH="$2"
test_expect_success EXPENSIVE "generate 100MB file using go-random" ' test_expect_success EXPENSIVE "generate 100MB file using go-random" '
random 104857600 42 >mountdir/bigfile random 104857600 42 >mountdir/bigfile
' '
...@@ -152,12 +155,11 @@ test_add_cat_expensive() { ...@@ -152,12 +155,11 @@ test_add_cat_expensive() {
test_cmp sha1_expected sha1_actual test_cmp sha1_expected sha1_actual
' '
test_expect_success EXPENSIVE "ipfs add bigfile succeeds" ' test_expect_success EXPENSIVE "ipfs add $ADD_FLAGS bigfile succeeds" '
ipfs add mountdir/bigfile >actual ipfs add $ADD_FLAGS mountdir/bigfile >actual
' '
test_expect_success EXPENSIVE "ipfs add bigfile output looks good" ' test_expect_success EXPENSIVE "ipfs add bigfile output looks good" '
HASH="QmU9SWAPPmNEKZB8umYMmjYvN7VyHqABNvdA6GUi4MMEz3" &&
echo "added $HASH bigfile" >expected && echo "added $HASH bigfile" >expected &&
test_cmp expected actual test_cmp expected actual
' '
...@@ -391,6 +393,16 @@ MARS="zb2rhZdTkQNawVajsTNiYc9cTPHqgLdJVvBRkZok9RjkgQYRU" ...@@ -391,6 +393,16 @@ MARS="zb2rhZdTkQNawVajsTNiYc9cTPHqgLdJVvBRkZok9RjkgQYRU"
VENUS="zb2rhn6TGvnUaMAg4VV4y9HVx5W42HihcH4jsyrDv8mkepFqq" VENUS="zb2rhn6TGvnUaMAg4VV4y9HVx5W42HihcH4jsyrDv8mkepFqq"
add_directory '--raw-leaves' add_directory '--raw-leaves'
PLANETS="zdj7Wnbun6P41Z5ddTkNvZaDTmQ8ZLdiKFcJrL9sV87rPScMP"
MARS="zb2rhZdTkQNawVajsTNiYc9cTPHqgLdJVvBRkZok9RjkgQYRU"
VENUS="zb2rhn6TGvnUaMAg4VV4y9HVx5W42HihcH4jsyrDv8mkepFqq"
add_directory '--cid-version=1'
PLANETS="zdj7WiC51v78BjBcmZR7uuBvmDWxSn5EDr5MiyTwE18e8qvb7"
MARS="zdj7WWx6fGNrNGkdpkuTAxCjKbQ1pPtarqA6VQhedhLTZu34J"
VENUS="zdj7WbB1BUF8WejmVpQCmMLd1RbPnxJtvAj1Lep6eTmXRFbrz"
add_directory '--cid-version=1 --raw-leaves=false'
test_expect_success "'ipfs add -rn' succeeds" ' test_expect_success "'ipfs add -rn' succeeds" '
mkdir -p mountdir/moons/jupiter && mkdir -p mountdir/moons/jupiter &&
...@@ -425,7 +437,20 @@ test_add_cat_5MB "" "QmSr7FqYkxYWGoSfy8ZiaMWQ5vosb18DQGCzjwEQnVHkTb" ...@@ -425,7 +437,20 @@ test_add_cat_5MB "" "QmSr7FqYkxYWGoSfy8ZiaMWQ5vosb18DQGCzjwEQnVHkTb"
test_add_cat_5MB --raw-leaves "QmbdLHCmdi48eM8T7D67oXjA1S2Puo8eMfngdHhdPukFd6" test_add_cat_5MB --raw-leaves "QmbdLHCmdi48eM8T7D67oXjA1S2Puo8eMfngdHhdPukFd6"
test_add_cat_expensive # note: the specified hash implies that internal nodes are stored
# using CidV1 and leaves are stored using raw blocks
test_add_cat_5MB --cid-version=1 "zdj7WiiaedqVBXjX4SNqx3jfuZideDqdLYnDzCDJ66JDMK9o2"
# note: the specified hash implies that internal nodes are stored
# using CidV1 and leaves are stored using CidV1 but using the legacy
# format (i.e. not raw)
test_add_cat_5MB '--cid-version=1 --raw-leaves=false' "zdj7WfgEsj897BBZj2mcfsRLhaPZcCixPV2G7DkWgF1Wdr64P"
test_add_cat_expensive "" "QmU9SWAPPmNEKZB8umYMmjYvN7VyHqABNvdA6GUi4MMEz3"
# note: the specified hash implies that internal nodes are stored
# using CidV1 and leaves are stored using raw blocks
test_add_cat_expensive "--cid-version=1" "zdj7WcatQrtuE4WMkS4XsfsMixuQN2po4irkYhqxeJyW1wgCq"
test_add_named_pipe " Post http://$API_ADDR/api/v0/add?encoding=json&progress=true&r=true&stream-channels=true:" test_add_named_pipe " Post http://$API_ADDR/api/v0/add?encoding=json&progress=true&r=true&stream-channels=true:"
...@@ -433,6 +458,12 @@ test_add_pwd_is_symlink ...@@ -433,6 +458,12 @@ test_add_pwd_is_symlink
test_add_cat_raw test_add_cat_raw
test_expect_success "ipfs add --cid-version=9 fails" '
echo "context" > afile.txt &&
test_must_fail ipfs add --cid-version=9 afile.txt 2>&1 | tee add_out &&
grep -q "unknown CID version" add_out
'
test_kill_ipfs_daemon test_kill_ipfs_daemon
# should work offline # should work offline
......
...@@ -27,6 +27,14 @@ added QmYC3u5jGWuyFwvTxtvLYm2K3SpWZ31tg3NjpVVvh9cJaJ _jo7/wzvsihy ...@@ -27,6 +27,14 @@ added QmYC3u5jGWuyFwvTxtvLYm2K3SpWZ31tg3NjpVVvh9cJaJ _jo7/wzvsihy
added QmQkib3f9XNX5sj6WEahLUPFpheTcwSRJwUCSvjcv8b9by _jo7 added QmQkib3f9XNX5sj6WEahLUPFpheTcwSRJwUCSvjcv8b9by _jo7
added QmNQoesMj1qp8ApE51NbtTjFYksyzkezPD4cat7V2kzbKN ' added QmNQoesMj1qp8ApE51NbtTjFYksyzkezPD4cat7V2kzbKN '
add_w_d1_v1='added zb2rhjXyHbbgwgtAUwHtpBd8iXLgK22ZjVmaiJSMNmqBTpXS3 _jo7/-s782qgs
added zb2rhi6PQqQFbS4QsvrV8sL9ue1fvFoqtLVqogNPCZri8rquN _jo7/15totauzkak-
added zb2rhjQthC6LgnNZztpsF9LcfPxznh3cJnmzUx8dnSqNqJ8Yz _jo7/galecuirrj4r
added zb2rhYh9hgDw1DpaZfLUU5MkKNezPWjPTkgGQPiTyLpZYu3jn _jo7/mzo50r-1xidf5zx
added zb2rhZK5xwEUhY4uskfj4sn979aCH27cnqseVVznYDn7NFWtt _jo7/wzvsihy
added zdj7WfNC8EZchqskczxsgrVEqwLVpksQ9B5kopf391jVbCGwv _jo7
added zdj7Wn5jf686mfYE8gUKWzY7aTjp5eAQcecD8q4ZtqLJbDNxe '
add_w_d2='added Qme987pqNBhZZXy4ckeXiR7zaRQwBabB7fTgHurW2yJfNu 4r93 add_w_d2='added Qme987pqNBhZZXy4ckeXiR7zaRQwBabB7fTgHurW2yJfNu 4r93
added QmU9Jqks8TPu4vFr6t7EKkAKQrSJuEujNj1AkzoCeTEDFJ gnz66h/1k0xpx34 added QmU9Jqks8TPu4vFr6t7EKkAKQrSJuEujNj1AkzoCeTEDFJ gnz66h/1k0xpx34
added QmSLYZycXAufRw3ePMVH2brbtYWCcWsmksGLbHcT8ia9Ke gnz66h/9cwudvacx added QmSLYZycXAufRw3ePMVH2brbtYWCcWsmksGLbHcT8ia9Ke gnz66h/9cwudvacx
...@@ -133,6 +141,14 @@ test_add_w() { ...@@ -133,6 +141,14 @@ test_add_w() {
test_sort_cmp expected actual test_sort_cmp expected actual
' '
test_expect_success "ipfs add -w -r (dir) --cid-version=1 succeeds" '
ipfs add -r -w --cid-version=1 m/t_1wp-8a2/_jo7 >actual
'
test_expect_success "ipfs add -w -r (dir) --cid-version=1 is correct" '
echo "$add_w_d1_v1" >expected &&
test_sort_cmp expected actual
'
} }
test_init_ipfs test_init_ipfs
......
...@@ -27,6 +27,17 @@ test_add_symlinks() { ...@@ -27,6 +27,17 @@ test_add_symlinks() {
test_cmp filehash_exp filehash_out test_cmp filehash_exp filehash_out
' '
test_expect_success "ipfs add --cid-version=1 files succeeds" '
ipfs add -q -r --cid-version=1 files >filehash_all &&
tail -n 1 filehash_all >filehash_out
'
test_expect_success "output looks good" '
# note this hash implies all internal nodes are stored using CidV1
echo zdj7WZDQ2xMmr4qn6aRZTsE9fCUs2KmvPigpHdpssqUobwcWK > filehash_exp &&
test_cmp filehash_exp filehash_out
'
test_expect_success "adding a symlink adds the link itself" ' test_expect_success "adding a symlink adds the link itself" '
ipfs add -q files/bar/baz > goodlink_out ipfs add -q files/bar/baz > goodlink_out
' '
......
...@@ -8,6 +8,7 @@ import ( ...@@ -8,6 +8,7 @@ import (
mdag "github.com/ipfs/go-ipfs/merkledag" mdag "github.com/ipfs/go-ipfs/merkledag"
format "github.com/ipfs/go-ipfs/unixfs" format "github.com/ipfs/go-ipfs/unixfs"
hamt "github.com/ipfs/go-ipfs/unixfs/hamt" hamt "github.com/ipfs/go-ipfs/unixfs/hamt"
cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid"
node "gx/ipfs/Qmb3Hm9QDFmfYuET4pu7Kyg8JV78jFa1nvZx5vnCZsK4ck/go-ipld-format" node "gx/ipfs/Qmb3Hm9QDFmfYuET4pu7Kyg8JV78jFa1nvZx5vnCZsK4ck/go-ipld-format"
) )
...@@ -79,6 +80,17 @@ func NewDirectoryFromNode(dserv mdag.DAGService, nd node.Node) (*Directory, erro ...@@ -79,6 +80,17 @@ func NewDirectoryFromNode(dserv mdag.DAGService, nd node.Node) (*Directory, erro
} }
} }
// SetPrefix sets the prefix of the root node. It is a no-op when the
// directory is backed by a HAMT shard rather than a plain dirnode
// (see the FIXME below).
func (d *Directory) SetPrefix(prefix cid.Prefix) {
if d.dirnode != nil {
d.dirnode.SetPrefix(prefix)
}
// FIXME: Should we do this? -- kevina
//if d.shard != nil {
// d.shard.SetPrefix(prefix)
//}
}
// AddChild adds a (name, key)-pair to the root node. // AddChild adds a (name, key)-pair to the root node.
func (d *Directory) AddChild(ctx context.Context, name string, nd node.Node) error { func (d *Directory) AddChild(ctx context.Context, name string, nd node.Node) error {
if d.shard == nil { if d.shard == nil {
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论