diff --git a/Godeps/_workspace/src/github.com/ipfs/ipfs-update/test-dist/testnew.go b/Godeps/_workspace/src/github.com/ipfs/ipfs-update/test-dist/testnew.go index 22cc1d86..b50c92f3 100644 --- a/Godeps/_workspace/src/github.com/ipfs/ipfs-update/test-dist/testnew.go +++ b/Godeps/_workspace/src/github.com/ipfs/ipfs-update/test-dist/testnew.go @@ -262,7 +262,7 @@ func testFileAdd(tdir, bin string) error { stump.VLog(" - checking that we can add and cat a file") text := "hello world! This node should work" data := bytes.NewBufferString(text) - c := exec.Command(bin, "add", "-q") + c := exec.Command(bin, "add", "-q", "--progress=false") c.Env = []string{"IPFS_PATH=" + tdir} c.Stdin = data out, err := c.CombinedOutput() diff --git a/Godeps/_workspace/src/github.com/ipfs/ipfs-update/util/utils.go b/Godeps/_workspace/src/github.com/ipfs/ipfs-update/util/utils.go index 6b1f0d69..2a7ee53f 100644 --- a/Godeps/_workspace/src/github.com/ipfs/ipfs-update/util/utils.go +++ b/Godeps/_workspace/src/github.com/ipfs/ipfs-update/util/utils.go @@ -22,6 +22,12 @@ var ( IpfsVersionPath = "/ipns/dist.ipfs.io" ) +func init() { + if dist := os.Getenv("IPFS_DIST_PATH"); dist != "" { + IpfsVersionPath = dist + } +} + const fetchSizeLimit = 1024 * 1024 * 512 func ApiEndpoint(ipfspath string) (string, error) { diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net/convert.go b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net/convert.go index a4437292..03f03412 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net/convert.go +++ b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net/convert.go @@ -37,7 +37,7 @@ func FromNetAddr(a net.Addr) (ma.Multiaddr, error) { } // Encapsulate - return ipm.Encapsulate(tcpm), nil + return ma.Encapsulate(ipm, tcpm), nil case "udp", "upd4", "udp6": ac, ok := a.(*net.UDPAddr) @@ -58,7 +58,7 @@ func FromNetAddr(a net.Addr) (ma.Multiaddr, error) { } // Encapsulate - return ipm.Encapsulate(udpm), nil + return ma.Encapsulate(ipm, udpm), nil case "utp", "utp4", "utp6": acc, ok := a.(*utp.Addr) @@ -85,7 +85,7 @@ func FromNetAddr(a net.Addr) (ma.Multiaddr, error) { } // Encapsulate - return ipm.Encapsulate(utpm), nil + return ma.Encapsulate(ipm, utpm), nil case "ip", "ip4", "ip6": ac, ok := a.(*net.IPAddr) diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net/ip.go b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net/ip.go index e9b3dcfe..7f210d62 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net/ip.go +++ b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net/ip.go @@ -63,7 +63,7 @@ func IsIPLoopback(m ma.Multiaddr) bool { } // /ip6/::1 - if IP6Loopback.Equal(m) || IP6LinkLocalLoopback.Equal(m) { + if ma.Equal(IP6Loopback, m) || ma.Equal(IP6LinkLocalLoopback, m) { return true } @@ -81,5 +81,5 @@ func IsIP6LinkLocal(m ma.Multiaddr) bool { // IsIPUnspecified returns whether a Multiaddr is am Unspecified IP address // This means either /ip4/0.0.0.0 or /ip6/:: func IsIPUnspecified(m ma.Multiaddr) bool { - return IP4Unspecified.Equal(m) || IP6Unspecified.Equal(m) + return ma.Equal(IP4Unspecified, m) || ma.Equal(IP6Unspecified, m) } diff --git a/ipfs-3-to-4/base32/.gx/lastpubver b/ipfs-3-to-4/base32/.gx/lastpubver new file mode 100644 index 00000000..e15b5687 --- /dev/null +++ b/ipfs-3-to-4/base32/.gx/lastpubver @@ -0,0 +1 @@ +0.0.0: Qmb1DA2A9LS2wR4FFweB4uEDomFsdmnw1VLawLE1yQzudj diff --git a/ipfs-3-to-4/base32/base32.go b/ipfs-3-to-4/base32/base32.go new file mode 100644 index 00000000..99db4a46 --- /dev/null 
+++ b/ipfs-3-to-4/base32/base32.go @@ -0,0 +1,458 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package base32 implements base32 encoding as specified by RFC 4648. +package base32 + +import ( + "bytes" + "io" + "strconv" + "strings" +) + +/* + * Encodings + */ + +// An Encoding is a radix 32 encoding/decoding scheme, defined by a +// 32-character alphabet. The most common is the "base32" encoding +// introduced for SASL GSSAPI and standardized in RFC 4648. +// The alternate "base32hex" encoding is used in DNSSEC. +type Encoding struct { + encode string + decodeMap [256]byte + padChar rune +} + +const ( + StdPadding rune = '=' + NoPadding rune = -1 +) + +const encodeStd = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567" +const encodeHex = "0123456789ABCDEFGHIJKLMNOPQRSTUV" + +// NewEncoding returns a new Encoding defined by the given alphabet, +// which must be a 32-byte string. +func NewEncoding(encoder string) *Encoding { + e := new(Encoding) + e.padChar = StdPadding + e.encode = encoder + for i := 0; i < len(e.decodeMap); i++ { + e.decodeMap[i] = 0xFF + } + for i := 0; i < len(encoder); i++ { + e.decodeMap[encoder[i]] = byte(i) + } + return e +} + +// WithPadding creates a new encoding identical to enc except +// with a specified padding character, or NoPadding to disable padding. +func (enc Encoding) WithPadding(padding rune) *Encoding { + enc.padChar = padding + return &enc +} + +// StdEncoding is the standard base32 encoding, as defined in +// RFC 4648. +var StdEncoding = NewEncoding(encodeStd) + +// HexEncoding is the ``Extended Hex Alphabet'' defined in RFC 4648. +// It is typically used in DNS. +var HexEncoding = NewEncoding(encodeHex) + +var RawStdEncoding = NewEncoding(encodeStd).WithPadding(NoPadding) +var RawHexEncoding = NewEncoding(encodeHex).WithPadding(NoPadding) + +var removeNewlinesMapper = func(r rune) rune { + if r == '\r' || r == '\n' { + return -1 + } + return r +} + +/* + * Encoder + */ + +// Encode encodes src using the encoding enc, writing +// EncodedLen(len(src)) bytes to dst. +// +// The encoding pads the output to a multiple of 8 bytes, +// so Encode is not appropriate for use on individual blocks +// of a large data stream. Use NewEncoder() instead. +func (enc *Encoding) Encode(dst, src []byte) { + if len(src) == 0 { + return + } + + for len(src) > 0 { + var carry byte + + // Unpack 8x 5-bit source blocks into a 5 byte + // destination quantum + switch len(src) { + default: + dst[7] = enc.encode[src[4]&0x1F] + carry = src[4] >> 5 + fallthrough + case 4: + dst[6] = enc.encode[carry|(src[3]<<3)&0x1F] + dst[5] = enc.encode[(src[3]>>2)&0x1F] + carry = src[3] >> 7 + fallthrough + case 3: + dst[4] = enc.encode[carry|(src[2]<<1)&0x1F] + carry = (src[2] >> 4) & 0x1F + fallthrough + case 2: + dst[3] = enc.encode[carry|(src[1]<<4)&0x1F] + dst[2] = enc.encode[(src[1]>>1)&0x1F] + carry = (src[1] >> 6) & 0x1F + fallthrough + case 1: + dst[1] = enc.encode[carry|(src[0]<<2)&0x1F] + dst[0] = enc.encode[src[0]>>3] + } + + // Pad the final quantum + if len(src) < 5 { + if enc.padChar != NoPadding { + dst[7] = byte(enc.padChar) + if len(src) < 4 { + dst[6] = byte(enc.padChar) + dst[5] = byte(enc.padChar) + if len(src) < 3 { + dst[4] = byte(enc.padChar) + if len(src) < 2 { + dst[3] = byte(enc.padChar) + dst[2] = byte(enc.padChar) + } + } + } + } + break + } + src = src[5:] + dst = dst[8:] + } +} + +// EncodeToString returns the base32 encoding of src. 
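+// The result has length EncodedLen(len(src)): padded encodings emit whole
+// 8-character quanta, while NoPadding encodings emit only the characters
+// the 5-bit groups require.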
+func (enc *Encoding) EncodeToString(src []byte) string { + buf := make([]byte, enc.EncodedLen(len(src))) + enc.Encode(buf, src) + return string(buf) +} + +type encoder struct { + err error + enc *Encoding + w io.Writer + buf [5]byte // buffered data waiting to be encoded + nbuf int // number of bytes in buf + out [1024]byte // output buffer +} + +func (e *encoder) Write(p []byte) (n int, err error) { + if e.err != nil { + return 0, e.err + } + + // Leading fringe. + if e.nbuf > 0 { + var i int + for i = 0; i < len(p) && e.nbuf < 5; i++ { + e.buf[e.nbuf] = p[i] + e.nbuf++ + } + n += i + p = p[i:] + if e.nbuf < 5 { + return + } + e.enc.Encode(e.out[0:], e.buf[0:]) + if _, e.err = e.w.Write(e.out[0:8]); e.err != nil { + return n, e.err + } + e.nbuf = 0 + } + + // Large interior chunks. + for len(p) >= 5 { + nn := len(e.out) / 8 * 5 + if nn > len(p) { + nn = len(p) + nn -= nn % 5 + } + e.enc.Encode(e.out[0:], p[0:nn]) + if _, e.err = e.w.Write(e.out[0 : nn/5*8]); e.err != nil { + return n, e.err + } + n += nn + p = p[nn:] + } + + // Trailing fringe. + for i := 0; i < len(p); i++ { + e.buf[i] = p[i] + } + e.nbuf = len(p) + n += len(p) + return +} + +// Close flushes any pending output from the encoder. +// It is an error to call Write after calling Close. +func (e *encoder) Close() error { + // If there's anything left in the buffer, flush it out + if e.err == nil && e.nbuf > 0 { + e.enc.Encode(e.out[0:], e.buf[0:e.nbuf]) + e.nbuf = 0 + _, e.err = e.w.Write(e.out[0:8]) + } + return e.err +} + +// NewEncoder returns a new base32 stream encoder. Data written to +// the returned writer will be encoded using enc and then written to w. +// Base32 encodings operate in 5-byte blocks; when finished +// writing, the caller must Close the returned encoder to flush any +// partially written blocks. +func NewEncoder(enc *Encoding, w io.Writer) io.WriteCloser { + return &encoder{enc: enc, w: w} +} + +// EncodedLen returns the length in bytes of the base32 encoding +// of an input buffer of length n. +func (enc *Encoding) EncodedLen(n int) int { + if enc.padChar == NoPadding { + return (n*8 + 4) / 5 // minimum # chars at 5 bits per char + } + return (n + 4) / 5 * 8 +} + +/* + * Decoder + */ + +type CorruptInputError int64 + +func (e CorruptInputError) Error() string { + return "illegal base32 data at input byte " + strconv.FormatInt(int64(e), 10) +} + +// decode is like Decode but returns an additional 'end' value, which +// indicates if end-of-message padding was encountered and thus any +// additional data is an error. This method assumes that src has been +// stripped of all supported whitespace ('\r' and '\n'). 
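+// Each 8-character quantum yields at most 5 decoded bytes; dlen records how
+// many characters of the quantum were data rather than padding, which is why
+// the packing switch below only handles dlen values 2, 4, 5, 7 and 8.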
+func (enc *Encoding) decode(dst, src []byte) (n int, end bool, err error) { + olen := len(src) + for len(src) > 0 && !end { + // Decode quantum using the base32 alphabet + var dbuf [8]byte + dlen := 8 + + for j := 0; j < 8; { + if len(src) == 0 { + if enc.padChar != NoPadding { + return n, false, CorruptInputError(olen - len(src) - j) + } + dlen = j + break + } + in := src[0] + src = src[1:] + if in == byte(enc.padChar) && j >= 2 && len(src) < 8 { + if enc.padChar == NoPadding { + return n, false, CorruptInputError(olen) + } + + // We've reached the end and there's padding + if len(src)+j < 8-1 { + // not enough padding + return n, false, CorruptInputError(olen) + } + for k := 0; k < 8-1-j; k++ { + if len(src) > k && src[k] != byte(enc.padChar) { + // incorrect padding + return n, false, CorruptInputError(olen - len(src) + k - 1) + } + } + dlen, end = j, true + // 7, 5 and 2 are not valid padding lengths, and so 1, 3 and 6 are not + // valid dlen values. See RFC 4648 Section 6 "Base 32 Encoding" listing + // the five valid padding lengths, and Section 9 "Illustrations and + // Examples" for an illustration for how the 1st, 3rd and 6th base32 + // src bytes do not yield enough information to decode a dst byte. + if dlen == 1 || dlen == 3 || dlen == 6 { + return n, false, CorruptInputError(olen - len(src) - 1) + } + break + } + dbuf[j] = enc.decodeMap[in] + if dbuf[j] == 0xFF { + return n, false, CorruptInputError(olen - len(src) - 1) + } + j++ + } + + // Pack 8x 5-bit source blocks into 5 byte destination + // quantum + switch dlen { + case 8: + dst[4] = dbuf[6]<<5 | dbuf[7] + fallthrough + case 7: + dst[3] = dbuf[4]<<7 | dbuf[5]<<2 | dbuf[6]>>3 + fallthrough + case 5: + dst[2] = dbuf[3]<<4 | dbuf[4]>>1 + fallthrough + case 4: + dst[1] = dbuf[1]<<6 | dbuf[2]<<1 | dbuf[3]>>4 + fallthrough + case 2: + dst[0] = dbuf[0]<<3 | dbuf[1]>>2 + } + + if len(dst) > 5 { + dst = dst[5:] + } + + switch dlen { + case 2: + n += 1 + case 4: + n += 2 + case 5: + n += 3 + case 7: + n += 4 + case 8: + n += 5 + } + } + return n, end, nil +} + +// Decode decodes src using the encoding enc. It writes at most +// DecodedLen(len(src)) bytes to dst and returns the number of bytes +// written. If src contains invalid base32 data, it will return the +// number of bytes successfully written and CorruptInputError. +// New line characters (\r and \n) are ignored. +func (enc *Encoding) Decode(dst, src []byte) (n int, err error) { + src = bytes.Map(removeNewlinesMapper, src) + n, _, err = enc.decode(dst, src) + return +} + +// DecodeString returns the bytes represented by the base32 string s. +func (enc *Encoding) DecodeString(s string) ([]byte, error) { + s = strings.Map(removeNewlinesMapper, s) + dbuf := make([]byte, enc.DecodedLen(len(s))) + n, _, err := enc.decode(dbuf, []byte(s)) + return dbuf[:n], err +} + +type decoder struct { + err error + enc *Encoding + r io.Reader + end bool // saw end of message + buf [1024]byte // leftover input + nbuf int + out []byte // leftover decoded output + outbuf [1024 / 8 * 5]byte +} + +func (d *decoder) Read(p []byte) (n int, err error) { + if d.err != nil { + return 0, d.err + } + + // Use leftover decoded output from last read. + if len(d.out) > 0 { + n = copy(p, d.out) + d.out = d.out[n:] + return n, nil + } + + // Read a chunk. 
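+	// Every 8 encoded bytes decode to 5 output bytes, so size the read to
+	// fill p, clamped to at least one full quantum and at most the size of
+	// the internal buffer.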
+ nn := len(p) / 5 * 8 + if nn < 8 { + nn = 8 + } + if nn > len(d.buf) { + nn = len(d.buf) + } + nn, d.err = io.ReadAtLeast(d.r, d.buf[d.nbuf:nn], 8-d.nbuf) + d.nbuf += nn + if d.nbuf < 8 { + return 0, d.err + } + + // Decode chunk into p, or d.out and then p if p is too small. + nr := d.nbuf / 8 * 8 + nw := d.nbuf / 8 * 5 + if nw > len(p) { + nw, d.end, d.err = d.enc.decode(d.outbuf[0:], d.buf[0:nr]) + d.out = d.outbuf[0:nw] + n = copy(p, d.out) + d.out = d.out[n:] + } else { + n, d.end, d.err = d.enc.decode(p, d.buf[0:nr]) + } + d.nbuf -= nr + for i := 0; i < d.nbuf; i++ { + d.buf[i] = d.buf[i+nr] + } + + if d.err == nil { + d.err = err + } + return n, d.err +} + +type newlineFilteringReader struct { + wrapped io.Reader +} + +func (r *newlineFilteringReader) Read(p []byte) (int, error) { + n, err := r.wrapped.Read(p) + for n > 0 { + offset := 0 + for i, b := range p[0:n] { + if b != '\r' && b != '\n' { + if i != offset { + p[offset] = b + } + offset++ + } + } + if offset > 0 { + return offset, err + } + // Previous buffer entirely whitespace, read again + n, err = r.wrapped.Read(p) + } + return n, err +} + +// NewDecoder constructs a new base32 stream decoder. +func NewDecoder(enc *Encoding, r io.Reader) io.Reader { + return &decoder{enc: enc, r: &newlineFilteringReader{r}} +} + +// DecodedLen returns the maximum length in bytes of the decoded data +// corresponding to n bytes of base32-encoded data. +func (enc *Encoding) DecodedLen(n int) int { + if enc.padChar == NoPadding { + return (n*5 + 7) / 8 + } + + return n / 8 * 5 +} diff --git a/ipfs-3-to-4/base32/base32_test.go b/ipfs-3-to-4/base32/base32_test.go new file mode 100644 index 00000000..8212dad3 --- /dev/null +++ b/ipfs-3-to-4/base32/base32_test.go @@ -0,0 +1,355 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package base32 + +import ( + "bytes" + "io" + "io/ioutil" + "math/rand" + "strings" + "testing" +) + +type testpair struct { + decoded, encoded string +} + +var pairs = []testpair{ + // RFC 4648 examples + {"", ""}, + {"f", "MY======"}, + {"fo", "MZXQ===="}, + {"foo", "MZXW6==="}, + {"foob", "MZXW6YQ="}, + {"fooba", "MZXW6YTB"}, + {"foobar", "MZXW6YTBOI======"}, + + // Wikipedia examples, converted to base32 + {"sure.", "ON2XEZJO"}, + {"sure", "ON2XEZI="}, + {"sur", "ON2XE==="}, + {"su", "ON2Q===="}, + {"leasure.", "NRSWC43VOJSS4==="}, + {"easure.", "MVQXG5LSMUXA===="}, + {"asure.", "MFZXK4TFFY======"}, + {"sure.", "ON2XEZJO"}, +} + +var bigtest = testpair{ + "Twas brillig, and the slithy toves", + "KR3WC4ZAMJZGS3DMNFTSYIDBNZSCA5DIMUQHG3DJORUHSIDUN53GK4Y=", +} + +func testEqual(t *testing.T, msg string, args ...interface{}) bool { + if args[len(args)-2] != args[len(args)-1] { + t.Errorf(msg, args...) 
+ return false + } + return true +} + +func TestEncode(t *testing.T) { + for _, p := range pairs { + got := StdEncoding.EncodeToString([]byte(p.decoded)) + testEqual(t, "Encode(%q) = %q, want %q", p.decoded, got, p.encoded) + } +} + +func TestEncoder(t *testing.T) { + for _, p := range pairs { + bb := &bytes.Buffer{} + encoder := NewEncoder(StdEncoding, bb) + encoder.Write([]byte(p.decoded)) + encoder.Close() + testEqual(t, "Encode(%q) = %q, want %q", p.decoded, bb.String(), p.encoded) + } +} + +func TestEncoderBuffering(t *testing.T) { + input := []byte(bigtest.decoded) + for bs := 1; bs <= 12; bs++ { + bb := &bytes.Buffer{} + encoder := NewEncoder(StdEncoding, bb) + for pos := 0; pos < len(input); pos += bs { + end := pos + bs + if end > len(input) { + end = len(input) + } + n, err := encoder.Write(input[pos:end]) + testEqual(t, "Write(%q) gave error %v, want %v", input[pos:end], err, error(nil)) + testEqual(t, "Write(%q) gave length %v, want %v", input[pos:end], n, end-pos) + } + err := encoder.Close() + testEqual(t, "Close gave error %v, want %v", err, error(nil)) + testEqual(t, "Encoding/%d of %q = %q, want %q", bs, bigtest.decoded, bb.String(), bigtest.encoded) + } +} + +func TestDecode(t *testing.T) { + for _, p := range pairs { + dbuf := make([]byte, StdEncoding.DecodedLen(len(p.encoded))) + count, end, err := StdEncoding.decode(dbuf, []byte(p.encoded)) + testEqual(t, "Decode(%q) = error %v, want %v", p.encoded, err, error(nil)) + testEqual(t, "Decode(%q) = length %v, want %v", p.encoded, count, len(p.decoded)) + if len(p.encoded) > 0 { + testEqual(t, "Decode(%q) = end %v, want %v", p.encoded, end, (p.encoded[len(p.encoded)-1] == '=')) + } + testEqual(t, "Decode(%q) = %q, want %q", p.encoded, + string(dbuf[0:count]), + p.decoded) + + dbuf, err = StdEncoding.DecodeString(p.encoded) + testEqual(t, "DecodeString(%q) = error %v, want %v", p.encoded, err, error(nil)) + testEqual(t, "DecodeString(%q) = %q, want %q", p.encoded, string(dbuf), p.decoded) + } +} + +func TestDecoder(t *testing.T) { + for _, p := range pairs { + decoder := NewDecoder(StdEncoding, strings.NewReader(p.encoded)) + dbuf := make([]byte, StdEncoding.DecodedLen(len(p.encoded))) + count, err := decoder.Read(dbuf) + if err != nil && err != io.EOF { + t.Fatal("Read failed", err) + } + testEqual(t, "Read from %q = length %v, want %v", p.encoded, count, len(p.decoded)) + testEqual(t, "Decoding of %q = %q, want %q", p.encoded, string(dbuf[0:count]), p.decoded) + if err != io.EOF { + count, err = decoder.Read(dbuf) + } + testEqual(t, "Read from %q = %v, want %v", p.encoded, err, io.EOF) + } +} + +func TestDecoderBuffering(t *testing.T) { + for bs := 1; bs <= 12; bs++ { + decoder := NewDecoder(StdEncoding, strings.NewReader(bigtest.encoded)) + buf := make([]byte, len(bigtest.decoded)+12) + var total int + for total = 0; total < len(bigtest.decoded); { + n, err := decoder.Read(buf[total : total+bs]) + testEqual(t, "Read from %q at pos %d = %d, %v, want _, %v", bigtest.encoded, total, n, err, error(nil)) + total += n + } + testEqual(t, "Decoding/%d of %q = %q, want %q", bs, bigtest.encoded, string(buf[0:total]), bigtest.decoded) + } +} + +func TestDecodeCorrupt(t *testing.T) { + testCases := []struct { + input string + offset int // -1 means no corruption. 
+ }{ + {"", -1}, + {"!!!!", 0}, + {"x===", 0}, + {"AA=A====", 2}, + {"AAA=AAAA", 3}, + {"MMMMMMMMM", 8}, + {"MMMMMM", 0}, + {"A=", 1}, + {"AA=", 3}, + {"AA==", 4}, + {"AA===", 5}, + {"AAAA=", 5}, + {"AAAA==", 6}, + {"AAAAA=", 6}, + {"AAAAA==", 7}, + {"A=======", 1}, + {"AA======", -1}, + {"AAA=====", 3}, + {"AAAA====", -1}, + {"AAAAA===", -1}, + {"AAAAAA==", 6}, + {"AAAAAAA=", -1}, + {"AAAAAAAA", -1}, + } + for _, tc := range testCases { + dbuf := make([]byte, StdEncoding.DecodedLen(len(tc.input))) + _, err := StdEncoding.Decode(dbuf, []byte(tc.input)) + if tc.offset == -1 { + if err != nil { + t.Error("Decoder wrongly detected corruption in", tc.input) + } + continue + } + switch err := err.(type) { + case CorruptInputError: + testEqual(t, "Corruption in %q at offset %v, want %v", tc.input, int(err), tc.offset) + default: + t.Error("Decoder failed to detect corruption in", tc) + } + } +} + +func TestBig(t *testing.T) { + n := 3*1000 + 1 + raw := make([]byte, n) + const alpha = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + for i := 0; i < n; i++ { + raw[i] = alpha[i%len(alpha)] + } + encoded := new(bytes.Buffer) + w := NewEncoder(StdEncoding, encoded) + nn, err := w.Write(raw) + if nn != n || err != nil { + t.Fatalf("Encoder.Write(raw) = %d, %v want %d, nil", nn, err, n) + } + err = w.Close() + if err != nil { + t.Fatalf("Encoder.Close() = %v want nil", err) + } + decoded, err := ioutil.ReadAll(NewDecoder(StdEncoding, encoded)) + if err != nil { + t.Fatalf("ioutil.ReadAll(NewDecoder(...)): %v", err) + } + + if !bytes.Equal(raw, decoded) { + var i int + for i = 0; i < len(decoded) && i < len(raw); i++ { + if decoded[i] != raw[i] { + break + } + } + t.Errorf("Decode(Encode(%d-byte string)) failed at offset %d", n, i) + } +} + +func testStringEncoding(t *testing.T, expected string, examples []string) { + for _, e := range examples { + buf, err := StdEncoding.DecodeString(e) + if err != nil { + t.Errorf("Decode(%q) failed: %v", e, err) + continue + } + if s := string(buf); s != expected { + t.Errorf("Decode(%q) = %q, want %q", e, s, expected) + } + } +} + +func TestNewLineCharacters(t *testing.T) { + // Each of these should decode to the string "sure", without errors. + examples := []string{ + "ON2XEZI=", + "ON2XEZI=\r", + "ON2XEZI=\n", + "ON2XEZI=\r\n", + "ON2XEZ\r\nI=", + "ON2X\rEZ\nI=", + "ON2X\nEZ\rI=", + "ON2XEZ\nI=", + "ON2XEZI\n=", + } + testStringEncoding(t, "sure", examples) + + // Each of these should decode to the string "foobar", without errors. 
+ examples = []string{ + "MZXW6YTBOI======", + "MZXW6YTBOI=\r\n=====", + } + testStringEncoding(t, "foobar", examples) +} + +func TestDecoderIssue4779(t *testing.T) { + encoded := `JRXXEZLNEBUXA43VNUQGI33MN5ZCA43JOQQGC3LFOQWCAY3PNZZWKY3UMV2HK4 +RAMFSGS4DJONUWG2LOM4QGK3DJOQWCA43FMQQGI3YKMVUXK43NN5SCA5DFNVYG64RANFXGG2LENFSH +K3TUEB2XIIDMMFRG64TFEBSXIIDEN5WG64TFEBWWCZ3OMEQGC3DJOF2WCLRAKV2CAZLONFWQUYLEEB +WWS3TJNUQHMZLONFQW2LBAOF2WS4ZANZXXG5DSOVSCAZLYMVZGG2LUMF2GS33OEB2WY3DBNVRW6IDM +MFRG64TJOMQG42LTNEQHK5AKMFWGS4LVNFYCAZLYEBSWCIDDN5WW233EN4QGG33OONSXC5LBOQXCAR +DVNFZSAYLVORSSA2LSOVZGKIDEN5WG64RANFXAU4TFOBZGK2DFNZSGK4TJOQQGS3RAOZXWY5LQORQX +IZJAOZSWY2LUEBSXG43FEBRWS3DMOVWSAZDPNRXXEZJAMV2SAZTVM5UWC5BANZ2WY3DBBJYGC4TJMF +2HK4ROEBCXQY3FOB2GK5LSEBZWS3TUEBXWGY3BMVRWC5BAMN2XA2LEMF2GC5BANZXW4IDQOJXWSZDF +NZ2CYIDTOVXHIIDJNYFGG5LMOBQSA4LVNEQG6ZTGNFRWSYJAMRSXGZLSOVXHIIDNN5WGY2LUEBQW42 +LNEBUWIIDFON2CA3DBMJXXE5LNFY== +====` + encodedShort := strings.Replace(encoded, "\n", "", -1) + + dec := NewDecoder(StdEncoding, strings.NewReader(encoded)) + res1, err := ioutil.ReadAll(dec) + if err != nil { + t.Errorf("ReadAll failed: %v", err) + } + + dec = NewDecoder(StdEncoding, strings.NewReader(encodedShort)) + var res2 []byte + res2, err = ioutil.ReadAll(dec) + if err != nil { + t.Errorf("ReadAll failed: %v", err) + } + + if !bytes.Equal(res1, res2) { + t.Error("Decoded results not equal") + } +} + +func BenchmarkEncodeToString(b *testing.B) { + data := make([]byte, 8192) + b.SetBytes(int64(len(data))) + for i := 0; i < b.N; i++ { + StdEncoding.EncodeToString(data) + } +} + +func BenchmarkDecodeString(b *testing.B) { + data := StdEncoding.EncodeToString(make([]byte, 8192)) + b.SetBytes(int64(len(data))) + for i := 0; i < b.N; i++ { + StdEncoding.DecodeString(data) + } +} + +func TestNoPadding(t *testing.T) { + examples := []string{ + "a", + "ab", + "abc", + "abcd", + "abcde", + "", + string([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}), + } + + for _, e := range examples { + t.Log("testing: ", e) + enc := RawStdEncoding.EncodeToString([]byte(e)) + if strings.Contains(enc, "=") { + t.Fatal("shouldnt have any padding") + } + + out, err := RawStdEncoding.DecodeString(enc) + if err != nil { + t.Fatal(err) + } + + if string(out) != e { + t.Fatalf("round trip of %q failed", e) + } + } +} + +func TestNoPaddingRand(t *testing.T) { + buf := make([]byte, 1024) + for i := 0; i < 1000; i++ { + l := rand.Intn(len(buf)) + seg := buf[:l] + rand.Read(seg) + + enc := RawStdEncoding.EncodeToString(seg) + if strings.Contains(enc, "=") { + t.Fatal("shouldnt have any padding") + } + + out, err := RawStdEncoding.DecodeString(enc) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(out, seg) { + t.Fatalf("round trip of %q failed", seg) + } + } +} diff --git a/ipfs-3-to-4/base32/example_test.go b/ipfs-3-to-4/base32/example_test.go new file mode 100644 index 00000000..2a302d88 --- /dev/null +++ b/ipfs-3-to-4/base32/example_test.go @@ -0,0 +1,45 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Keep in sync with ../base64/example_test.go. 
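+// These examples exercise the standard library's encoding/base32; the
+// StdEncoding and NewEncoder entry points used here behave the same as the
+// vendored package's.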
+ +package base32_test + +import ( + "encoding/base32" + "fmt" + "os" +) + +func ExampleEncoding_EncodeToString() { + data := []byte("any + old & data") + str := base32.StdEncoding.EncodeToString(data) + fmt.Println(str) + // Output: + // MFXHSIBLEBXWYZBAEYQGIYLUME====== +} + +func ExampleEncoding_DecodeString() { + str := "ONXW2ZJAMRQXIYJAO5UXI2BAAAQGC3TEEDX3XPY=" + data, err := base32.StdEncoding.DecodeString(str) + if err != nil { + fmt.Println("error:", err) + return + } + fmt.Printf("%q\n", data) + // Output: + // "some data with \x00 and \ufeff" +} + +func ExampleNewEncoder() { + input := []byte("foo\x00bar") + encoder := base32.NewEncoder(base32.StdEncoding, os.Stdout) + encoder.Write(input) + // Must close the encoder when finished to flush any partial blocks. + // If you comment out the following line, the last partial block "r" + // won't be encoded. + encoder.Close() + // Output: + // MZXW6ADCMFZA==== +} diff --git a/ipfs-3-to-4/base32/package.json b/ipfs-3-to-4/base32/package.json new file mode 100644 index 00000000..21b2dad5 --- /dev/null +++ b/ipfs-3-to-4/base32/package.json @@ -0,0 +1,14 @@ +{ + "author": "whyrusleeping", + "bugs": { + "url": "https://github.com/whyrusleeping/base32" + }, + "gx": { + "dvcsimport": "github.com/whyrusleeping/base32" + }, + "gxVersion": "0.7.0", + "language": "go", + "license": "", + "name": "base32", + "version": "0.0.0" +} diff --git a/ipfs-3-to-4/flatfs/flatfs.go b/ipfs-3-to-4/flatfs/flatfs.go new file mode 100644 index 00000000..705ca6d3 --- /dev/null +++ b/ipfs-3-to-4/flatfs/flatfs.go @@ -0,0 +1,379 @@ +// Package flatfs is a Datastore implementation that stores all +// objects in a two-level directory structure in the local file +// system, regardless of the hierarchy of the keys. +package flatfs + +import ( + "errors" + "io/ioutil" + "log" + "os" + "path" + "path/filepath" + "strings" + "time" + + "github.com/ipfs/fs-repo-migrations/ipfs-2-to-3/Godeps/_workspace/src/github.com/jbenet/go-datastore" + "github.com/ipfs/fs-repo-migrations/ipfs-2-to-3/Godeps/_workspace/src/github.com/jbenet/go-datastore/query" + "github.com/ipfs/fs-repo-migrations/ipfs-2-to-3/Godeps/_workspace/src/github.com/jbenet/go-os-rename" +) + +const ( + extension = ".data" + maxPrefixLen = 16 +) + +var ( + ErrBadPrefixLen = errors.New("bad prefix length") +) + +type Datastore struct { + path string + // length of the dir splay prefix + prefixLen int + + // sychronize all writes and directory changes for added safety + sync bool +} + +var _ datastore.Datastore = (*Datastore)(nil) + +func New(path string, prefixLen int, sync bool) (*Datastore, error) { + if prefixLen <= 0 || prefixLen > maxPrefixLen { + return nil, ErrBadPrefixLen + } + fs := &Datastore{ + path: path, + prefixLen: prefixLen, + sync: sync, + } + return fs, nil +} + +var padding = strings.Repeat("_", maxPrefixLen) + +func (fs *Datastore) encode(key datastore.Key) (dir, file string) { + noslash := key.String()[1:] + prefix := (noslash + padding)[:fs.prefixLen] + dir = path.Join(fs.path, prefix) + file = path.Join(dir, noslash+extension) + return dir, file +} + +func (fs *Datastore) decode(file string) (key datastore.Key, ok bool) { + if path.Ext(file) != extension { + return datastore.Key{}, false + } + name := file[:len(file)-len(extension)] + return datastore.NewKey(name), true +} + +func (fs *Datastore) makePrefixDir(dir string) error { + if err := fs.makePrefixDirNoSync(dir); err != nil { + return err + } + + // In theory, if we create a new prefix dir and add a file to + // it, the creation of the 
prefix dir itself might not be + // durable yet. Sync the root dir after a successful mkdir of + // a prefix dir, just to be paranoid. + if fs.sync { + if err := syncDir(fs.path); err != nil { + return err + } + } + return nil +} + +func (fs *Datastore) makePrefixDirNoSync(dir string) error { + if err := os.Mkdir(dir, 0777); err != nil { + // EEXIST is safe to ignore here, that just means the prefix + // directory already existed. + if !os.IsExist(err) { + return err + } + } + return nil +} + +var putMaxRetries = 3 + +func (fs *Datastore) Put(key datastore.Key, value interface{}) error { + val, ok := value.([]byte) + if !ok { + return datastore.ErrInvalidType + } + + var err error + for i := 0; i < putMaxRetries; i++ { + err = fs.doPut(key, val) + if err == nil { + return nil + } + + if !strings.Contains(err.Error(), "too many open files") { + return err + } + + log.Printf("too many open files, retrying in %dms", 100*i) + time.Sleep(time.Millisecond * 100 * time.Duration(i)) + } + return err +} + +func (fs *Datastore) doPut(key datastore.Key, val []byte) error { + dir, path := fs.encode(key) + if err := fs.makePrefixDir(dir); err != nil { + return err + } + + tmp, err := ioutil.TempFile(dir, "put-") + if err != nil { + return err + } + closed := false + removed := false + defer func() { + if !closed { + // silence errcheck + _ = tmp.Close() + } + if !removed { + // silence errcheck + _ = os.Remove(tmp.Name()) + } + }() + + if _, err := tmp.Write(val); err != nil { + return err + } + if fs.sync { + if err := tmp.Sync(); err != nil { + return err + } + } + if err := tmp.Close(); err != nil { + return err + } + closed = true + + err = osrename.Rename(tmp.Name(), path) + if err != nil { + return err + } + removed = true + + if fs.sync { + if err := syncDir(dir); err != nil { + return err + } + } + return nil +} + +func (fs *Datastore) putMany(data map[datastore.Key]interface{}) error { + var dirsToSync []string + files := make(map[*os.File]string) + + for key, value := range data { + val, ok := value.([]byte) + if !ok { + return datastore.ErrInvalidType + } + dir, path := fs.encode(key) + if err := fs.makePrefixDirNoSync(dir); err != nil { + return err + } + dirsToSync = append(dirsToSync, dir) + + tmp, err := ioutil.TempFile(dir, "put-") + if err != nil { + return err + } + + if _, err := tmp.Write(val); err != nil { + return err + } + + files[tmp] = path + } + + ops := make(map[*os.File]int) + + defer func() { + for fi, _ := range files { + val, _ := ops[fi] + switch val { + case 0: + _ = fi.Close() + fallthrough + case 1: + _ = os.Remove(fi.Name()) + } + } + }() + + // Now we sync everything + // sync and close files + for fi, _ := range files { + if fs.sync { + if err := fi.Sync(); err != nil { + return err + } + } + + if err := fi.Close(); err != nil { + return err + } + + // signify closed + ops[fi] = 1 + } + + // move files to their proper places + for fi, path := range files { + if err := osrename.Rename(fi.Name(), path); err != nil { + return err + } + + // signify removed + ops[fi] = 2 + } + + // now sync the dirs for those files + if fs.sync { + for _, dir := range dirsToSync { + if err := syncDir(dir); err != nil { + return err + } + } + + // sync top flatfs dir + if err := syncDir(fs.path); err != nil { + return err + } + } + + return nil +} + +func (fs *Datastore) Get(key datastore.Key) (value interface{}, err error) { + _, path := fs.encode(key) + data, err := ioutil.ReadFile(path) + if err != nil { + if os.IsNotExist(err) { + return nil, datastore.ErrNotFound + } + // no 
specific error to return, so just pass it through + return nil, err + } + return data, nil +} + +func (fs *Datastore) Has(key datastore.Key) (exists bool, err error) { + _, path := fs.encode(key) + switch _, err := os.Stat(path); { + case err == nil: + return true, nil + case os.IsNotExist(err): + return false, nil + default: + return false, err + } +} + +func (fs *Datastore) Delete(key datastore.Key) error { + _, path := fs.encode(key) + switch err := os.Remove(path); { + case err == nil: + return nil + case os.IsNotExist(err): + return datastore.ErrNotFound + default: + return err + } +} + +func (fs *Datastore) Query(q query.Query) (query.Results, error) { + if (q.Prefix != "" && q.Prefix != "/") || + len(q.Filters) > 0 || + len(q.Orders) > 0 || + q.Limit > 0 || + q.Offset > 0 || + !q.KeysOnly { + // TODO this is overly simplistic, but the only caller is + // `ipfs refs local` for now, and this gets us moving. + return nil, errors.New("flatfs only supports listing all keys in random order") + } + + reschan := make(chan query.Result) + go func() { + defer close(reschan) + err := filepath.Walk(fs.path, func(path string, info os.FileInfo, err error) error { + if err != nil { + log.Printf("Walk func in Query got error: %v", err) + return err + } + + if !info.Mode().IsRegular() || strings.HasPrefix(info.Name(), ".") { + return nil + } + + key, ok := fs.decode(info.Name()) + if !ok { + log.Println("failed to decode entry in flatfs") + return nil + } + + reschan <- query.Result{ + Entry: query.Entry{ + Key: key.String(), + }, + } + return nil + }) + if err != nil { + log.Println("walk failed: ", err) + } + }() + return query.ResultsWithChan(q, reschan), nil +} + +func (fs *Datastore) Close() error { + return nil +} + +type flatfsBatch struct { + puts map[datastore.Key]interface{} + deletes map[datastore.Key]struct{} + + ds *Datastore +} + +func (bt *flatfsBatch) Put(key datastore.Key, val interface{}) error { + bt.puts[key] = val + return nil +} + +func (bt *flatfsBatch) Delete(key datastore.Key) error { + bt.deletes[key] = struct{}{} + return nil +} + +func (bt *flatfsBatch) Commit() error { + if err := bt.ds.putMany(bt.puts); err != nil { + return err + } + + for k, _ := range bt.deletes { + if err := bt.ds.Delete(k); err != nil { + return err + } + } + + return nil +} + +var _ datastore.ThreadSafeDatastore = (*Datastore)(nil) + +func (*Datastore) IsThreadSafe() {} diff --git a/ipfs-3-to-4/flatfs/flatfs_test.go b/ipfs-3-to-4/flatfs/flatfs_test.go new file mode 100644 index 00000000..46153ffe --- /dev/null +++ b/ipfs-3-to-4/flatfs/flatfs_test.go @@ -0,0 +1,417 @@ +package flatfs_test + +import ( + "encoding/base32" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/flatfs" + "github.com/ipfs/go-datastore/query" + dstest "github.com/ipfs/go-datastore/test" + + rand "github.com/dustin/randbo" +) + +func tempdir(t testing.TB) (path string, cleanup func()) { + path, err := ioutil.TempDir("", "test-datastore-flatfs-") + if err != nil { + t.Fatalf("cannot create temp directory: %v", err) + } + + cleanup = func() { + if err := os.RemoveAll(path); err != nil { + t.Errorf("tempdir cleanup failed: %v", err) + } + } + return path, cleanup +} + +func TestBadPrefixLen(t *testing.T) { + temp, cleanup := tempdir(t) + defer cleanup() + + for i := 0; i > -3; i-- { + _, err := flatfs.New(temp, i, false) + if g, e := err, flatfs.ErrBadPrefixLen; g != e { + t.Errorf("expected ErrBadPrefixLen, got: %v", g) + } + } +} + +func 
TestPutBadValueType(t *testing.T) { + temp, cleanup := tempdir(t) + defer cleanup() + + fs, err := flatfs.New(temp, 2, false) + if err != nil { + t.Fatalf("New fail: %v\n", err) + } + + err = fs.Put(datastore.NewKey("quux"), 22) + if g, e := err, datastore.ErrInvalidType; g != e { + t.Fatalf("expected ErrInvalidType, got: %v\n", g) + } +} + +func TestPut(t *testing.T) { + temp, cleanup := tempdir(t) + defer cleanup() + + fs, err := flatfs.New(temp, 2, false) + if err != nil { + t.Fatalf("New fail: %v\n", err) + } + + err = fs.Put(datastore.NewKey("quux"), []byte("foobar")) + if err != nil { + t.Fatalf("Put fail: %v\n", err) + } +} + +func TestGet(t *testing.T) { + temp, cleanup := tempdir(t) + defer cleanup() + + fs, err := flatfs.New(temp, 2, false) + if err != nil { + t.Fatalf("New fail: %v\n", err) + } + + const input = "foobar" + err = fs.Put(datastore.NewKey("quux"), []byte(input)) + if err != nil { + t.Fatalf("Put fail: %v\n", err) + } + + data, err := fs.Get(datastore.NewKey("quux")) + if err != nil { + t.Fatalf("Get failed: %v", err) + } + buf, ok := data.([]byte) + if !ok { + t.Fatalf("expected []byte from Get, got %T: %v", data, data) + } + if g, e := string(buf), input; g != e { + t.Fatalf("Get gave wrong content: %q != %q", g, e) + } +} + +func TestPutOverwrite(t *testing.T) { + temp, cleanup := tempdir(t) + defer cleanup() + + fs, err := flatfs.New(temp, 2, false) + if err != nil { + t.Fatalf("New fail: %v\n", err) + } + + const ( + loser = "foobar" + winner = "xyzzy" + ) + err = fs.Put(datastore.NewKey("quux"), []byte(loser)) + if err != nil { + t.Fatalf("Put fail: %v\n", err) + } + + err = fs.Put(datastore.NewKey("quux"), []byte(winner)) + if err != nil { + t.Fatalf("Put fail: %v\n", err) + } + + data, err := fs.Get(datastore.NewKey("quux")) + if err != nil { + t.Fatalf("Get failed: %v", err) + } + if g, e := string(data.([]byte)), winner; g != e { + t.Fatalf("Get gave wrong content: %q != %q", g, e) + } +} + +func TestGetNotFoundError(t *testing.T) { + temp, cleanup := tempdir(t) + defer cleanup() + + fs, err := flatfs.New(temp, 2, false) + if err != nil { + t.Fatalf("New fail: %v\n", err) + } + + _, err = fs.Get(datastore.NewKey("quux")) + if g, e := err, datastore.ErrNotFound; g != e { + t.Fatalf("expected ErrNotFound, got: %v\n", g) + } +} + +func TestStorage(t *testing.T) { + temp, cleanup := tempdir(t) + defer cleanup() + + const prefixLen = 2 + const prefix = "qu" + const target = prefix + string(os.PathSeparator) + "quux.data" + fs, err := flatfs.New(temp, prefixLen, false) + if err != nil { + t.Fatalf("New fail: %v\n", err) + } + + err = fs.Put(datastore.NewKey("quux"), []byte("foobar")) + if err != nil { + t.Fatalf("Put fail: %v\n", err) + } + + seen := false + walk := func(absPath string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + path, err := filepath.Rel(temp, absPath) + if err != nil { + return err + } + switch path { + case ".", "..": + // ignore + case prefix: + if !fi.IsDir() { + t.Errorf("prefix directory is not a file? 
%v", fi.Mode()) + } + // we know it's there if we see the file, nothing more to + // do here + case target: + seen = true + if !fi.Mode().IsRegular() { + t.Errorf("expected a regular file, mode: %04o", fi.Mode()) + } + if runtime.GOOS != "windows" { + if g, e := fi.Mode()&os.ModePerm&0007, os.FileMode(0000); g != e { + t.Errorf("file should not be world accessible: %04o", fi.Mode()) + } + } + default: + t.Errorf("saw unexpected directory entry: %q %v", path, fi.Mode()) + } + return nil + } + if err := filepath.Walk(temp, walk); err != nil { + t.Fatal("walk: %v", err) + } + if !seen { + t.Error("did not see the data file") + } +} + +func TestHasNotFound(t *testing.T) { + temp, cleanup := tempdir(t) + defer cleanup() + + fs, err := flatfs.New(temp, 2, false) + if err != nil { + t.Fatalf("New fail: %v\n", err) + } + + found, err := fs.Has(datastore.NewKey("quux")) + if err != nil { + t.Fatalf("Has fail: %v\n", err) + } + if g, e := found, false; g != e { + t.Fatalf("wrong Has: %v != %v", g, e) + } +} + +func TestHasFound(t *testing.T) { + temp, cleanup := tempdir(t) + defer cleanup() + + fs, err := flatfs.New(temp, 2, false) + if err != nil { + t.Fatalf("New fail: %v\n", err) + } + err = fs.Put(datastore.NewKey("quux"), []byte("foobar")) + if err != nil { + t.Fatalf("Put fail: %v\n", err) + } + + found, err := fs.Has(datastore.NewKey("quux")) + if err != nil { + t.Fatalf("Has fail: %v\n", err) + } + if g, e := found, true; g != e { + t.Fatalf("wrong Has: %v != %v", g, e) + } +} + +func TestDeleteNotFound(t *testing.T) { + temp, cleanup := tempdir(t) + defer cleanup() + + fs, err := flatfs.New(temp, 2, false) + if err != nil { + t.Fatalf("New fail: %v\n", err) + } + + err = fs.Delete(datastore.NewKey("quux")) + if g, e := err, datastore.ErrNotFound; g != e { + t.Fatalf("expected ErrNotFound, got: %v\n", g) + } +} + +func TestDeleteFound(t *testing.T) { + temp, cleanup := tempdir(t) + defer cleanup() + + fs, err := flatfs.New(temp, 2, false) + if err != nil { + t.Fatalf("New fail: %v\n", err) + } + err = fs.Put(datastore.NewKey("quux"), []byte("foobar")) + if err != nil { + t.Fatalf("Put fail: %v\n", err) + } + + err = fs.Delete(datastore.NewKey("quux")) + if err != nil { + t.Fatalf("Delete fail: %v\n", err) + } + + // check that it's gone + _, err = fs.Get(datastore.NewKey("quux")) + if g, e := err, datastore.ErrNotFound; g != e { + t.Fatalf("expected Get after Delete to give ErrNotFound, got: %v\n", g) + } +} + +func TestQuerySimple(t *testing.T) { + temp, cleanup := tempdir(t) + defer cleanup() + + fs, err := flatfs.New(temp, 2, false) + if err != nil { + t.Fatalf("New fail: %v\n", err) + } + const myKey = "quux" + err = fs.Put(datastore.NewKey(myKey), []byte("foobar")) + if err != nil { + t.Fatalf("Put fail: %v\n", err) + } + + res, err := fs.Query(query.Query{KeysOnly: true}) + if err != nil { + t.Fatalf("Query fail: %v\n", err) + } + entries, err := res.Rest() + if err != nil { + t.Fatalf("Query Results.Rest fail: %v\n", err) + } + seen := false + for _, e := range entries { + switch e.Key { + case datastore.NewKey(myKey).String(): + seen = true + default: + t.Errorf("saw unexpected key: %q", e.Key) + } + } + if !seen { + t.Errorf("did not see wanted key %q in %+v", myKey, entries) + } +} + +func TestBatchPut(t *testing.T) { + temp, cleanup := tempdir(t) + defer cleanup() + + fs, err := flatfs.New(temp, 2, false) + if err != nil { + t.Fatalf("New fail: %v\n", err) + } + + dstest.RunBatchTest(t, fs) +} + +func TestBatchDelete(t *testing.T) { + temp, cleanup := tempdir(t) + defer cleanup() 
+ + fs, err := flatfs.New(temp, 2, false) + if err != nil { + t.Fatalf("New fail: %v\n", err) + } + + dstest.RunBatchDeleteTest(t, fs) +} + +func BenchmarkConsecutivePut(b *testing.B) { + r := rand.New() + var blocks [][]byte + var keys []datastore.Key + for i := 0; i < b.N; i++ { + blk := make([]byte, 256*1024) + r.Read(blk) + blocks = append(blocks, blk) + + key := base32.StdEncoding.EncodeToString(blk[:8]) + keys = append(keys, datastore.NewKey(key)) + } + temp, cleanup := tempdir(b) + defer cleanup() + + fs, err := flatfs.New(temp, 2, false) + if err != nil { + b.Fatalf("New fail: %v\n", err) + } + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + err := fs.Put(keys[i], blocks[i]) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkBatchedPut(b *testing.B) { + r := rand.New() + var blocks [][]byte + var keys []datastore.Key + for i := 0; i < b.N; i++ { + blk := make([]byte, 256*1024) + r.Read(blk) + blocks = append(blocks, blk) + + key := base32.StdEncoding.EncodeToString(blk[:8]) + keys = append(keys, datastore.NewKey(key)) + } + temp, cleanup := tempdir(b) + defer cleanup() + + fs, err := flatfs.New(temp, 2, false) + if err != nil { + b.Fatalf("New fail: %v\n", err) + } + + b.ResetTimer() + + for i := 0; i < b.N; { + batch, err := fs.Batch() + if err != nil { + b.Fatal(err) + } + + for n := i; i-n < 512 && i < b.N; i++ { + err := batch.Put(keys[i], blocks[i]) + if err != nil { + b.Fatal(err) + } + } + err = batch.Commit() + if err != nil { + b.Fatal(err) + } + } +} diff --git a/ipfs-3-to-4/flatfs/sync_std.go b/ipfs-3-to-4/flatfs/sync_std.go new file mode 100644 index 00000000..0608bf97 --- /dev/null +++ b/ipfs-3-to-4/flatfs/sync_std.go @@ -0,0 +1,17 @@ +// +build !windows + +package flatfs + +import "os" + +func syncDir(dir string) error { + dirF, err := os.Open(dir) + if err != nil { + return err + } + defer dirF.Close() + if err := dirF.Sync(); err != nil { + return err + } + return nil +} diff --git a/ipfs-3-to-4/flatfs/sync_windows.go b/ipfs-3-to-4/flatfs/sync_windows.go new file mode 100644 index 00000000..b3b1ce3c --- /dev/null +++ b/ipfs-3-to-4/flatfs/sync_windows.go @@ -0,0 +1,5 @@ +package flatfs + +func syncDir(dir string) error { + return nil +} diff --git a/ipfs-3-to-4/main.go b/ipfs-3-to-4/main.go new file mode 100644 index 00000000..ac40e055 --- /dev/null +++ b/ipfs-3-to-4/main.go @@ -0,0 +1,11 @@ +package main + +import ( + migrate "github.com/ipfs/fs-repo-migrations/go-migrate" + mg3 "github.com/ipfs/fs-repo-migrations/ipfs-3-to-4/migration" +) + +func main() { + m := mg3.Migration{} + migrate.Main(&m) +} diff --git a/ipfs-3-to-4/migration/migration.go b/ipfs-3-to-4/migration/migration.go new file mode 100644 index 00000000..b2f14532 --- /dev/null +++ b/ipfs-3-to-4/migration/migration.go @@ -0,0 +1,463 @@ +package mg3 + +import ( + "encoding/hex" + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "strings" + "time" + + migrate "github.com/ipfs/fs-repo-migrations/go-migrate" + lock "github.com/ipfs/fs-repo-migrations/ipfs-1-to-2/repolock" + blocks "github.com/ipfs/fs-repo-migrations/ipfs-2-to-3/Godeps/_workspace/src/github.com/ipfs/go-ipfs/blocks" + util "github.com/ipfs/fs-repo-migrations/ipfs-2-to-3/Godeps/_workspace/src/github.com/ipfs/go-ipfs/util" + dstore "github.com/ipfs/fs-repo-migrations/ipfs-2-to-3/Godeps/_workspace/src/github.com/jbenet/go-datastore" + flatfs "github.com/ipfs/fs-repo-migrations/ipfs-2-to-3/Godeps/_workspace/src/github.com/jbenet/go-datastore/flatfs" + leveldb 
"github.com/ipfs/fs-repo-migrations/ipfs-2-to-3/Godeps/_workspace/src/github.com/jbenet/go-datastore/leveldb" + mount "github.com/ipfs/fs-repo-migrations/ipfs-2-to-3/Godeps/_workspace/src/github.com/jbenet/go-datastore/mount" + dsq "github.com/ipfs/fs-repo-migrations/ipfs-2-to-3/Godeps/_workspace/src/github.com/jbenet/go-datastore/query" + sync "github.com/ipfs/fs-repo-migrations/ipfs-2-to-3/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync" + rename "github.com/ipfs/fs-repo-migrations/ipfs-2-to-3/Godeps/_workspace/src/github.com/jbenet/go-os-rename" + base32 "github.com/ipfs/fs-repo-migrations/ipfs-3-to-4/base32" + nuflatfs "github.com/ipfs/fs-repo-migrations/ipfs-3-to-4/flatfs" + mfsr "github.com/ipfs/fs-repo-migrations/mfsr" + log "github.com/ipfs/fs-repo-migrations/stump" +) + +type Migration struct{} + +func (m Migration) Versions() string { + return "3-to-4" +} + +func (m Migration) Reversible() bool { + return true +} + +type validFunc func(string) bool +type mkKeyFunc func(util.Key) dstore.Key +type txFunc func(dstore.Datastore, dstore.Key, []byte, mkKeyFunc) error + +func validateNewKey(s string) bool { + parts := strings.Split(s, "/") + if len(parts) < 3 { + return false + } + + kpart := s[2+len(parts[1]):] + v, err := base32.RawStdEncoding.DecodeString(kpart) + if err == nil && len(v) == 34 { + return true + } + + return false +} + +func oldKeyFunc(prefix string) func(util.Key) dstore.Key { + return func(k util.Key) dstore.Key { + return dstore.NewKey(prefix + string(k)) + } +} + +func validateOldKey(s string) bool { + parts := strings.Split(s, "/") + if len(parts) < 3 { + return false + } + + kpart := s[2+len(parts[1]):] + v, err := base32.RawStdEncoding.DecodeString(kpart) + if err == nil && len(v) == 34 { + // already transfered to new format + return false + } + + return true +} + +func newKeyFunc(prefix string) func(util.Key) dstore.Key { + return func(k util.Key) dstore.Key { + return dstore.NewKey(prefix + base32.RawStdEncoding.EncodeToString([]byte(k))) + } +} + +func (m Migration) Apply(opts migrate.Options) error { + log.Verbose = opts.Verbose + log.Log("applying %s repo migration", m.Versions()) + + log.VLog("locking repo at %q", opts.Path) + lk, err := lock.Lock2(opts.Path) + if err != nil { + return err + } + defer lk.Close() + + repo := mfsr.RepoPath(opts.Path) + + log.VLog(" - verifying version is '3'") + if err := repo.CheckVersion("3"); err != nil { + return err + } + + dsold, dsnew, err := openDatastores(opts.Path) + if err != nil { + return err + } + + log.Log("transfering blocks to new key format") + if err := transferBlocks(filepath.Join(opts.Path, "blocks")); err != nil { + return err + } + + /* + if err := rewriteKeys(dsold, dsnew, "blocks", newKeyFunc("/blocks/"), validateOldKey, transferBlock); err != nil { + return err + } + */ + + log.Log("transferring stored public key records") + if err := rewriteKeys(dsold, dsnew, "pk", newKeyFunc("/pk/"), validateOldKey, transferPubKey); err != nil { + return err + } + + log.Log("transferring stored ipns records") + if err := rewriteKeys(dsold, dsnew, "ipns", newKeyFunc("/ipns/"), validateOldKey, transferIpnsEntries); err != nil { + return err + } + + err = repo.WriteVersion("4") + if err != nil { + return err + } + + log.Log("updated version file") + + return nil +} + +func (m Migration) Revert(opts migrate.Options) error { + log.Verbose = opts.Verbose + log.Log("reverting migration") + lk, err := lock.Lock2(opts.Path) + if err != nil { + return err + } + defer lk.Close() + + repo := 
mfsr.RepoPath(opts.Path) + if err := repo.CheckVersion("4"); err != nil { + return err + } + + oldds, newds, err := openDatastores(opts.Path) + if err != nil { + return err + } + + log.Log("reverting blocks to old key format") + if err := rewriteKeys(newds, oldds, "blocks", oldKeyFunc("/blocks/"), validateNewKey, transferBlock); err != nil { + return err + } + + if err := cleanEmptyDirs(filepath.Join(opts.Path, "blocks")); err != nil { + return err + } + + log.Log("reverting stored public key records") + if err := rewriteKeys(newds, oldds, "pk", oldKeyFunc("/pk/"), validateNewKey, transferPubKey); err != nil { + return err + } + + log.Log("reverting stored ipns records") + if err := rewriteKeys(newds, oldds, "ipns", oldKeyFunc("/ipns/"), validateNewKey, revertIpnsEntries); err != nil { + return err + } + + // 3) change version number back down + err = repo.WriteVersion("3") + if err != nil { + return err + } + if opts.Verbose { + fmt.Println("lowered version number to 3") + } + + return nil +} + +func openDatastores(repopath string) (a, b dstore.ThreadSafeDatastore, e error) { + log.VLog(" - opening datastore at %q", repopath) + ldbpath := path.Join(repopath, "datastore") + ldb, err := leveldb.NewDatastore(ldbpath, nil) + if err != nil { + return nil, nil, err + } + + blockspath := path.Join(repopath, "blocks") + nfds, err := nuflatfs.New(blockspath, 5, true) + if err != nil { + return nil, nil, err + } + + ofds, err := flatfs.New(blockspath, 4) + if err != nil { + return nil, nil, err + } + + oldds := sync.MutexWrap(mount.New([]mount.Mount{ + { + Prefix: dstore.NewKey("/blocks"), + Datastore: ofds, + }, + { + Prefix: dstore.NewKey("/"), + Datastore: ldb, + }, + })) + + newds := sync.MutexWrap(mount.New([]mount.Mount{ + { + Prefix: dstore.NewKey("/blocks"), + Datastore: nfds, + }, + { + Prefix: dstore.NewKey("/"), + Datastore: ldb, + }, + })) + return oldds, newds, nil +} + +func rewriteKeys(oldds, newds dstore.Datastore, pref string, mkKey mkKeyFunc, valid validFunc, transfer txFunc) error { + + log.Log("gathering keys...") + res, err := oldds.Query(dsq.Query{ + Prefix: pref, + KeysOnly: true, + }) + if err != nil { + return err + } + + entries, err := res.Rest() + if err != nil { + return err + } + + log.Log("got %d keys, beginning transfer. 
This will take some time.", len(entries))
+
+	prog := NewProgress(len(entries))
+	for _, e := range entries {
+		prog.Next()
+
+		if !valid(e.Key) {
+			prog.Skip()
+			continue
+		}
+
+		curk := dstore.NewKey(e.Key)
+		blk, err := oldds.Get(curk)
+		if err != nil {
+			return err
+		}
+
+		blkd, ok := blk.([]byte)
+		if !ok {
+			log.Error("data %q was not a []byte", e.Key)
+			continue
+		}
+
+		err = transfer(newds, curk, blkd, mkKey)
+		if err != nil {
+			return err
+		}
+
+		err = oldds.Delete(curk)
+		if err != nil {
+			return err
+		}
+	}
+	fmt.Println()
+
+	return nil
+}
+
+func transferBlock(ds dstore.Datastore, oldk dstore.Key, data []byte, mkKey mkKeyFunc) error {
+	b := blocks.NewBlock(data)
+	dsk := mkKey(b.Key())
+	err := ds.Put(dsk, b.Data)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func transferPubKey(ds dstore.Datastore, oldk dstore.Key, data []byte, mkKey mkKeyFunc) error {
+	k := util.Key(util.Hash(data))
+	dsk := mkKey(k)
+	return ds.Put(dsk, data)
+}
+
+func transferIpnsEntries(ds dstore.Datastore, oldk dstore.Key, data []byte, mkkey mkKeyFunc) error {
+	if len(oldk.String()) != 40 {
+		log.Log(" - skipping malformed ipns record: %q", oldk)
+		return nil
+	}
+	dsk := dstore.NewKey("/ipns/" + base32.RawStdEncoding.EncodeToString([]byte(oldk.String()[6:])))
+	return ds.Put(dsk, data)
+}
+
+func revertIpnsEntries(ds dstore.Datastore, oldk dstore.Key, data []byte, mkkey mkKeyFunc) error {
+	if len(oldk.String()) != 61 {
+		log.Log(" - skipping malformed ipns record: %q", oldk)
+		return nil
+	}
+	dec, err := base32.RawStdEncoding.DecodeString(oldk.String()[6:])
+	if err != nil {
+		return err
+	}
+
+	dsk := dstore.NewKey("/ipns/" + string(dec))
+	return ds.Put(dsk, data)
+}
+
+func transferBlocks(flatfsdir string) error {
+	var keys []string
+	err := filepath.Walk(flatfsdir, func(p string, i os.FileInfo, err error) error {
+		// check the walk error before touching i, which may be nil
+		if err != nil {
+			return err
+		}
+
+		if i.IsDir() {
+			return nil
+		}
+
+		rel := p[len(flatfsdir)+1:]
+		if !strings.HasSuffix(rel, ".data") {
+			fmt.Println("skipping (no .data): ", rel)
+			return nil
+		}
+
+		keys = append(keys, p)
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	prog := NewProgress(len(keys))
+	for _, p := range keys {
+		prog.Next()
+		rel := p[len(flatfsdir)+1:]
+
+		justkey := rel[:len(rel)-5]
+		if validateNewKey(justkey) {
+			prog.Skip()
+			fmt.Printf("skipping %s, already in new format\n", justkey)
+			continue
+		}
+
+		_, fi := filepath.Split(rel[:len(rel)-5])
+		k, err := hex.DecodeString(fi)
+		if err != nil {
+			fmt.Printf("failed to decode: %s\n", p)
+			return err
+		}
+
+		if len(k) != 34 {
+			data, err := ioutil.ReadFile(p)
+			if err != nil {
+				return err
+			}
+
+			key := blocks.NewBlock(data).Key()
+			k = []byte(key)
+		}
+
+		nname := base32.RawStdEncoding.EncodeToString(k) + ".data"
+		dirname := nname[:5]
+		nfiname := filepath.Join(flatfsdir, dirname, nname)
+
+		err = os.MkdirAll(filepath.Join(flatfsdir, dirname), 0755)
+		if err != nil {
+			return err
+		}
+
+		err = rename.Rename(p, nfiname)
+		if err != nil {
+			return err
+		}
+	}
+
+	fmt.Println()
+
+	err = cleanEmptyDirs(flatfsdir)
+	if err != nil {
+		fmt.Println(err)
+	}
+
+	return nil
+}
+
+func cleanEmptyDirs(dir string) error {
+	children, err := ioutil.ReadDir(dir)
+	if err != nil {
+		return err
+	}
+
+	for _, c := range children {
+		if !c.IsDir() {
+			continue
+		}
+
+		cdir := filepath.Join(dir, c.Name())
+		blocks, err := ioutil.ReadDir(cdir)
+		if err != nil {
+			return err
+		}
+
+		if len(blocks) == 0 {
+			err := os.Remove(cdir)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+type progress struct {
+	total   int
+	current int
+	skipped int
+
start time.Time +} + +func NewProgress(total int) *progress { + return &progress{ + total: total, + start: time.Now(), + } +} + +func (p *progress) Skip() { + p.skipped++ +} + +func (p *progress) Next() { + p.current++ + fmt.Printf("\r[%d / %d]", p.current, p.total) + if p.skipped > 0 { + fmt.Printf(" (skipped: %d)", p.skipped) + } + + if p.current%10 == 9 { + took := time.Now().Sub(p.start) + av := took / time.Duration(p.current) + estim := av * time.Duration(p.total-p.current) + est := strings.Split(estim.String(), ".")[0] + + fmt.Printf(" Approx time remaining: %ss ", est) + } +} diff --git a/main.go b/main.go index 01202b72..05dbed36 100644 --- a/main.go +++ b/main.go @@ -12,13 +12,17 @@ import ( mg1 "github.com/ipfs/fs-repo-migrations/ipfs-1-to-2/migration" homedir "github.com/ipfs/fs-repo-migrations/ipfs-2-to-3/Godeps/_workspace/src/github.com/mitchellh/go-homedir" mg2 "github.com/ipfs/fs-repo-migrations/ipfs-2-to-3/migration" + mg3 "github.com/ipfs/fs-repo-migrations/ipfs-3-to-4/migration" mfsr "github.com/ipfs/fs-repo-migrations/mfsr" ) +var CurrentVersion = 4 + var migrations = []gomigrate.Migration{ &mg0.Migration{}, &mg1.Migration{}, &mg2.Migration{}, + &mg3.Migration{}, } func GetIpfsDir() (string, error) { @@ -120,14 +124,14 @@ func YesNoPrompt(prompt string) bool { } func main() { - target := flag.Int("to", 3, "specify version to upgrade to") + target := flag.Int("to", CurrentVersion, "specify version to upgrade to") yes := flag.Bool("y", false, "answer yes to all prompts") version := flag.Bool("v", false, "print highest repo version and exit") flag.Parse() if *version { - fmt.Println(3) + fmt.Println(CurrentVersion) return } diff --git a/sharness/Makefile b/sharness/Makefile index ba080f67..9d9abc8b 100644 --- a/sharness/Makefile +++ b/sharness/Makefile @@ -16,16 +16,20 @@ BINS += bin/fs-repo-migrations BINS += bin/ipfs-0-to-1 BINS += bin/ipfs-1-to-2 BINS += bin/ipfs-2-to-3 +BINS += bin/ipfs-3-to-4 BINS += bin/ipfs-update BINS += bin/random-files BINS += bin/go-sleep +BINS += bin/pollEndpoint # Source files location FS_REPO_SRC = ../ IPFS_0_TO_1_SRC = ../ipfs-0-to-1 IPFS_1_TO_2_SRC = ../ipfs-1-to-2 IPFS_2_TO_3_SRC = ../ipfs-2-to-3 +IPFS_3_TO_4_SRC = ../ipfs-3-to-4 IPFS_UPDATE_SRC = ../Godeps/_workspace/src/github.com/ipfs/ipfs-update +POLL_ENDPOINT_SRC = ./dependencies/pollEndpoint # User might want to override those on the command line GOFLAGS = @@ -87,6 +91,14 @@ bin/ipfs-2-to-3: $(call find_go_files, $(IPFS_2_TO_3_SRC)) BUILD-OPTIONS @echo "*** installing $@ ***" go build $(GOFLAGS) -o $@ $(IPFS_2_TO_3_SRC) +bin/ipfs-3-to-4: $(call find_go_files, $(IPFS_3_TO_4_SRC)) BUILD-OPTIONS + @echo "*** installing $@ ***" + go build $(GOFLAGS) -o $@ $(IPFS_3_TO_4_SRC) + +bin/pollEndpoint: $(call find_go_files, $(POLL_ENDPOINT_SRC)) BUILD-OPTIONS + @echo "*** installing $@ ***" + go build $(GOFLAGS) -o $@ $(POLL_ENDPOINT_SRC) + BUILD-OPTIONS: FORCE @bin/checkflags '$@' '$(GOFLAGS)' '*** new Go flags ***' diff --git a/sharness/dependencies/pollEndpoint/main.go b/sharness/dependencies/pollEndpoint/main.go new file mode 100644 index 00000000..87d12fa1 --- /dev/null +++ b/sharness/dependencies/pollEndpoint/main.go @@ -0,0 +1,88 @@ +// pollEndpoint is a helper utility that waits for a http endpoint to be reachable and return with http.StatusOK +package main + +import ( + "flag" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "time" + + ma "github.com/ipfs/fs-repo-migrations/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" + manet 
"github.com/ipfs/fs-repo-migrations/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net" + logging "github.com/ipfs/fs-repo-migrations/ipfs-2-to-3/Godeps/_workspace/src/github.com/whyrusleeping/go-logging" +) + +var ( + host = flag.String("host", "/ip4/127.0.0.1/tcp/5001", "the multiaddr host to dial on") + endpoint = flag.String("ep", "/version", "which http endpoint path to hit") + tries = flag.Int("tries", 10, "how many tries to make before failing") + timeout = flag.Duration("tout", time.Second, "how long to wait between attempts") + verbose = flag.Bool("v", false, "verbose logging") +) + +var log = logging.MustGetLogger("pollEndpoint") + +func main() { + flag.Parse() + + // extract address from host flag + addr, err := ma.NewMultiaddr(*host) + if err != nil { + log.Fatal("NewMultiaddr() failed: ", err) + } + p := addr.Protocols() + if len(p) < 2 { + log.Fatal("need two protocols in host flag (/ip/tcp): ", addr) + } + _, host, err := manet.DialArgs(addr) + if err != nil { + log.Fatal("manet.DialArgs() failed: ", err) + } + + if *verbose { // lower log level + logging.LogLevel("debug") + } + + // construct url to dial + var u url.URL + u.Scheme = "http" + u.Host = host + u.Path = *endpoint + + // show what we got + start := time.Now() + log.Debug("starting at %s, tries: %d, timeout: %s, url: %s", start, *tries, *timeout, u) + + for *tries > 0 { + + err := checkOK(http.Get(u.String())) + if err == nil { + log.Debugf("ok - endpoint reachable with %d tries remaining, took %s", *tries, time.Since(start)) + os.Exit(0) + } + log.Debug("get failed: ", err) + time.Sleep(*timeout) + *tries-- + } + + log.Error("failed.") + os.Exit(1) +} + +func checkOK(resp *http.Response, err error) error { + if err == nil { // request worked + defer resp.Body.Close() + if resp.StatusCode == http.StatusOK { + return nil + } + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + fmt.Fprintf(os.Stderr, "pollEndpoint: ioutil.ReadAll() Error: %s", err) + } + return fmt.Errorf("Response not OK. %d %s %q", resp.StatusCode, resp.Status, string(body)) + } + return err +} diff --git a/sharness/lib/test-lib.sh b/sharness/lib/test-lib.sh index ae0a7f12..1507606d 100644 --- a/sharness/lib/test-lib.sh +++ b/sharness/lib/test-lib.sh @@ -82,6 +82,7 @@ GUEST_FS_REPO_MIG="sharness/bin/fs-repo-migrations" GUEST_IPFS_0_TO_1="sharness/bin/ipfs-0-to-1" GUEST_IPFS_1_TO_2="sharness/bin/ipfs-1-to-2" GUEST_IPFS_2_TO_3="sharness/bin/ipfs-2-to-3" +GUEST_IPFS_3_TO_4="sharness/bin/ipfs-3-to-4" GUEST_RANDOM_FILES="sharness/bin/random-files" @@ -228,3 +229,171 @@ test_repo_version() { test_cmp expected actual ' } + +test_install_ipfs_nd() { + VERSION="$1" + + # We have to change the PATH as ipfs-update might call fs-repo-migrations + test_expect_success "'ipfs-update install' works for $VERSION" ' + ipfs-update --verbose install $VERSION > actual 2>&1 || + test_fsh cat actual + ' + + test_expect_success "'ipfs-update install' output looks good" ' + grep "fetching ipfs version $VERSION" actual && + grep "installation complete." 
diff --git a/sharness/lib/test-lib.sh b/sharness/lib/test-lib.sh
index ae0a7f12..1507606d 100644
--- a/sharness/lib/test-lib.sh
+++ b/sharness/lib/test-lib.sh
@@ -82,6 +82,7 @@ GUEST_FS_REPO_MIG="sharness/bin/fs-repo-migrations"
 GUEST_IPFS_0_TO_1="sharness/bin/ipfs-0-to-1"
 GUEST_IPFS_1_TO_2="sharness/bin/ipfs-1-to-2"
 GUEST_IPFS_2_TO_3="sharness/bin/ipfs-2-to-3"
+GUEST_IPFS_3_TO_4="sharness/bin/ipfs-3-to-4"
 
 GUEST_RANDOM_FILES="sharness/bin/random-files"
 
@@ -228,3 +229,171 @@ test_repo_version() {
 	test_cmp expected actual
 	'
 }
+
+test_install_ipfs_nd() {
+	VERSION="$1"
+
+	# We have to change the PATH as ipfs-update might call fs-repo-migrations
+	test_expect_success "'ipfs-update install' works for $VERSION" '
+		ipfs-update --verbose install $VERSION > actual 2>&1 ||
+		test_fsh cat actual
+	'
+
+	test_expect_success "'ipfs-update install' output looks good" '
+		grep "fetching ipfs version $VERSION" actual &&
+		grep "installation complete." actual ||
+		test_fsh cat actual
+	'
+
+	test_expect_success "'ipfs-update version' works for $VERSION" '
+		ipfs-update version > actual
+	'
+
+	test_expect_success "'ipfs-update version' output looks good" '
+		echo "$VERSION" >expected &&
+		test_cmp expected actual
+	'
+}
+
+test_init_ipfs_nd() {
+
+	test_expect_success "ipfs init succeeds" '
+		export IPFS_PATH="$(pwd)/.ipfs" &&
+		ipfs init -b=1024 > /dev/null
+	'
+
+	test_expect_success "prepare config -- mounting and bootstrap rm" '
+		test_config_set Addresses.API "/ip4/127.0.0.1/tcp/0" &&
+		test_config_set Addresses.Gateway "/ip4/127.0.0.1/tcp/0" &&
+		test_config_set --json Addresses.Swarm "[
+  \"/ip4/0.0.0.0/tcp/0\"
+]" &&
+		ipfs bootstrap rm --all ||
+		test_fsh cat "\"$IPFS_PATH/config\""
+	'
+}
+
+test_config_set() {
+
+	# grab flags (like --bool in "ipfs config --bool")
+	test_cfg_flags="" # unset in case.
+	test "$#" = 3 && { test_cfg_flags=$1; shift; }
+
+	test_cfg_key=$1
+	test_cfg_val=$2
+
+	# when verbose, tell the user what config values are being set
+	test_cfg_cmd="ipfs config $test_cfg_flags \"$test_cfg_key\" \"$test_cfg_val\""
+	test "$TEST_VERBOSE" = 1 && echo "$test_cfg_cmd"
+
+	# ok, try setting the config key/val pair.
+	ipfs config $test_cfg_flags "$test_cfg_key" "$test_cfg_val"
+	echo "$test_cfg_val" >cfg_set_expected
+	ipfs config "$test_cfg_key" >cfg_set_actual
+	test_cmp cfg_set_expected cfg_set_actual
+}
+
+test_set_address_vars_nd() {
+	daemon_output="$1"
+
+	test_expect_success "set up address variables" '
+		API_MADDR=$(cat "$IPFS_PATH/api") &&
+		API_ADDR=$(convert_tcp_maddr $API_MADDR) &&
+		API_PORT=$(port_from_maddr $API_MADDR) &&
+
+		GWAY_MADDR=$(sed -n "s/^Gateway (.*) server listening on //p" "$daemon_output") &&
+		GWAY_ADDR=$(convert_tcp_maddr $GWAY_MADDR) &&
+		GWAY_PORT=$(port_from_maddr $GWAY_MADDR)
+	'
+
+	if ipfs swarm addrs local >/dev/null 2>&1; then
+		test_expect_success "set swarm address vars" '
+			ipfs swarm addrs local > addrs_out &&
+			SWARM_MADDR=$(grep "127.0.0.1" addrs_out) &&
+			SWARM_PORT=$(port_from_maddr $SWARM_MADDR)
+		'
+	fi
+}
+
+convert_tcp_maddr() {
+	echo $1 | awk -F'/' '{ printf "%s:%s", $3, $5 }'
+}
+
+port_from_maddr() {
+	echo $1 | awk -F'/' '{ print $NF }'
+}
+
+test_launch_ipfs_daemon() {
+
+	args="$@"
+
+	test "$TEST_ULIMIT_PRESET" != 1 && ulimit -n 1024
+
+	# capture the PID right after backgrounding, before anything else runs
+	test_expect_success "'ipfs daemon' succeeds" '
+		ipfs daemon $args >actual_daemon 2>daemon_err &
+		IPFS_PID=$!
+	'
+
+	# wait for api file to show up
+	test_expect_success "api file shows up" '
+		test_wait_for_file 20 100ms "$IPFS_PATH/api"
+	'
+
+	test_set_address_vars_nd actual_daemon
+
+	# we say the daemon is ready when the API server is ready.
+	test_expect_success "'ipfs daemon' is ready" '
+		pollEndpoint -ep=/version -host=$API_MADDR -v -tout=1s -tries=60 2>poll_apierr > poll_apiout ||
+		test_fsh cat actual_daemon || test_fsh cat daemon_err || test_fsh cat poll_apierr || test_fsh cat poll_apiout
+	'
+}
+
+test_wait_for_file() {
+	loops=$1
+	delay=$2
+	file=$3
+	fwaitc=0
+	while ! test -f "$file"
+	do
+		if test $fwaitc -ge $loops
+		then
+			echo "Error: timed out waiting for file: $file"
+			return 1
+		fi
+
+		go-sleep $delay
+		fwaitc=`expr $fwaitc + 1`
+	done
+}
+
+test_kill_repeat_10_sec() {
+	# try to shut down once + wait for graceful exit
+	kill $1
+	for i in $(test_seq 1 100)
+	do
+		go-sleep 100ms
+		! kill -0 $1 2>/dev/null && return
+	done
+
+	# if not, try once more, which will skip graceful exit
+	kill $1
+	go-sleep 1s
+	! kill -0 $1 2>/dev/null && return
+
+	# ok, no hope. kill it to prevent it messing with other tests
+	kill -9 $1 2>/dev/null
+	return 1
+}
+
+test_kill_ipfs_daemon() {
+
+	test_expect_success "'ipfs daemon' is still running" '
+		kill -0 $IPFS_PID
+	'
+
+	test_expect_success "'ipfs daemon' can be killed" '
+		test_kill_repeat_10_sec $IPFS_PID
+	'
+}
diff --git a/sharness/t0030-simple-migration.sh b/sharness/t0030-simple-migration.sh
index 12298a54..5c9f0fd6 100755
--- a/sharness/t0030-simple-migration.sh
+++ b/sharness/t0030-simple-migration.sh
@@ -13,7 +13,7 @@ test_expect_success "'fs-repo-migrations -v' works" '
 '
 
 test_expect_success "'fs-repo-migrations -v' output looks good" '
-	echo "3" >expected &&
+	echo "4" >expected &&
 	test_cmp expected actual
 '
 
@@ -30,7 +30,7 @@ test_expect_success "'fs-repo-migrations -v' works" '
 '
 
 test_expect_success "'fs-repo-migrations -v' output looks good" '
-	echo "3" >expected &&
+	echo "4" >expected &&
 	test_cmp expected actual
 '
 
@@ -47,14 +47,14 @@ test_expect_success ".ipfs/ has been created" '
 '
 
 test_expect_success "'fs-repo-migrations -y' works" '
-	exec_docker "$DOCID" "$GUEST_FS_REPO_MIG -y" >actual
+	exec_docker "$DOCID" "$GUEST_FS_REPO_MIG -y -to=3" >actual
 '
 
 test_expect_success "'fs-repo-migrations -y' output looks good" '
 	grep "Migration 2 to 3 succeeded" actual
 '
 
-test_install_version "v0.4.0-dev"
+test_install_version "v0.4.0"
 
 test_expect_success "stop docker container" '
 	stop_docker "$DOCID"
diff --git a/sharness/t0040-migration-2-3.sh b/sharness/t0040-migration-2-3.sh
index c33cd005..1754b53e 100755
--- a/sharness/t0040-migration-2-3.sh
+++ b/sharness/t0040-migration-2-3.sh
@@ -48,7 +48,7 @@ test_expect_success "ipfs cat output looks good" '
 '
 
 test_expect_success "'fs-repo-migrations -y' works" '
-	exec_docker "$DOCID" "$GUEST_FS_REPO_MIG -y" >actual
+	exec_docker "$DOCID" "$GUEST_FS_REPO_MIG -y -to=3" >actual
 '
 
 test_expect_success "'fs-repo-migrations -y' output looks good" '
@@ -56,7 +56,7 @@ test_expect_success "'fs-repo-migrations -y' output looks good" '
 	test_fsh cat actual
 '
 
-test_install_version "v0.4.0-dev"
+test_install_version "v0.4.0"
 
 test_expect_success "ipfs cat succeeds with hashes from previous version" '
 	exec_docker "$DOCID" "ipfs cat \"$MARS\" \"$VENUS\"" >actual
diff --git a/sharness/t0050-migration-2-3-pins.sh b/sharness/t0050-migration-2-3-pins.sh
index 35ea2b7a..47e2449e 100755
--- a/sharness/t0050-migration-2-3-pins.sh
+++ b/sharness/t0050-migration-2-3-pins.sh
@@ -81,7 +81,7 @@ test_expect_success "'ipfs pin ls --type=all' works" '
 '
 
 test_expect_success "'fs-repo-migrations -y' works" '
-	exec_docker "$DOCID" "$GUEST_FS_REPO_MIG -y" >actual
+	exec_docker "$DOCID" "$GUEST_FS_REPO_MIG -y -to=3" >actual
 '
 
 test_expect_success "'fs-repo-migrations -y' output looks good" '
@@ -89,7 +89,7 @@ test_expect_success "'fs-repo-migrations -y' output looks good" '
 	test_fsh cat actual
 '
 
-test_install_version "v0.4.0-dev"
+test_install_version "v0.4.0"
 
 test_expect_success "added dir is still pinned recursively" '
 	exec_docker "$DOCID" "ipfs pin ls --type=recursive" > recurse_actual2 &&
diff --git a/sharness/t0060-migration-stress.sh b/sharness/t0060-migration-stress.sh
index 571831c8..3115e9b2 100755
--- a/sharness/t0060-migration-stress.sh
+++ b/sharness/t0060-migration-stress.sh
@@ -109,7 +109,7 @@ test_expect_success "get pin lists" '
 
 test_stop_daemon $DOCID
 
-test_install_version "v0.4.0-dev"
+test_install_version "v0.4.0"
 
 test_start_daemon $DOCID
diff --git a/sharness/t0070-migration-many.sh b/sharness/t0070-migration-many.sh
index 09082126..31a23c3e 100755
---
 a/sharness/t0070-migration-many.sh
+++ b/sharness/t0070-migration-many.sh
@@ -26,7 +26,7 @@ test_repo_version "0.3.7"
 test_install_version "v0.3.10"
 test_repo_version "0.3.10"
 
-test_install_version "v0.4.0-dev"
+test_install_version "v0.4.0"
 test_repo_version "0.4.0"
 
 test_install_version "v0.3.8"
diff --git a/sharness/t0080-three-to-four.sh b/sharness/t0080-three-to-four.sh
new file mode 100755
index 00000000..f6e55c6e
--- /dev/null
+++ b/sharness/t0080-three-to-four.sh
@@ -0,0 +1,285 @@
+#!/bin/sh
+
+test_description="Test migration 3 to 4 with lots of objects"
+
+. lib/test-lib.sh
+
+# setup vars for tests
+
+export IPFS_DIST_PATH="/ipfs/QmUGSSMCcPTeLCyrjKdozh2XY9VUdJVYxA6LjyJjLPcXST"
+
+DEPTH=3
+NBDIR=3
+NBFILE=6
+PINTOTAL=20
+
+if test_have_prereq EXPENSIVE
+then
+	DEPTH=6
+	NBDIR=7
+	NBFILE=10
+	PINTOTAL=2000
+fi
+
+PINEACH=$(expr $PINTOTAL / 2)
+
+echo "DEPTH: $DEPTH"
+echo "NBDIR: $NBDIR"
+echo "NBFILE: $NBFILE"
+echo "PINTOTAL: $PINTOTAL"
+echo "PINEACH: $PINEACH"
+
+export GOPATH="$(pwd)/gopath"
+mkdir -p gopath/bin
+export PATH="../bin:$GOPATH/bin:$PATH"
+
+test_install_ipfs_nd "v0.4.2"
+
+test_init_ipfs_nd
+
+test_launch_ipfs_daemon
+
+test_expect_success "make a couple files" '
+	rm -rf manyfiles &&
+	random-files -depth=$DEPTH -dirs=$NBDIR -files=$NBFILE manyfiles > filenames
+'
+
+test_expect_success "add a few files" '
+	ipfs add -r -q manyfiles | tee hashes
+'
+
+test_expect_success "unpin root so we can do things ourselves" '
+	ipfs pin rm $(tail -n1 hashes)
+'
+
+test_expect_success "select random subset to pin recursively and directly" '
+	sort -R hashes | head -n$PINTOTAL > topin &&
+	head -n$PINEACH topin > recurpins &&
+	tail -n$PINEACH topin > directpins
+'
+
+pin_hashes() {
+	hashes_file="$1"
+	opts="$2"
+	for h in `cat $hashes_file`; do
+		if ! ipfs pin add $opts $h; then
+			return 1
+		fi
+	done
+}
+
+test_expect_success "pin some objects recursively" '
+	pin_hashes recurpins
+'
+
+test_expect_success "pin some objects directly" '
+	pin_hashes directpins "-r=false"
+'
+
+test_expect_success "add some files with the path clean bug" '
+	printf ba | ipfs add -q > buggy_hashes &&
+	printf bbd | ipfs add -q >> buggy_hashes &&
+	printf cdbd | ipfs add -q >> buggy_hashes &&
+	printf aabdb | ipfs add -q >> buggy_hashes &&
+	printf bccac | ipfs add -q >> buggy_hashes &&
+	echo 0243397916 | ipfs add -q >> buggy_hashes && # produces /../ in binary key
+	sort buggy_hashes -o buggy_hashes
+'
+
+test_expect_success "get full ref list" '
+	ipfs refs local | sort > start_refs
+'
+
+test_expect_success "ensure buggy hashes don't show up in ref list" '
+	comm -12 start_refs buggy_hashes > badrefs &&
+	test ! -s badrefs
+'
+
+test_expect_success "get pin lists" '
+	ipfs pin ls --type=recursive | sort > start_rec_pins &&
+	ipfs pin ls --type=direct | sort > start_dir_pins &&
+	ipfs pin ls --type=indirect | sort > start_ind_pins
+'
+
+test_kill_ipfs_daemon
+
+test_install_ipfs_nd "v0.4.3-dev"
+
+test_launch_ipfs_daemon
+
+test_expect_success "list all refs after migration" '
+	ipfs refs local | sort > after_refs
+'
+
+test_expect_success "list all pins after migration" '
+	ipfs pin ls --type=recursive | sort > after_rec_pins &&
+	ipfs pin ls --type=direct | sort > after_dir_pins &&
+	ipfs pin ls --type=indirect | sort > after_ind_pins
+'
+
+test_expect_success "refs look right" '
+	comm -23 start_refs after_refs > missing_refs &&
+	touch empty_refs_file &&
+	test_cmp missing_refs empty_refs_file
+'
+
+test_expect_success "pins all look the same" '
+	test_cmp start_rec_pins after_rec_pins &&
+	test_cmp start_dir_pins after_dir_pins &&
+	test_cmp start_ind_pins after_ind_pins
+'
+
+test_expect_success "manually compute gc set" '
+	cat after_rec_pins after_dir_pins after_ind_pins | sort > all_pinned
+'
+
+test_expect_success "run a gc" '
+	ipfs repo gc | sort > gc_out
+'
+
+test_expect_success "no pinned objects were gc'ed" '
+	comm -12 gc_out all_pinned > gced_pinned_objects &&
+	test_cmp empty_refs_file gced_pinned_objects
+'
+
+test_expect_success "list all pins after gc" '
+	ipfs pin ls --type=recursive | sort > gc_rec_pins &&
+	ipfs pin ls --type=direct | sort > gc_dir_pins &&
+	ipfs pin ls --type=indirect | sort > gc_ind_pins
+'
+
+test_expect_success "pins all look the same" '
+	test_cmp start_rec_pins gc_rec_pins &&
+	test_cmp start_dir_pins gc_dir_pins &&
+	test_cmp start_ind_pins gc_ind_pins
+'
+
+test_expect_success "fetch all refs" '
+	ipfs refs local | sort | uniq > post_gc_refs
+'
+
+first_elems() {
+	cat "$1" | awk '{ print $1 }'
+}
+
+test_expect_success "get just hashes of pins" '
+	first_elems all_pinned | sort | uniq > all_pinned_refs
+'
+
+test_kill_ipfs_daemon
+
+test_can_fetch_buggy_hashes() {
+	ref_file="$1"
+	for ref in `cat $ref_file`; do
+		if ! ipfs block get $ref > /dev/null; then
+			echo "FAILURE: $ref"
+			return 1
+		fi
+	done
+}
+
+# this bug was fixed in 0.4.3
+test_expect_success "no pinned objects are missing from local refs" '
+	comm -23 all_pinned_refs post_gc_refs > missing_pinned_objects &&
+	printf "" > empty_file &&
+	test_cmp empty_file missing_pinned_objects
+'
+
+test_expect_success "make a couple more files" '
+	random-files -depth=$DEPTH -dirs=$NBDIR -files=$NBFILE many_more_files > more_filenames
+'
+
+test_expect_success "add the new files" '
+	ipfs add -r -q many_more_files | tee more_hashes
+'
+
+test_expect_success "unpin root so we can do things ourselves" '
+	ipfs pin rm $(tail -n1 more_hashes)
+'
+
+test_expect_success "select random subset to pin recursively and directly" '
+	sort -R more_hashes | head -n$PINTOTAL > more_topin &&
+	head -n$PINEACH more_topin > more_recurpins &&
+	tail -n$PINEACH more_topin > more_directpins
+'
+
+test_expect_success "pin some objects recursively" '
+	pin_hashes more_recurpins
+'
+
+test_expect_success "pin some objects directly" '
+	pin_hashes more_directpins "-r=false"
+'
+
+test_expect_success "get full ref list" '
+	ipfs refs local | sort > more_start_refs
+'
+
+test_expect_success "get pin lists" '
+	ipfs pin ls --type=recursive | sort > more_start_rec_pins &&
+	ipfs pin ls --type=direct | sort > more_start_dir_pins &&
+	ipfs pin ls --type=indirect | sort > more_start_ind_pins
+'
+
+test_expect_success "'ipfs-3-to-4 -revert' succeeds" '
+	ipfs-3-to-4 -revert -path="$IPFS_PATH" >actual
+'
+
+test_expect_success "'ipfs-3-to-4 -revert' output looks good" '
+	grep "reverting blocks" actual ||
+	test_fsh cat actual
+'
+
+test_install_ipfs_nd "v0.4.2"
+
+test_launch_ipfs_daemon
+
+test_expect_success "list all refs after reverting migration" '
+	ipfs refs local | sort > after_revert_refs
+'
+
+test_expect_success "list all pins after reverting migration" '
+	ipfs pin ls --type=recursive | sort > after_revert_rec_pins &&
+	ipfs pin ls --type=direct | sort > after_revert_dir_pins &&
+	ipfs pin ls --type=indirect | sort > after_revert_ind_pins
+'
+
+test_expect_success "refs look right" '
+	comm -23 more_start_refs after_revert_refs > missing_refs &&
+	test_can_fetch_buggy_hashes missing_refs
+'
+
+test_expect_success "pins all look the same" '
+	test_cmp more_start_rec_pins after_revert_rec_pins &&
+	test_cmp more_start_dir_pins after_revert_dir_pins &&
+	test_cmp more_start_ind_pins after_revert_ind_pins
+'
+
+test_expect_success "manually compute gc set" '
+	cat after_revert_rec_pins after_revert_dir_pins after_revert_ind_pins | sort > after_revert_all_pinned
+'
+
+test_expect_success "run a gc" '
+	ipfs repo gc | sort > gc_out
+'
+
+test_expect_success "no pinned objects were gc'ed" '
+	comm -12 gc_out after_revert_all_pinned > gced_pinned_objects &&
+	test_cmp empty_refs_file gced_pinned_objects
+'
+
+test_kill_ipfs_daemon
+
+test_done
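The revert path exercised by t0080 above relies on the v3-to-v4 key transformation being lossless. A standalone sketch of that round-trip, mirroring the transferIpnsEntries/revertIpnsEntries logic from the migration; the import path is the base32 fork vendored in this repo, and the multihash bytes are illustrative:

    package main

    import (
    	"bytes"
    	"fmt"

    	// Assumed import path: the base32 fork vendored at ipfs-3-to-4/base32.
    	"github.com/ipfs/fs-repo-migrations/ipfs-3-to-4/base32"
    )

    func main() {
    	// A fake 34-byte raw multihash (0x12 0x20 prefix + 32 payload bytes).
    	old := append([]byte{0x12, 0x20}, bytes.Repeat([]byte{0xab}, 32)...)

    	// Forward migration: raw bytes -> unpadded base32.
    	enc := base32.RawStdEncoding.EncodeToString(old)
    	fmt.Println(len(enc)) // 55 chars for 34 bytes, hence the 6+55 = 61 length check on revert

    	// Revert: decode back and verify nothing was lost.
    	dec, err := base32.RawStdEncoding.DecodeString(enc)
    	if err != nil || !bytes.Equal(dec, old) {
    		panic("round-trip mismatch")
    	}
    	fmt.Println("round-trip ok")
    }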