s2: Clarify EncodeBuffer usage (#384)
With concurrent encoding, a buffer passed to EncodeBuffer cannot safely be reused by the caller until Flush or Close has returned.
klauspost authored Jun 2, 2021
1 parent d172db7 commit ab9d76f
Showing 2 changed files with 107 additions and 1 deletion.
4 changes: 3 additions & 1 deletion s2/encode.go
@@ -487,7 +487,9 @@ func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) {
// EncodeBuffer will add a buffer to the stream.
// This is the fastest way to encode a stream,
// but the input buffer cannot be written to by the caller
-// until this function, Flush or Close has been called.
+// until Flush or Close has been called when concurrency != 1.
+//
+// If you cannot control that, use the regular Write function.
//
// Note that input is not buffered.
// This means that each write will result in discrete blocks being created.
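
A caller-side sketch of the contract documented above (not part of this commit; the buffer sizes and writer setup are illustrative): with the default concurrency, a buffer handed to EncodeBuffer must stay untouched until Flush or Close returns, and Write is the fallback when that cannot be guaranteed.

package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/s2"
)

func main() {
	var dst bytes.Buffer
	enc := s2.NewWriter(&dst) // default options: concurrency != 1

	// Safe with concurrency != 1: every EncodeBuffer call gets its own
	// buffer, and none of them are modified before Flush has returned.
	for i := 0; i < 3; i++ {
		buf := make([]byte, 1<<20) // illustrative size
		for j := range buf {
			buf[j] = byte(i)
		}
		if err := enc.EncodeBuffer(buf); err != nil {
			log.Fatal(err)
		}
	}
	if err := enc.Flush(); err != nil {
		log.Fatal(err)
	}
	// Only now may the buffers passed above be reused or modified.

	// If the same buffer must be reused immediately, use Write instead;
	// it does not retain the input after the call returns.
	reusable := make([]byte, 1<<20)
	if _, err := enc.Write(reusable); err != nil {
		log.Fatal(err)
	}
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("wrote %d compressed bytes", dst.Len())
}

An alternative, exercised by TestBigEncodeBufferSync below, is to construct the writer with WriterConcurrency(1), in which case the buffer may be reused as soon as EncodeBuffer returns.
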
104 changes: 104 additions & 0 deletions s2/encode_test.go
@@ -277,6 +277,110 @@ func TestWriterPadding(t *testing.T) {
}
}

func TestBigRegularWrites(t *testing.T) {
var buf [maxBlockSize * 2]byte
dst := bytes.NewBuffer(nil)
enc := NewWriter(dst, WriterBestCompression())
max := uint8(10)
if testing.Short() {
max = 4
}
for n := uint8(0); n < max; n++ {
for i := range buf[:] {
buf[i] = n
}
// Writes may not keep a reference to the data beyond the Write call.
_, err := enc.Write(buf[:])
if err != nil {
t.Fatal(err)
}
}
err := enc.Close()
if err != nil {
t.Fatal(err)
}

dec := NewReader(dst)
_, err = io.Copy(ioutil.Discard, dec)
if err != nil {
t.Fatal(err)
}
}

func TestBigEncodeBuffer(t *testing.T) {
const blockSize = 1 << 20
var buf [blockSize * 2]byte
dst := bytes.NewBuffer(nil)
enc := NewWriter(dst, WriterBlockSize(blockSize), WriterBestCompression())
max := uint8(10)
if testing.Short() {
max = 4
}
for n := uint8(0); n < max; n++ {
// Change the buffer to a new value.
for i := range buf[:] {
buf[i] = n
}
err := enc.EncodeBuffer(buf[:])
if err != nil {
t.Fatal(err)
}
// We can write it again since we aren't changing it.
err = enc.EncodeBuffer(buf[:])
if err != nil {
t.Fatal(err)
}
err = enc.Flush()
if err != nil {
t.Fatal(err)
}
}
err := enc.Close()
if err != nil {
t.Fatal(err)
}

dec := NewReader(dst)
n, err := io.Copy(ioutil.Discard, dec)
if err != nil {
t.Fatal(err)
}
t.Log(n)
}

func TestBigEncodeBufferSync(t *testing.T) {
const blockSize = 1 << 20
var buf [blockSize * 2]byte
dst := bytes.NewBuffer(nil)
enc := NewWriter(dst, WriterBlockSize(blockSize), WriterConcurrency(1), WriterBestCompression())
max := uint8(10)
if testing.Short() {
max = 2
}
for n := uint8(0); n < max; n++ {
// Change the buffer to a new value.
for i := range buf[:] {
buf[i] = n
}
// When WriterConcurrency == 1 we can encode and reuse the buffer.
err := enc.EncodeBuffer(buf[:])
if err != nil {
t.Fatal(err)
}
}
err := enc.Close()
if err != nil {
t.Fatal(err)
}

dec := NewReader(dst)
n, err := io.Copy(ioutil.Discard, dec)
if err != nil {
t.Fatal(err)
}
t.Log(n)
}

func BenchmarkWriterRandom(b *testing.B) {
rng := rand.New(rand.NewSource(1))
// Make max window so we never get matches.
