githaven-fork/vendor/github.com/andybalholm/brotli/reader.go
Commit 86e2789960: Vendor Update (#16121)
* update github.com/PuerkitoBio/goquery

* update github.com/alecthomas/chroma

* update github.com/blevesearch/bleve/v2

* update github.com/caddyserver/certmagic

* update github.com/go-enry/go-enry/v2

* update github.com/go-git/go-billy/v5

* update github.com/go-git/go-git/v5

* update github.com/go-redis/redis/v8

* update github.com/go-testfixtures/testfixtures/v3

* update github.com/jaytaylor/html2text

* update github.com/json-iterator/go

* update github.com/klauspost/compress

* update github.com/markbates/goth

* update github.com/mattn/go-isatty

* update github.com/mholt/archiver/v3

* update github.com/microcosm-cc/bluemonday

* update github.com/minio/minio-go/v7

* update github.com/prometheus/client_golang

* update github.com/unrolled/render

* update github.com/xanzy/go-gitlab

* update github.com/yuin/goldmark

* update github.com/yuin/goldmark-highlighting

Co-authored-by: techknowlogick <techknowlogick@gitea.io>
2021-06-10 16:44:25 +02:00

package brotli

import (
	"errors"
	"io"
)

type decodeError int

func (err decodeError) Error() string {
	return "brotli: " + string(decoderErrorString(int(err)))
}

var errExcessiveInput = errors.New("brotli: excessive input")
var errInvalidState = errors.New("brotli: invalid state")

// readBufSize is a "good" buffer size that avoids excessive round-trips
// between C and Go but doesn't waste too much memory on buffering.
// It is arbitrarily chosen to be equal to the constant used in io.Copy.
const readBufSize = 32 * 1024

// NewReader creates a new Reader that reads and decompresses brotli data from src.
func NewReader(src io.Reader) *Reader {
	r := new(Reader)
	r.Reset(src)
	return r
}

// Reset discards the Reader's state and makes it equivalent to the result of
// its original state from NewReader, but reading from src instead.
// This permits reusing a Reader rather than allocating a new one.
// The returned error is always nil.
func (r *Reader) Reset(src io.Reader) error {
	decoderStateInit(r)
	r.src = src
	if r.buf == nil {
		r.buf = make([]byte, readBufSize)
	}
	return nil
}

func (r *Reader) Read(p []byte) (n int, err error) {
	if !decoderHasMoreOutput(r) && len(r.in) == 0 {
		m, readErr := r.src.Read(r.buf)
		if m == 0 {
			// If readErr is `nil`, we just proxy underlying stream behavior.
			return 0, readErr
		}
		r.in = r.buf[:m]
	}

	if len(p) == 0 {
		return 0, nil
	}

	for {
		var written uint
		in_len := uint(len(r.in))
		out_len := uint(len(p))
		in_remaining := in_len
		out_remaining := out_len
		result := decoderDecompressStream(r, &in_remaining, &r.in, &out_remaining, &p)
		written = out_len - out_remaining
		n = int(written)

		switch result {
		case decoderResultSuccess:
			if len(r.in) > 0 {
				return n, errExcessiveInput
			}
			return n, nil
		case decoderResultError:
			return n, decodeError(decoderGetErrorCode(r))
		case decoderResultNeedsMoreOutput:
			if n == 0 {
				return 0, io.ErrShortBuffer
			}
			return n, nil
		case decoderNeedsMoreInput:
		}

		if len(r.in) != 0 {
			return 0, errInvalidState
		}

		// Calling r.src.Read may block. Don't block if we have data to return.
		if n > 0 {
			return n, nil
		}

		// Top off the buffer.
		encN, err := r.src.Read(r.buf)
		if encN == 0 {
			// Not enough data to complete decoding.
			if err == io.EOF {
				return 0, io.ErrUnexpectedEOF
			}
			return 0, err
		}

		r.in = r.buf[:encN]
	}
}
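
For reference, here is a minimal usage sketch of the reader API above. It is an illustration, not part of the vendored file: it assumes the package is imported as github.com/andybalholm/brotli and uses brotli.NewWriter from the package's writer side (not shown in this file) to produce a compressed stream, then decodes it with NewReader and reuses the same Reader via Reset.

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/andybalholm/brotli"
)

func main() {
	// Produce a small brotli stream so the example is self-contained.
	var compressed bytes.Buffer
	w := brotli.NewWriter(&compressed)
	if _, err := w.Write([]byte("hello, brotli")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	// NewReader never fails; all decode errors surface from Read.
	r := brotli.NewReader(&compressed)
	out, err := io.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", out)

	// Reset reuses the Reader (and its 32 KiB read buffer) for another stream.
	var second bytes.Buffer
	w2 := brotli.NewWriter(&second)
	if _, err := w2.Write([]byte("another stream")); err != nil {
		log.Fatal(err)
	}
	if err := w2.Close(); err != nil {
		log.Fatal(err)
	}
	_ = r.Reset(&second) // documented above to always return nil
	out, err = io.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", out)
}

Because NewReader defers all error reporting to Read, callers only need to check errors on the read path: decode failures come back as decodeError values carrying the "brotli:" prefix defined above, and truncated input is reported as io.ErrUnexpectedEOF.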