githaven-fork/vendor/github.com/andybalholm/brotli/ringbuffer.go
package brotli

/* Copyright 2013 Google Inc. All Rights Reserved.
   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* A ringBuffer(window_bits, tail_bits) contains `1 << window_bits' bytes of
   data in a circular manner: writing a byte writes it to:
       `position() % (1 << window_bits)'.
   For convenience, the ringBuffer array contains another copy of the
   first `1 << tail_bits' bytes:
       buffer_[i] == buffer_[i + (1 << window_bits)], if i < (1 << tail_bits),
   and another copy of the last two bytes:
       buffer_[-1] == buffer_[(1 << window_bits) - 1] and
       buffer_[-2] == buffer_[(1 << window_bits) - 2]. */
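/* For example (illustrative numbers, far smaller than a real window): with
   window_bits = 4 and tail_bits = 2 the ring holds 16 bytes, the first 4 of
   which are mirrored at buffer_[16..19], and the two bytes before the ring
   (data_[0] and data_[1]) mirror buffer_[14] and buffer_[15]. */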
type ringBuffer struct {
	size_       uint32
	mask_       uint32
	tail_size_  uint32
	total_size_ uint32
	cur_size_   uint32
	pos_        uint32
	data_       []byte
	buffer_     []byte
}

// ringBufferInit resets the write position; the backing buffers are
// allocated lazily by the first ringBufferWrite.
func ringBufferInit(rb *ringBuffer) {
	rb.pos_ = 0
}

// ringBufferSetup derives the buffer geometry from the encoder parameters:
// the window size, its mask, and the tail-copy size.
func ringBufferSetup(params *encoderParams, rb *ringBuffer) {
	var window_bits int = computeRbBits(params)
	var tail_bits int = params.lgblock
	rb.size_ = 1 << uint(window_bits)
	rb.mask_ = (1 << uint(window_bits)) - 1
	rb.tail_size_ = 1 << uint(tail_bits)
	rb.total_size_ = rb.size_ + rb.tail_size_
}
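
/* For instance (hypothetical parameters): window_bits = 22 and lgblock = 16
   would give size_ = 4 MiB, mask_ = 0x3fffff, tail_size_ = 64 KiB, and
   total_size_ = size_ + tail_size_. */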

const kSlackForEightByteHashingEverywhere uint = 7

/* Allocates or re-allocates data_ to the given length plus some slack
   region before and after. Fills the slack regions with zeros. */
func ringBufferInitBuffer(buflen uint32, rb *ringBuffer) {
	var new_data []byte
	var i uint
	size := 2 + int(buflen) + int(kSlackForEightByteHashingEverywhere)
	if cap(rb.data_) < size {
		new_data = make([]byte, size)
	} else {
		new_data = rb.data_[:size]
	}
	if rb.data_ != nil {
		copy(new_data, rb.data_[:2+rb.cur_size_+uint32(kSlackForEightByteHashingEverywhere)])
	}

	rb.data_ = new_data
	rb.cur_size_ = buflen
	rb.buffer_ = rb.data_[2:]

	/* Zero the two-byte prefix and the trailing hashing slack. */
	rb.data_[0] = 0
	rb.data_[1] = 0
	for i = 0; i < kSlackForEightByteHashingEverywhere; i++ {
		rb.buffer_[rb.cur_size_+uint32(i)] = 0
	}
}
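
/* After ringBufferInitBuffer the data_ layout is (illustrative sketch):

       data_[0] data_[1] | buffer_[0] ... buffer_[cur_size_-1] | 7 slack bytes

   The two leading bytes hold the buffer_[-2]/buffer_[-1] copies, and the
   trailing slack lets the hash functions read eight bytes at any position. */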

// ringBufferWriteTail copies the leading bytes of a write that lands in the
// first tail_size_ positions of the window into the tail mirror that follows
// the window.
func ringBufferWriteTail(bytes []byte, n uint, rb *ringBuffer) {
	var masked_pos uint = uint(rb.pos_ & rb.mask_)
	if uint32(masked_pos) < rb.tail_size_ {
		/* Just fill the tail buffer with the beginning data. */
		var p uint = uint(rb.size_ + uint32(masked_pos))
		copy(rb.buffer_[p:], bytes[:brotli_min_size_t(n, uint(rb.tail_size_-uint32(masked_pos)))])
	}
}
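
/* For example, with size_ = 16 and tail_size_ = 4 (illustrative numbers),
   a write landing at masked position 1 also copies up to 3 of its leading
   bytes into positions 17..19, preserving buffer_[i] == buffer_[i+16] for
   every mirrored index i < 4. */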

/* Push bytes into the ring buffer. */
func ringBufferWrite(bytes []byte, n uint, rb *ringBuffer) {
	if rb.pos_ == 0 && uint32(n) < rb.tail_size_ {
		/* Special case for the first write: to process the first block, we don't
		   need to allocate the whole ring-buffer and we don't need the tail
		   either. However, we do this memory usage optimization only if the
		   first write is less than the tail size, which is also the input block
		   size, otherwise it is likely that other blocks will follow and we
		   will need to reallocate to the full size anyway. */
		rb.pos_ = uint32(n)

		ringBufferInitBuffer(rb.pos_, rb)
		copy(rb.buffer_, bytes[:n])
		return
	}

	if rb.cur_size_ < rb.total_size_ {
		/* Lazily allocate the full buffer. */
		ringBufferInitBuffer(rb.total_size_, rb)

		/* Initialize the last two bytes to zero, so that we don't have to worry
		   later when we copy the last two bytes to the first two positions. */
		rb.buffer_[rb.size_-2] = 0
		rb.buffer_[rb.size_-1] = 0
	}

	{
		var masked_pos uint = uint(rb.pos_ & rb.mask_)

		/* The length of the writes is limited so that we do not need to worry
		   about a write wrapping around the ring buffer more than once. */
		ringBufferWriteTail(bytes, n, rb)

		if uint32(masked_pos+n) <= rb.size_ {
			/* A single write fits. */
			copy(rb.buffer_[masked_pos:], bytes[:n])
		} else {
			/* Split into two writes.
			   Copy into the end of the buffer, including the tail buffer. */
			copy(rb.buffer_[masked_pos:], bytes[:brotli_min_size_t(n, uint(rb.total_size_-uint32(masked_pos)))])

			/* Copy into the beginning of the buffer. */
			copy(rb.buffer_, bytes[rb.size_-uint32(masked_pos):][:uint32(n)-(rb.size_-uint32(masked_pos))])
		}
	}

	{
		var not_first_lap bool = rb.pos_&(1<<31) != 0
		var rb_pos_mask uint32 = (1 << 31) - 1

		/* Keep the copies of the last two bytes in sync with the window, so
		   that reads of buffer_[-1] and buffer_[-2] via data_ stay valid. */
		rb.data_[0] = rb.buffer_[rb.size_-2]
		rb.data_[1] = rb.buffer_[rb.size_-1]

		/* Advance the position; bit 31 is reserved as a "not first lap" flag,
		   so a buffer that has wrapped stays distinguishable from a fresh one. */
		rb.pos_ = (rb.pos_ & rb_pos_mask) + (uint32(n) & rb_pos_mask)
		if not_first_lap {
			/* Wrap, but preserve not-a-first-lap feature. */
			rb.pos_ |= 1 << 31
		}
	}
}
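
// Illustrative sketch only, not part of the upstream vendored file: a
// hypothetical helper that shows ringBufferWrite wrapping. It fills in the
// geometry by hand with tiny values instead of calling ringBufferSetup, so
// the numbers are easy to follow; real encoder windows are far larger.
func ringBufferWriteSketch() {
	var rb ringBuffer
	ringBufferInit(&rb)
	rb.size_ = 1 << 4 /* hypothetical 16-byte window */
	rb.mask_ = rb.size_ - 1
	rb.tail_size_ = 1 << 2 /* 4-byte tail mirror */
	rb.total_size_ = rb.size_ + rb.tail_size_

	data := make([]byte, 24)
	for i := range data {
		data[i] = byte(i)
	}

	/* 24 bytes exceed the tail size, so the full buffer is allocated and the
	   write wraps once: bytes 16..23 land at positions 0..7. */
	ringBufferWrite(data, uint(len(data)), &rb)
	_ = rb.buffer_[rb.pos_&rb.mask_] /* masked position is now 24 & 15 == 8 */
}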