add changelog

parent 14dd738de7
commit d8c0b0c0f2
1 .gitignore vendored
@@ -160,4 +160,3 @@ fabric.properties

# End of https://www.toptal.com/developers/gitignore/api/visualstudiocode,goland,go
/recover.bolt
vendor
4 CHANGELOG.md Normal file
@@ -0,0 +1,4 @@
# v1.0.0

- Fixed the bug that caused the logger to hang.
- Split the handlers into admin and client groups; a client can no longer reach the admin handlers, because those endpoints are no longer exposed to it (see the sketch below).
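The handler split in the second entry can be pictured with a minimal sketch (hypothetical names; the project's actual routers are not shown in this diff): admin and client handlers live on separate routers bound to separate listeners, so client traffic has no route to the admin endpoints at all.

```go
package main

import (
    "log"
    "net/http"
)

func main() {
    // Client-facing router: only public handlers are registered here.
    clientMux := http.NewServeMux()
    clientMux.HandleFunc("/api/ping", func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("pong"))
    })

    // Admin router on a separate, internal-only listener: requests coming
    // through the public port never see these routes.
    adminMux := http.NewServeMux()
    adminMux.HandleFunc("/admin/stats", func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("stats"))
    })

    go func() { log.Fatal(http.ListenAndServe("127.0.0.1:9001", adminMux)) }()
    log.Fatal(http.ListenAndServe(":8080", clientMux))
}
```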
@@ -8,7 +8,11 @@ WORKDIR /app
# Create binary directory
RUN mkdir /app/bin -p
# Add main files to app
RUN apk add git
ADD . .
ENV GOPRIVATE=penahub.gitlab.yandexcloud.net/backend/penahub_common
RUN git config --global url."https://buildToken:glpat-axA8ttckx3aPf_xd2Dym@penahub.gitlab.yandexcloud.net/".insteadOf "https://penahub.gitlab.yandexcloud.net/"
RUN go mod download
# Build app
RUN GOOS=linux go build -o bin ./...
2 go.mod
@@ -20,7 +20,7 @@ require (
    google.golang.org/grpc v1.60.1
    google.golang.org/protobuf v1.32.0
    penahub.gitlab.yandexcloud.net/backend/penahub_common v0.0.0-20240607202348-efe5f2bf3e8c
    penahub.gitlab.yandexcloud.net/devops/linters/golang.git v0.0.0-20240803124813-79e62d2acf3c
    penahub.gitlab.yandexcloud.net/devops/linters/golang.git v0.0.0-20240828181923-80f1728efccc
    penahub.gitlab.yandexcloud.net/external/trashlog v0.1.5
)
2 go.sum
@@ -295,5 +295,7 @@ penahub.gitlab.yandexcloud.net/backend/penahub_common v0.0.0-20240607202348-efe5
penahub.gitlab.yandexcloud.net/backend/penahub_common v0.0.0-20240607202348-efe5f2bf3e8c/go.mod h1:+bPxq2wfW5S1gd+83vZYmHm33AE7nEBfznWS8AM1TKE=
penahub.gitlab.yandexcloud.net/devops/linters/golang.git v0.0.0-20240803124813-79e62d2acf3c h1:imtXaIVscs8it6SfAmDxjNxqQSF44GgCTl1N6JT6unA=
penahub.gitlab.yandexcloud.net/devops/linters/golang.git v0.0.0-20240803124813-79e62d2acf3c/go.mod h1:i7M72RIpkSjcQtHID6KKj9RT/EYZ1rxS6tIPKWa/BSY=
penahub.gitlab.yandexcloud.net/devops/linters/golang.git v0.0.0-20240828181923-80f1728efccc h1:tKp1NYn+FJOq/mCyc4SJjcGdYdr1AhXnyw7f80uex0A=
penahub.gitlab.yandexcloud.net/devops/linters/golang.git v0.0.0-20240828181923-80f1728efccc/go.mod h1:i7M72RIpkSjcQtHID6KKj9RT/EYZ1rxS6tIPKWa/BSY=
penahub.gitlab.yandexcloud.net/external/trashlog v0.1.5 h1:amsK0bkSJxBisk334aFo5ZmVPvN1dBT0Sv5j3V5IsT8=
penahub.gitlab.yandexcloud.net/external/trashlog v0.1.5/go.mod h1:J8kQNEP4bL7ZNKHxuT4tfe6a3FHyovpAPkyytN4qllc=
28 vendor/github.com/ClickHouse/clickhouse-go/.gitignore generated vendored
@ -1,28 +0,0 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.out
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
||||
|
||||
coverage.txt
|
||||
.idea/**
|
20 vendor/github.com/ClickHouse/clickhouse-go/.travis.yml generated vendored
@ -1,20 +0,0 @@
|
||||
sudo: required
|
||||
language: go
|
||||
go:
|
||||
- 1.15.x
|
||||
- 1.16.x
|
||||
go_import_path: github.com/ClickHouse/clickhouse-go
|
||||
services:
|
||||
- docker
|
||||
install:
|
||||
- export GO111MODULE="on"
|
||||
- go mod vendor
|
||||
|
||||
before_install:
|
||||
- docker --version
|
||||
- docker-compose --version
|
||||
- docker-compose up -d
|
||||
script:
|
||||
- ./go.test.sh
|
||||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash)
|
10 vendor/github.com/ClickHouse/clickhouse-go/CONTRIBUTING.md generated vendored
@ -1,10 +0,0 @@
|
||||
# Contributing notes
|
||||
|
||||
## Local setup
|
||||
|
||||
The easiest way to run tests is to use Docker Compose:
|
||||
|
||||
```
|
||||
docker-compose up
|
||||
make
|
||||
```
|
21 vendor/github.com/ClickHouse/clickhouse-go/LICENSE generated vendored
@ -1,21 +0,0 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2017-2020 Kirill Shvakov
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
8 vendor/github.com/ClickHouse/clickhouse-go/Makefile generated vendored
@ -1,8 +0,0 @@
|
||||
test:
|
||||
go install -race -v
|
||||
go test -i -v
|
||||
go test -race -timeout 30s -v .
|
||||
|
||||
coverage:
|
||||
go test -coverprofile=coverage.out -v .
|
||||
go tool cover -html=coverage.out
|
307 vendor/github.com/ClickHouse/clickhouse-go/README.md generated vendored
@ -1,307 +0,0 @@
|
||||
# ClickHouse [](https://travis-ci.org/ClickHouse/clickhouse-go) [](https://goreportcard.com/report/github.com/ClickHouse/clickhouse-go) [](https://codecov.io/gh/ClickHouse/clickhouse-go)
|
||||
|
||||
Golang SQL database driver for [Yandex ClickHouse](https://clickhouse.yandex/)
|
||||
|
||||
## Key features
|
||||
|
||||
* Uses native ClickHouse TCP client-server protocol
|
||||
* Compatibility with `database/sql`
|
||||
* Round Robin load-balancing
|
||||
* Bulk write support : `begin->prepare->(in loop exec)->commit`
|
||||
* LZ4 compression support (default is pure go lz4 or switch to use cgo lz4 by turning clz4 build tags on)
|
||||
* External Tables support
|
||||
|
||||
## DSN
|
||||
|
||||
* username/password - auth credentials
|
||||
* database - select the current default database
|
||||
* read_timeout/write_timeout - timeout in second
|
||||
* no_delay - disable/enable the Nagle Algorithm for tcp socket (default is 'true' - disable)
|
||||
* alt_hosts - comma-separated list of single address hosts for load-balancing
|
||||
* connection_open_strategy - random/in_order (default random).
|
||||
* random - choose a random server from the set
|
||||
* in_order - first live server is chosen in specified order
|
||||
* time_random - choose random (based on the current time) server from the set. This option differs from `random` because randomness is based on the current time rather than on the number of previous connections.
|
||||
* block_size - maximum rows in block (default is 1000000). If the rows are larger, the data will be split into several blocks to send to the server. If one block was sent to the server, the data would be persisted on the server disk, and we can't roll back the transaction. So always keep in mind that the batch size is no larger than the block_size if you want an atomic batch insert.
|
||||
* pool_size - the maximum amount of preallocated byte chunks used in queries (default is 100). Decrease this if you experience memory problems at the expense of more GC pressure and vice versa.
|
||||
* debug - enable debug output (boolean value)
|
||||
* compress - enable lz4 compression (integer value, default is '0')
|
||||
* check_connection_liveness - on supported platforms non-secure connections retrieved from the connection pool are checked in beginTx() for liveness before using them. If the check fails, the respective connection is marked as bad and the query retried with another connection. (boolean value, default is 'true')
|
||||
|
||||
SSL/TLS parameters:
|
||||
|
||||
* secure - establish secure connection (default is false)
|
||||
* skip_verify - skip certificate verification (default is false)
|
||||
* tls_config - name of a TLS config with client certificates, registered using `clickhouse.RegisterTLSConfig()`; implies secure to be true, unless explicitly specified
|
||||
|
||||
Example:
|
||||
|
||||
```sh
|
||||
tcp://host1:9000?username=user&password=qwerty&database=clicks&read_timeout=10&write_timeout=20&alt_hosts=host2:9000,host3:9000
|
||||
```
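The `tls_config` parameter refers to a config registered in code. A minimal sketch of how that registration might look (the CA path is hypothetical, and the `RegisterTLSConfig` call is assumed to take a name and a `*tls.Config` as its name suggests):

```go
package main

import (
    "crypto/tls"
    "crypto/x509"
    "database/sql"
    "io/ioutil"
    "log"

    "github.com/ClickHouse/clickhouse-go"
)

func main() {
    // Load the CA that signed the server certificate (hypothetical path).
    pem, err := ioutil.ReadFile("/path/to/ca.pem")
    if err != nil {
        log.Fatal(err)
    }
    pool := x509.NewCertPool()
    pool.AppendCertsFromPEM(pem)

    // Register the config under a name, then reference it in the DSN.
    if err := clickhouse.RegisterTLSConfig("mytls", &tls.Config{RootCAs: pool}); err != nil {
        log.Fatal(err)
    }
    connect, err := sql.Open("clickhouse", "tcp://host1:9000?username=user&tls_config=mytls")
    if err != nil {
        log.Fatal(err)
    }
    defer connect.Close()
}
```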
## Supported data types

* UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64
* Float32, Float64
* String
* FixedString(N)
* Date
* DateTime
* IPv4
* IPv6
* Enum
* UUID
* Nullable(T)
* [Array(T)](https://clickhouse.yandex/reference_en.html#Array(T)) [godoc](https://godoc.org/github.com/ClickHouse/clickhouse-go#Array)
* Array(Nullable(T))
* Tuple(...T)

## TODO

* Support other compression methods (zstd ...)

## Install

```sh
go get -u github.com/ClickHouse/clickhouse-go
```

## Examples

```go
package main

import (
    "database/sql"
    "fmt"
    "log"
    "time"

    "github.com/ClickHouse/clickhouse-go"
)

func main() {
    connect, err := sql.Open("clickhouse", "tcp://127.0.0.1:9000?debug=true")
    if err != nil {
        log.Fatal(err)
    }
    if err := connect.Ping(); err != nil {
        if exception, ok := err.(*clickhouse.Exception); ok {
            fmt.Printf("[%d] %s \n%s\n", exception.Code, exception.Message, exception.StackTrace)
        } else {
            fmt.Println(err)
        }
        return
    }

    _, err = connect.Exec(`
        CREATE TABLE IF NOT EXISTS example (
            country_code FixedString(2),
            os_id        UInt8,
            browser_id   UInt8,
            categories   Array(Int16),
            action_day   Date,
            action_time  DateTime
        ) engine=Memory
    `)

    if err != nil {
        log.Fatal(err)
    }
    var (
        tx, _   = connect.Begin()
        stmt, _ = tx.Prepare("INSERT INTO example (country_code, os_id, browser_id, categories, action_day, action_time) VALUES (?, ?, ?, ?, ?, ?)")
    )
    defer stmt.Close()

    for i := 0; i < 100; i++ {
        if _, err := stmt.Exec(
            "RU",
            10+i,
            100+i,
            clickhouse.Array([]int16{1, 2, 3}),
            time.Now(),
            time.Now(),
        ); err != nil {
            log.Fatal(err)
        }
    }

    if err := tx.Commit(); err != nil {
        log.Fatal(err)
    }

    rows, err := connect.Query("SELECT country_code, os_id, browser_id, categories, action_day, action_time FROM example")
    if err != nil {
        log.Fatal(err)
    }
    defer rows.Close()

    for rows.Next() {
        var (
            country               string
            os, browser           uint8
            categories            []int16
            actionDay, actionTime time.Time
        )
        if err := rows.Scan(&country, &os, &browser, &categories, &actionDay, &actionTime); err != nil {
            log.Fatal(err)
        }
        log.Printf("country: %s, os: %d, browser: %d, categories: %v, action_day: %s, action_time: %s", country, os, browser, categories, actionDay, actionTime)
    }

    if err := rows.Err(); err != nil {
        log.Fatal(err)
    }

    if _, err := connect.Exec("DROP TABLE example"); err != nil {
        log.Fatal(err)
    }
}
```

### Use [sqlx](https://github.com/jmoiron/sqlx)

```go
package main

import (
    "log"
    "time"

    "github.com/jmoiron/sqlx"
    _ "github.com/ClickHouse/clickhouse-go"
)

func main() {
    connect, err := sqlx.Open("clickhouse", "tcp://127.0.0.1:9000?debug=true")
    if err != nil {
        log.Fatal(err)
    }
    var items []struct {
        CountryCode string    `db:"country_code"`
        OsID        uint8     `db:"os_id"`
        BrowserID   uint8     `db:"browser_id"`
        Categories  []int16   `db:"categories"`
        ActionTime  time.Time `db:"action_time"`
    }

    if err := connect.Select(&items, "SELECT country_code, os_id, browser_id, categories, action_time FROM example"); err != nil {
        log.Fatal(err)
    }

    for _, item := range items {
        log.Printf("country: %s, os: %d, browser: %d, categories: %v, action_time: %s", item.CountryCode, item.OsID, item.BrowserID, item.Categories, item.ActionTime)
    }
}
```

### External tables support

```go
package main

import (
    "database/sql"
    "database/sql/driver"
    "fmt"
    "github.com/ClickHouse/clickhouse-go/lib/column"
    "log"
    "time"

    "github.com/ClickHouse/clickhouse-go"
)

func main() {
    connect, err := sql.Open("clickhouse", "tcp://127.0.0.1:9000?debug=true")
    if err != nil {
        log.Fatal(err)
    }
    if err := connect.Ping(); err != nil {
        if exception, ok := err.(*clickhouse.Exception); ok {
            fmt.Printf("[%d] %s \n%s\n", exception.Code, exception.Message, exception.StackTrace)
        } else {
            fmt.Println(err)
        }
        return
    }

    _, err = connect.Exec(`
        CREATE TABLE IF NOT EXISTS example (
            country_code FixedString(2),
            os_id        UInt8,
            browser_id   UInt8,
            categories   Array(Int16),
            action_day   Date,
            action_time  DateTime
        ) engine=Memory
    `)

    if err != nil {
        log.Fatal(err)
    }
    var (
        tx, _   = connect.Begin()
        stmt, _ = tx.Prepare("INSERT INTO example (country_code, os_id, browser_id, categories, action_day, action_time) VALUES (?, ?, ?, ?, ?, ?)")
    )
    defer stmt.Close()

    for i := 0; i < 100; i++ {
        if _, err := stmt.Exec(
            "RU",
            10+i,
            100+i,
            clickhouse.Array([]int16{1, 2, 3}),
            time.Now(),
            time.Now(),
        ); err != nil {
            log.Fatal(err)
        }
    }

    if err := tx.Commit(); err != nil {
        log.Fatal(err)
    }

    col, err := column.Factory("country_code", "String", nil)
    if err != nil {
        log.Fatal(err)
    }
    countriesExternalTable := clickhouse.ExternalTable{
        Name: "countries",
        Values: [][]driver.Value{
            {"RU"},
        },
        Columns: []column.Column{col},
    }

    rows, err := connect.Query("SELECT country_code, os_id, browser_id, categories, action_day, action_time "+
        "FROM example WHERE country_code IN ?", countriesExternalTable)
    if err != nil {
        log.Fatal(err)
    }
    defer rows.Close()

    for rows.Next() {
        var (
            country               string
            os, browser           uint8
            categories            []int16
            actionDay, actionTime time.Time
        )
        if err := rows.Scan(&country, &os, &browser, &categories, &actionDay, &actionTime); err != nil {
            log.Fatal(err)
        }
        log.Printf("country: %s, os: %d, browser: %d, categories: %v, action_day: %s, action_time: %s", country, os, browser, categories, actionDay, actionTime)
    }

    if err := rows.Err(); err != nil {
        log.Fatal(err)
    }

    if _, err := connect.Exec("DROP TABLE example"); err != nil {
        log.Fatal(err)
    }
}
```
21 vendor/github.com/ClickHouse/clickhouse-go/array.go generated vendored
@ -1,21 +0,0 @@
|
||||
package clickhouse
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
func Array(v interface{}) interface{} {
|
||||
return v
|
||||
}
|
||||
|
||||
func ArrayFixedString(len int, v interface{}) interface{} {
|
||||
return v
|
||||
}
|
||||
|
||||
func ArrayDate(v []time.Time) interface{} {
|
||||
return v
|
||||
}
|
||||
|
||||
func ArrayDateTime(v []time.Time) interface{} {
|
||||
return v
|
||||
}
|
255 vendor/github.com/ClickHouse/clickhouse-go/bootstrap.go generated vendored
@ -1,255 +0,0 @@
|
||||
package clickhouse
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/lib/binary"
|
||||
"github.com/ClickHouse/clickhouse-go/lib/data"
|
||||
"github.com/ClickHouse/clickhouse-go/lib/protocol"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultDatabase when connecting to ClickHouse
|
||||
DefaultDatabase = "default"
|
||||
// DefaultUsername when connecting to ClickHouse
|
||||
DefaultUsername = "default"
|
||||
// DefaultConnTimeout when connecting to ClickHouse
|
||||
DefaultConnTimeout = 5 * time.Second
|
||||
// DefaultReadTimeout when reading query results
|
||||
DefaultReadTimeout = time.Minute
|
||||
// DefaultWriteTimeout when sending queries
|
||||
DefaultWriteTimeout = time.Minute
|
||||
)
|
||||
|
||||
var (
|
||||
unixtime int64
|
||||
logOutput io.Writer = os.Stdout
|
||||
hostname, _ = os.Hostname()
|
||||
poolInit sync.Once
|
||||
)
|
||||
|
||||
func init() {
|
||||
sql.Register("clickhouse", &bootstrap{})
|
||||
go func() {
|
||||
for tick := time.Tick(time.Second); ; {
|
||||
select {
|
||||
case <-tick:
|
||||
atomic.AddInt64(&unixtime, int64(time.Second))
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func now() time.Time {
|
||||
return time.Unix(0, atomic.LoadInt64(&unixtime))
|
||||
}
|
||||
|
||||
type bootstrap struct{}
|
||||
|
||||
func (d *bootstrap) Open(dsn string) (driver.Conn, error) {
|
||||
return Open(dsn)
|
||||
}
|
||||
|
||||
// SetLogOutput allows to change output of the default logger
|
||||
func SetLogOutput(output io.Writer) {
|
||||
logOutput = output
|
||||
}
|
||||
|
||||
// Open the connection
|
||||
func Open(dsn string) (driver.Conn, error) {
|
||||
clickhouse, err := open(dsn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return clickhouse, err
|
||||
}
|
||||
|
||||
func open(dsn string) (*clickhouse, error) {
|
||||
url, err := url.Parse(dsn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var (
|
||||
hosts = []string{url.Host}
|
||||
query = url.Query()
|
||||
secure = false
|
||||
skipVerify = false
|
||||
tlsConfigName = query.Get("tls_config")
|
||||
noDelay = true
|
||||
compress = false
|
||||
database = query.Get("database")
|
||||
username = query.Get("username")
|
||||
password = query.Get("password")
|
||||
blockSize = 1000000
|
||||
connTimeout = DefaultConnTimeout
|
||||
readTimeout = DefaultReadTimeout
|
||||
writeTimeout = DefaultWriteTimeout
|
||||
connOpenStrategy = connOpenRandom
|
||||
checkConnLiveness = true
|
||||
)
|
||||
if len(database) == 0 {
|
||||
database = DefaultDatabase
|
||||
}
|
||||
if len(username) == 0 {
|
||||
username = DefaultUsername
|
||||
}
|
||||
if v, err := strconv.ParseBool(query.Get("no_delay")); err == nil {
|
||||
noDelay = v
|
||||
}
|
||||
tlsConfig := getTLSConfigClone(tlsConfigName)
|
||||
if tlsConfigName != "" && tlsConfig == nil {
|
||||
return nil, fmt.Errorf("invalid tls_config - no config registered under name %s", tlsConfigName)
|
||||
}
|
||||
secure = tlsConfig != nil
|
||||
if v, err := strconv.ParseBool(query.Get("secure")); err == nil {
|
||||
secure = v
|
||||
}
|
||||
if v, err := strconv.ParseBool(query.Get("skip_verify")); err == nil {
|
||||
skipVerify = v
|
||||
}
|
||||
if duration, err := strconv.ParseFloat(query.Get("timeout"), 64); err == nil {
|
||||
connTimeout = time.Duration(duration * float64(time.Second))
|
||||
}
|
||||
if duration, err := strconv.ParseFloat(query.Get("read_timeout"), 64); err == nil {
|
||||
readTimeout = time.Duration(duration * float64(time.Second))
|
||||
}
|
||||
if duration, err := strconv.ParseFloat(query.Get("write_timeout"), 64); err == nil {
|
||||
writeTimeout = time.Duration(duration * float64(time.Second))
|
||||
}
|
||||
if size, err := strconv.ParseInt(query.Get("block_size"), 10, 64); err == nil {
|
||||
blockSize = int(size)
|
||||
}
|
||||
if altHosts := strings.Split(query.Get("alt_hosts"), ","); len(altHosts) != 0 {
|
||||
for _, host := range altHosts {
|
||||
if len(host) != 0 {
|
||||
hosts = append(hosts, host)
|
||||
}
|
||||
}
|
||||
}
|
||||
switch query.Get("connection_open_strategy") {
|
||||
case "random":
|
||||
connOpenStrategy = connOpenRandom
|
||||
case "in_order":
|
||||
connOpenStrategy = connOpenInOrder
|
||||
case "time_random":
|
||||
connOpenStrategy = connOpenTimeRandom
|
||||
}
|
||||
|
||||
settings, err := makeQuerySettings(query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if v, err := strconv.ParseBool(query.Get("compress")); err == nil {
|
||||
compress = v
|
||||
}
|
||||
|
||||
if v, err := strconv.ParseBool(query.Get("check_connection_liveness")); err == nil {
|
||||
checkConnLiveness = v
|
||||
}
|
||||
if secure {
|
||||
// There is no way to check the liveness of a secure connection, as long as there is no access to raw TCP net.Conn
|
||||
checkConnLiveness = false
|
||||
}
|
||||
|
||||
var (
|
||||
ch = clickhouse{
|
||||
logf: func(string, ...interface{}) {},
|
||||
settings: settings,
|
||||
compress: compress,
|
||||
blockSize: blockSize,
|
||||
checkConnLiveness: checkConnLiveness,
|
||||
ServerInfo: data.ServerInfo{
|
||||
Timezone: time.Local,
|
||||
},
|
||||
}
|
||||
logger = log.New(logOutput, "[clickhouse]", 0)
|
||||
)
|
||||
if debug, err := strconv.ParseBool(url.Query().Get("debug")); err == nil && debug {
|
||||
ch.logf = logger.Printf
|
||||
}
|
||||
ch.logf("host(s)=%s, database=%s, username=%s",
|
||||
strings.Join(hosts, ", "),
|
||||
database,
|
||||
username,
|
||||
)
|
||||
options := connOptions{
|
||||
secure: secure,
|
||||
tlsConfig: tlsConfig,
|
||||
skipVerify: skipVerify,
|
||||
hosts: hosts,
|
||||
connTimeout: connTimeout,
|
||||
readTimeout: readTimeout,
|
||||
writeTimeout: writeTimeout,
|
||||
noDelay: noDelay,
|
||||
openStrategy: connOpenStrategy,
|
||||
logf: ch.logf,
|
||||
}
|
||||
if ch.conn, err = dial(options); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
logger.SetPrefix(fmt.Sprintf("[clickhouse][connect=%d]", ch.conn.ident))
|
||||
ch.buffer = bufio.NewWriter(ch.conn)
|
||||
|
||||
ch.decoder = binary.NewDecoderWithCompress(ch.conn)
|
||||
ch.encoder = binary.NewEncoderWithCompress(ch.buffer)
|
||||
|
||||
if err := ch.hello(database, username, password); err != nil {
|
||||
ch.conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
return &ch, nil
|
||||
}
|
||||
|
||||
func (ch *clickhouse) hello(database, username, password string) error {
|
||||
ch.logf("[hello] -> %s", ch.ClientInfo)
|
||||
{
|
||||
ch.encoder.Uvarint(protocol.ClientHello)
|
||||
if err := ch.ClientInfo.Write(ch.encoder); err != nil {
|
||||
return err
|
||||
}
|
||||
{
|
||||
ch.encoder.String(database)
|
||||
ch.encoder.String(username)
|
||||
ch.encoder.String(password)
|
||||
}
|
||||
if err := ch.encoder.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
{
|
||||
packet, err := ch.decoder.Uvarint()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch packet {
|
||||
case protocol.ServerException:
|
||||
return ch.exception()
|
||||
case protocol.ServerHello:
|
||||
if err := ch.ServerInfo.Read(ch.decoder); err != nil {
|
||||
return err
|
||||
}
|
||||
case protocol.ServerEndOfStream:
|
||||
ch.logf("[bootstrap] <- end of stream")
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("[hello] unexpected packet [%d] from server", packet)
|
||||
}
|
||||
}
|
||||
ch.logf("[hello] <- %s", ch.ServerInfo)
|
||||
return nil
|
||||
}
|
354 vendor/github.com/ClickHouse/clickhouse-go/clickhouse.go generated vendored
@ -1,354 +0,0 @@
|
||||
package clickhouse
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/lib/binary"
|
||||
"github.com/ClickHouse/clickhouse-go/lib/column"
|
||||
"github.com/ClickHouse/clickhouse-go/lib/data"
|
||||
"github.com/ClickHouse/clickhouse-go/lib/protocol"
|
||||
"github.com/ClickHouse/clickhouse-go/lib/types"
|
||||
)
|
||||
|
||||
type (
|
||||
Date = types.Date
|
||||
DateTime = types.DateTime
|
||||
UUID = types.UUID
|
||||
)
|
||||
|
||||
type ExternalTable struct {
|
||||
Name string
|
||||
Values [][]driver.Value
|
||||
Columns []column.Column
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInsertInNotBatchMode = errors.New("insert statement supported only in the batch mode (use begin/commit)")
|
||||
ErrLimitDataRequestInTx = errors.New("data request has already been prepared in transaction")
|
||||
)
|
||||
|
||||
var (
|
||||
splitInsertRe = regexp.MustCompile(`(?i)\sVALUES\s*\(`)
|
||||
)
|
||||
|
||||
type logger func(format string, v ...interface{})
|
||||
|
||||
type clickhouse struct {
|
||||
sync.Mutex
|
||||
data.ServerInfo
|
||||
data.ClientInfo
|
||||
logf logger
|
||||
conn *connect
|
||||
block *data.Block
|
||||
buffer *bufio.Writer
|
||||
decoder *binary.Decoder
|
||||
encoder *binary.Encoder
|
||||
settings *querySettings
|
||||
compress bool
|
||||
blockSize int
|
||||
inTransaction bool
|
||||
checkConnLiveness bool
|
||||
}
|
||||
|
||||
func (ch *clickhouse) Prepare(query string) (driver.Stmt, error) {
|
||||
return ch.prepareContext(context.Background(), query)
|
||||
}
|
||||
|
||||
func (ch *clickhouse) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
|
||||
return ch.prepareContext(ctx, query)
|
||||
}
|
||||
|
||||
func (ch *clickhouse) prepareContext(ctx context.Context, query string) (driver.Stmt, error) {
|
||||
ch.logf("[prepare] %s", query)
|
||||
switch {
|
||||
case ch.conn.closed:
|
||||
return nil, driver.ErrBadConn
|
||||
case ch.block != nil:
|
||||
return nil, ErrLimitDataRequestInTx
|
||||
case isInsert(query):
|
||||
if !ch.inTransaction {
|
||||
return nil, ErrInsertInNotBatchMode
|
||||
}
|
||||
return ch.insert(ctx, query)
|
||||
}
|
||||
return &stmt{
|
||||
ch: ch,
|
||||
query: query,
|
||||
numInput: numInput(query),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (ch *clickhouse) insert(ctx context.Context, query string) (_ driver.Stmt, err error) {
|
||||
if err := ch.sendQuery(ctx, splitInsertRe.Split(query, -1)[0]+" VALUES ", nil); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ch.block, err = ch.readMeta(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &stmt{
|
||||
ch: ch,
|
||||
isInsert: true,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (ch *clickhouse) Begin() (driver.Tx, error) {
|
||||
return ch.beginTx(context.Background(), txOptions{})
|
||||
}
|
||||
|
||||
func (ch *clickhouse) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
|
||||
return ch.beginTx(ctx, txOptions{
|
||||
Isolation: int(opts.Isolation),
|
||||
ReadOnly: opts.ReadOnly,
|
||||
})
|
||||
}
|
||||
|
||||
type txOptions struct {
|
||||
Isolation int
|
||||
ReadOnly bool
|
||||
}
|
||||
|
||||
func (ch *clickhouse) beginTx(ctx context.Context, opts txOptions) (*clickhouse, error) {
|
||||
ch.logf("[begin] tx=%t, data=%t", ch.inTransaction, ch.block != nil)
|
||||
switch {
|
||||
case ch.inTransaction:
|
||||
return nil, sql.ErrTxDone
|
||||
case ch.conn.closed:
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
|
||||
// Perform a stale connection check. We only perform this check in beginTx,
|
||||
// because database/sql retries driver.ErrBadConn only for first request,
|
||||
// but beginTx doesn't perform any other network interaction.
|
||||
if ch.checkConnLiveness {
|
||||
if err := ch.conn.connCheck(); err != nil {
|
||||
ch.logf("[begin] closing bad idle connection: %w", err)
|
||||
ch.Close()
|
||||
return ch, driver.ErrBadConn
|
||||
}
|
||||
}
|
||||
|
||||
if finish := ch.watchCancel(ctx); finish != nil {
|
||||
defer finish()
|
||||
}
|
||||
ch.block = nil
|
||||
ch.inTransaction = true
|
||||
return ch, nil
|
||||
}
|
||||
|
||||
func (ch *clickhouse) Commit() error {
|
||||
ch.logf("[commit] tx=%t, data=%t", ch.inTransaction, ch.block != nil)
|
||||
defer func() {
|
||||
if ch.block != nil {
|
||||
ch.block.Reset()
|
||||
ch.block = nil
|
||||
}
|
||||
ch.inTransaction = false
|
||||
}()
|
||||
switch {
|
||||
case !ch.inTransaction:
|
||||
return sql.ErrTxDone
|
||||
case ch.conn.closed:
|
||||
return driver.ErrBadConn
|
||||
}
|
||||
if ch.block != nil {
|
||||
if err := ch.writeBlock(ch.block, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
// Send empty block as marker of end of data.
|
||||
if err := ch.writeBlock(&data.Block{}, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ch.encoder.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
return ch.process()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ch *clickhouse) Rollback() error {
|
||||
ch.logf("[rollback] tx=%t, data=%t", ch.inTransaction, ch.block != nil)
|
||||
if !ch.inTransaction {
|
||||
return sql.ErrTxDone
|
||||
}
|
||||
if ch.block != nil {
|
||||
ch.block.Reset()
|
||||
}
|
||||
ch.block = nil
|
||||
ch.buffer = nil
|
||||
ch.inTransaction = false
|
||||
return ch.conn.Close()
|
||||
}
|
||||
|
||||
func (ch *clickhouse) CheckNamedValue(nv *driver.NamedValue) error {
|
||||
switch nv.Value.(type) {
|
||||
case ExternalTable, column.IP, column.UUID:
|
||||
return nil
|
||||
case nil, []byte, int8, int16, int32, int64, uint8, uint16, uint32, uint64, float32, float64, string, time.Time:
|
||||
return nil
|
||||
}
|
||||
switch v := nv.Value.(type) {
|
||||
case
|
||||
[]int, []int8, []int16, []int32, []int64,
|
||||
[]uint, []uint8, []uint16, []uint32, []uint64,
|
||||
[]float32, []float64,
|
||||
[]string:
|
||||
return nil
|
||||
case net.IP, *net.IP:
|
||||
return nil
|
||||
case driver.Valuer:
|
||||
value, err := v.Value()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nv.Value = value
|
||||
default:
|
||||
switch value := reflect.ValueOf(nv.Value); value.Kind() {
|
||||
case reflect.Slice:
|
||||
return nil
|
||||
case reflect.Bool:
|
||||
nv.Value = uint8(0)
|
||||
if value.Bool() {
|
||||
nv.Value = uint8(1)
|
||||
}
|
||||
case reflect.Int8:
|
||||
nv.Value = int8(value.Int())
|
||||
case reflect.Int16:
|
||||
nv.Value = int16(value.Int())
|
||||
case reflect.Int32:
|
||||
nv.Value = int32(value.Int())
|
||||
case reflect.Int64:
|
||||
nv.Value = value.Int()
|
||||
case reflect.Uint8:
|
||||
nv.Value = uint8(value.Uint())
|
||||
case reflect.Uint16:
|
||||
nv.Value = uint16(value.Uint())
|
||||
case reflect.Uint32:
|
||||
nv.Value = uint32(value.Uint())
|
||||
case reflect.Uint64:
|
||||
nv.Value = uint64(value.Uint())
|
||||
case reflect.Float32:
|
||||
nv.Value = float32(value.Float())
|
||||
case reflect.Float64:
|
||||
nv.Value = float64(value.Float())
|
||||
case reflect.String:
|
||||
nv.Value = value.String()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ch *clickhouse) Close() error {
|
||||
ch.block = nil
|
||||
return ch.conn.Close()
|
||||
}
|
||||
|
||||
func (ch *clickhouse) process() error {
|
||||
packet, err := ch.decoder.Uvarint()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for {
|
||||
switch packet {
|
||||
case protocol.ServerPong:
|
||||
ch.logf("[process] <- pong")
|
||||
return nil
|
||||
case protocol.ServerException:
|
||||
ch.logf("[process] <- exception")
|
||||
return ch.exception()
|
||||
case protocol.ServerProgress:
|
||||
progress, err := ch.progress()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ch.logf("[process] <- progress: rows=%d, bytes=%d, total rows=%d",
|
||||
progress.rows,
|
||||
progress.bytes,
|
||||
progress.totalRows,
|
||||
)
|
||||
case protocol.ServerProfileInfo:
|
||||
profileInfo, err := ch.profileInfo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ch.logf("[process] <- profiling: rows=%d, bytes=%d, blocks=%d", profileInfo.rows, profileInfo.bytes, profileInfo.blocks)
|
||||
case protocol.ServerData:
|
||||
block, err := ch.readBlock()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ch.logf("[process] <- data: packet=%d, columns=%d, rows=%d", packet, block.NumColumns, block.NumRows)
|
||||
case protocol.ServerEndOfStream:
|
||||
ch.logf("[process] <- end of stream")
|
||||
return nil
|
||||
default:
|
||||
ch.conn.Close()
|
||||
return fmt.Errorf("[process] unexpected packet [%d] from server", packet)
|
||||
}
|
||||
if packet, err = ch.decoder.Uvarint(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ch *clickhouse) cancel() error {
|
||||
ch.logf("[cancel request]")
|
||||
// even if we fail to write the cancel, we still need to close
|
||||
err := ch.encoder.Uvarint(protocol.ClientCancel)
|
||||
if err == nil {
|
||||
err = ch.encoder.Flush()
|
||||
}
|
||||
// return the close error if there was one, otherwise return the write error
|
||||
if cerr := ch.conn.Close(); cerr != nil {
|
||||
return cerr
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (ch *clickhouse) watchCancel(ctx context.Context) func() {
|
||||
if done := ctx.Done(); done != nil {
|
||||
finished := make(chan struct{})
|
||||
go func() {
|
||||
select {
|
||||
case <-done:
|
||||
ch.cancel()
|
||||
finished <- struct{}{}
|
||||
ch.logf("[cancel] <- done")
|
||||
case <-finished:
|
||||
ch.logf("[cancel] <- finished")
|
||||
}
|
||||
}()
|
||||
return func() {
|
||||
select {
|
||||
case <-finished:
|
||||
case finished <- struct{}{}:
|
||||
}
|
||||
}
|
||||
}
|
||||
return func() {}
|
||||
}
|
||||
|
||||
func (ch *clickhouse) ExecContext(ctx context.Context, query string,
|
||||
args []driver.NamedValue) (driver.Result, error) {
|
||||
finish := ch.watchCancel(ctx)
|
||||
defer finish()
|
||||
stmt, err := ch.PrepareContext(ctx, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dargs := make([]driver.Value, len(args))
|
||||
for i, nv := range args {
|
||||
dargs[i] = nv.Value
|
||||
}
|
||||
return stmt.Exec(dargs)
|
||||
}
|
46 vendor/github.com/ClickHouse/clickhouse-go/clickhouse_exception.go generated vendored
@ -1,46 +0,0 @@
|
||||
package clickhouse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Exception struct {
|
||||
Code int32
|
||||
Name string
|
||||
Message string
|
||||
StackTrace string
|
||||
nested error
|
||||
}
|
||||
|
||||
func (e *Exception) Error() string {
|
||||
return fmt.Sprintf("code: %d, message: %s", e.Code, e.Message)
|
||||
}
|
||||
|
||||
func (ch *clickhouse) exception() error {
|
||||
var (
|
||||
e Exception
|
||||
err error
|
||||
hasNested bool
|
||||
)
|
||||
if e.Code, err = ch.decoder.Int32(); err != nil {
|
||||
return err
|
||||
}
|
||||
if e.Name, err = ch.decoder.String(); err != nil {
|
||||
return err
|
||||
}
|
||||
if e.Message, err = ch.decoder.String(); err != nil {
|
||||
return err
|
||||
}
|
||||
e.Message = strings.TrimSpace(strings.TrimPrefix(e.Message, e.Name+":"))
|
||||
if e.StackTrace, err = ch.decoder.String(); err != nil {
|
||||
return err
|
||||
}
|
||||
if hasNested, err = ch.decoder.Bool(); err != nil {
|
||||
return err
|
||||
}
|
||||
if hasNested {
|
||||
e.nested = ch.exception()
|
||||
}
|
||||
return &e
|
||||
}
|
28 vendor/github.com/ClickHouse/clickhouse-go/clickhouse_ping.go generated vendored
@ -1,28 +0,0 @@
|
||||
package clickhouse
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql/driver"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/lib/protocol"
|
||||
)
|
||||
|
||||
func (ch *clickhouse) Ping(ctx context.Context) error {
|
||||
return ch.ping(ctx)
|
||||
}
|
||||
|
||||
func (ch *clickhouse) ping(ctx context.Context) error {
|
||||
if ch.conn.closed {
|
||||
return driver.ErrBadConn
|
||||
}
|
||||
ch.logf("-> ping")
|
||||
finish := ch.watchCancel(ctx)
|
||||
defer finish()
|
||||
if err := ch.encoder.Uvarint(protocol.ClientPing); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ch.encoder.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
return ch.process()
|
||||
}
|
37 vendor/github.com/ClickHouse/clickhouse-go/clickhouse_profile_info.go generated vendored
@ -1,37 +0,0 @@
|
||||
package clickhouse
|
||||
|
||||
type profileInfo struct {
|
||||
rows uint64
|
||||
bytes uint64
|
||||
blocks uint64
|
||||
appliedLimit bool
|
||||
rowsBeforeLimit uint64
|
||||
calculatedRowsBeforeLimit bool
|
||||
}
|
||||
|
||||
func (ch *clickhouse) profileInfo() (*profileInfo, error) {
|
||||
var (
|
||||
p profileInfo
|
||||
err error
|
||||
)
|
||||
if p.rows, err = ch.decoder.Uvarint(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if p.blocks, err = ch.decoder.Uvarint(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if p.bytes, err = ch.decoder.Uvarint(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if p.appliedLimit, err = ch.decoder.Bool(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if p.rowsBeforeLimit, err = ch.decoder.Uvarint(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if p.calculatedRowsBeforeLimit, err = ch.decoder.Bool(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &p, nil
|
||||
}
|
26 vendor/github.com/ClickHouse/clickhouse-go/clickhouse_progress.go generated vendored
@ -1,26 +0,0 @@
|
||||
package clickhouse
|
||||
|
||||
type progress struct {
|
||||
rows uint64
|
||||
bytes uint64
|
||||
totalRows uint64
|
||||
}
|
||||
|
||||
func (ch *clickhouse) progress() (*progress, error) {
|
||||
var (
|
||||
p progress
|
||||
err error
|
||||
)
|
||||
if p.rows, err = ch.decoder.Uvarint(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if p.bytes, err = ch.decoder.Uvarint(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if p.totalRows, err = ch.decoder.Uvarint(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &p, nil
|
||||
}
|
19 vendor/github.com/ClickHouse/clickhouse-go/clickhouse_read_block.go generated vendored
@ -1,19 +0,0 @@
|
||||
package clickhouse
|
||||
|
||||
import (
|
||||
"github.com/ClickHouse/clickhouse-go/lib/data"
|
||||
)
|
||||
|
||||
func (ch *clickhouse) readBlock() (*data.Block, error) {
|
||||
if _, err := ch.decoder.String(); err != nil { // temporary table
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ch.decoder.SelectCompress(ch.compress)
|
||||
var block data.Block
|
||||
if err := block.Read(&ch.ServerInfo, ch.decoder); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ch.decoder.SelectCompress(false)
|
||||
return &block, nil
|
||||
}
|
53 vendor/github.com/ClickHouse/clickhouse-go/clickhouse_read_meta.go generated vendored
@ -1,53 +0,0 @@
|
||||
package clickhouse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/lib/data"
|
||||
"github.com/ClickHouse/clickhouse-go/lib/protocol"
|
||||
)
|
||||
|
||||
func (ch *clickhouse) readMeta() (*data.Block, error) {
|
||||
for {
|
||||
packet, err := ch.decoder.Uvarint()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch packet {
|
||||
case protocol.ServerException:
|
||||
ch.logf("[read meta] <- exception")
|
||||
return nil, ch.exception()
|
||||
case protocol.ServerProgress:
|
||||
progress, err := ch.progress()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ch.logf("[read meta] <- progress: rows=%d, bytes=%d, total rows=%d",
|
||||
progress.rows,
|
||||
progress.bytes,
|
||||
progress.totalRows,
|
||||
)
|
||||
case protocol.ServerProfileInfo:
|
||||
profileInfo, err := ch.profileInfo()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ch.logf("[read meta] <- profiling: rows=%d, bytes=%d, blocks=%d", profileInfo.rows, profileInfo.bytes, profileInfo.blocks)
|
||||
case protocol.ServerData:
|
||||
block, err := ch.readBlock()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ch.logf("[read meta] <- data: packet=%d, columns=%d, rows=%d", packet, block.NumColumns, block.NumRows)
|
||||
return block, nil
|
||||
case protocol.ServerEndOfStream:
|
||||
_, err := ch.readBlock()
|
||||
ch.logf("[process] <- end of stream")
|
||||
return nil, err
|
||||
default:
|
||||
ch.conn.Close()
|
||||
return nil, fmt.Errorf("[read meta] unexpected packet [%d] from server", packet)
|
||||
}
|
||||
}
|
||||
}
|
35 vendor/github.com/ClickHouse/clickhouse-go/clickhouse_send_external_data.go generated vendored
@ -1,35 +0,0 @@
|
||||
package clickhouse
|
||||
|
||||
import "github.com/ClickHouse/clickhouse-go/lib/data"
|
||||
|
||||
func (ch *clickhouse) sendExternalTables(externalTables []ExternalTable) error {
|
||||
ch.logf("[send external tables] count %d", len(externalTables))
|
||||
if externalTables == nil || len(externalTables) == 0 {
|
||||
return nil
|
||||
}
|
||||
block := &data.Block{}
|
||||
sentTables := make(map[string]bool, 0)
|
||||
for _, externalTable := range externalTables {
|
||||
if _, ok := sentTables[externalTable.Name]; ok {
|
||||
continue
|
||||
}
|
||||
ch.logf("[send external table] name %s", externalTable.Name)
|
||||
sentTables[externalTable.Name] = true
|
||||
block.Columns = externalTable.Columns
|
||||
block.NumColumns = uint64(len(externalTable.Columns))
|
||||
for _, row := range externalTable.Values {
|
||||
err := block.AppendRow(row)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := ch.writeBlock(block, externalTable.Name); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ch.encoder.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
block.Reset()
|
||||
}
|
||||
return nil
|
||||
}
|
71 vendor/github.com/ClickHouse/clickhouse-go/clickhouse_send_query.go generated vendored
@ -1,71 +0,0 @@
|
||||
package clickhouse
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/ClickHouse/clickhouse-go/lib/data"
|
||||
"github.com/ClickHouse/clickhouse-go/lib/protocol"
|
||||
)
|
||||
|
||||
func (ch *clickhouse) sendQuery(ctx context.Context, query string, externalTables []ExternalTable) error {
|
||||
ch.logf("[send query] %s", query)
|
||||
if err := ch.encoder.Uvarint(protocol.ClientQuery); err != nil {
|
||||
return err
|
||||
}
|
||||
var queryID string
|
||||
queryIDValue := ctx.Value(queryIDKey)
|
||||
if queryIDValue != nil {
|
||||
if queryIdStr, ok := queryIDValue.(string); ok {
|
||||
queryID = queryIdStr
|
||||
}
|
||||
}
|
||||
if err := ch.encoder.String(queryID); err != nil {
|
||||
return err
|
||||
}
|
||||
{ // client info
|
||||
ch.encoder.Uvarint(1)
|
||||
ch.encoder.String("")
|
||||
ch.encoder.String("")
|
||||
ch.encoder.String("[::ffff:127.0.0.1]:0")
|
||||
ch.encoder.Uvarint(1) // iface type TCP
|
||||
ch.encoder.String(hostname)
|
||||
ch.encoder.String(hostname)
|
||||
}
|
||||
if err := ch.ClientInfo.Write(ch.encoder); err != nil {
|
||||
return err
|
||||
}
|
||||
if ch.ServerInfo.Revision >= protocol.DBMS_MIN_REVISION_WITH_QUOTA_KEY_IN_CLIENT_INFO {
|
||||
ch.encoder.String("")
|
||||
}
|
||||
|
||||
// the settings are written as list of contiguous name-value pairs, finished with empty name
|
||||
if !ch.settings.IsEmpty() {
|
||||
ch.logf("[query settings] %s", ch.settings.settingsStr)
|
||||
if err := ch.settings.Serialize(ch.encoder); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// empty string is a marker of the end of the settings
|
||||
if err := ch.encoder.String(""); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ch.encoder.Uvarint(protocol.StateComplete); err != nil {
|
||||
return err
|
||||
}
|
||||
compress := protocol.CompressDisable
|
||||
if ch.compress {
|
||||
compress = protocol.CompressEnable
|
||||
}
|
||||
if err := ch.encoder.Uvarint(compress); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ch.encoder.String(query); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ch.sendExternalTables(externalTables); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ch.writeBlock(&data.Block{}, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
return ch.encoder.Flush()
|
||||
}
|
40 vendor/github.com/ClickHouse/clickhouse-go/clickhouse_write_block.go generated vendored
@ -1,40 +0,0 @@
|
||||
package clickhouse
|
||||
|
||||
import (
|
||||
"github.com/ClickHouse/clickhouse-go/lib/data"
|
||||
"github.com/ClickHouse/clickhouse-go/lib/protocol"
|
||||
)
|
||||
|
||||
func (ch *clickhouse) writeBlock(block *data.Block, tableName string) error {
|
||||
ch.Lock()
|
||||
defer ch.Unlock()
|
||||
if err := ch.encoder.Uvarint(protocol.ClientData); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ch.encoder.String(tableName); err != nil { // temporary table
|
||||
return err
|
||||
}
|
||||
|
||||
// implement CityHash v 1.0.2 and add LZ4 compression
|
||||
/*
|
||||
From Alexey Milovidov
|
||||
Насколько я помню, сжимаются блоки с данными Native формата, а всё остальное (всякие номера пакетов и т. п.) передаётся без сжатия.
|
||||
|
||||
Сжатые данные устроены так. Они представляют собой набор сжатых фреймов.
|
||||
Каждый фрейм имеет следующий вид:
|
||||
чексумма (16 байт),
|
||||
идентификатор алгоритма сжатия (1 байт),
|
||||
размер сжатых данных (4 байта, little endian, размер не включает в себя чексумму, но включает в себя остальные 9 байт заголовка),
|
||||
размер несжатых данных (4 байта, little endian), затем сжатые данные.
|
||||
Идентификатор алгоритма: 0x82 - lz4, 0x90 - zstd.
|
||||
Чексумма - CityHash128 из CityHash версии 1.0.2, вычисленный от сжатых данных с учётом 9 байт заголовка.
|
||||
|
||||
См. CompressedReadBufferBase, CompressedWriteBuffer,
|
||||
utils/compressor, TCPHandler.
|
||||
*/
|
||||
ch.encoder.SelectCompress(ch.compress)
|
||||
err := block.Write(&ch.ServerInfo, ch.encoder)
|
||||
ch.encoder.SelectCompress(false)
|
||||
return err
|
||||
}
|
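The frame layout quoted in that comment can be made concrete with a short sketch (illustrative only, not part of the driver; `lz4Compress` and `cityHash128` are assumed stand-ins for an LZ4 block compressor and a CityHash v1.0.2 implementation):

```go
package main

import (
    "bytes"
    "encoding/binary"
)

// Assumed helpers, not real APIs: any LZ4 block compressor and any
// CityHash v1.0.2 implementation returning a 16-byte digest.
var (
    lz4Compress func([]byte) []byte
    cityHash128 func([]byte) [16]byte
)

// writeCompressedFrame lays out one frame exactly as the comment above
// describes: checksum(16) | algorithm id(1) | compressed size(4, LE) |
// uncompressed size(4, LE) | compressed data.
func writeCompressedFrame(dst *bytes.Buffer, raw []byte) {
    compressed := lz4Compress(raw)
    header := make([]byte, 9)
    header[0] = 0x82 // algorithm identifier: 0x82 = lz4 (0x90 would be zstd)
    // compressed size: excludes the checksum, includes these 9 header bytes
    binary.LittleEndian.PutUint32(header[1:5], uint32(9+len(compressed)))
    binary.LittleEndian.PutUint32(header[5:9], uint32(len(raw))) // uncompressed size
    sum := cityHash128(append(header, compressed...))            // over header bytes + payload
    dst.Write(sum[:])
    dst.Write(header)
    dst.Write(compressed)
}

func main() {}
```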
225 vendor/github.com/ClickHouse/clickhouse-go/connect.go generated vendored
@ -1,225 +0,0 @@
|
||||
package clickhouse
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
"database/sql/driver"
|
||||
"net"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
var tick int32
|
||||
|
||||
type openStrategy int8
|
||||
|
||||
func (s openStrategy) String() string {
|
||||
switch s {
|
||||
case connOpenInOrder:
|
||||
return "in_order"
|
||||
case connOpenTimeRandom:
|
||||
return "time_random"
|
||||
}
|
||||
return "random"
|
||||
}
|
||||
|
||||
const (
|
||||
connOpenRandom openStrategy = iota + 1
|
||||
connOpenInOrder
|
||||
connOpenTimeRandom
|
||||
)
|
||||
|
||||
type connOptions struct {
|
||||
secure, skipVerify bool
|
||||
tlsConfig *tls.Config
|
||||
hosts []string
|
||||
connTimeout, readTimeout, writeTimeout time.Duration
|
||||
noDelay bool
|
||||
openStrategy openStrategy
|
||||
logf func(string, ...interface{})
|
||||
}
|
||||
|
||||
// DialFunc is a function which can be used to establish the network connection.
|
||||
// Custom dial functions must be registered with RegisterDial
|
||||
type DialFunc func(network, address string, timeout time.Duration, config *tls.Config) (net.Conn, error)
|
||||
|
||||
var (
|
||||
customDialLock sync.RWMutex
|
||||
customDial DialFunc
|
||||
)
|
||||
|
||||
// RegisterDial registers a custom dial function.
|
||||
func RegisterDial(dial DialFunc) {
|
||||
customDialLock.Lock()
|
||||
customDial = dial
|
||||
customDialLock.Unlock()
|
||||
}
|
||||
|
||||
// DeregisterDial deregisters the custom dial function.
|
||||
func DeregisterDial() {
|
||||
customDialLock.Lock()
|
||||
customDial = nil
|
||||
customDialLock.Unlock()
|
||||
}
|
||||
func dial(options connOptions) (*connect, error) {
|
||||
var (
|
||||
err error
|
||||
abs = func(v int) int {
|
||||
if v < 0 {
|
||||
return -1 * v
|
||||
}
|
||||
return v
|
||||
}
|
||||
conn net.Conn
|
||||
ident = abs(int(atomic.AddInt32(&tick, 1)))
|
||||
)
|
||||
tlsConfig := options.tlsConfig
|
||||
if options.secure {
|
||||
if tlsConfig == nil {
|
||||
tlsConfig = &tls.Config{}
|
||||
}
|
||||
tlsConfig.InsecureSkipVerify = options.skipVerify
|
||||
}
|
||||
checkedHosts := make(map[int]struct{}, len(options.hosts))
|
||||
for i := range options.hosts {
|
||||
var num int
|
||||
switch options.openStrategy {
|
||||
case connOpenInOrder:
|
||||
num = i
|
||||
case connOpenRandom:
|
||||
num = (ident + i) % len(options.hosts)
|
||||
case connOpenTimeRandom:
|
||||
// select host based on milliseconds
|
||||
num = int((time.Now().UnixNano()/1000)%1000) % len(options.hosts)
|
||||
for _, ok := checkedHosts[num]; ok; _, ok = checkedHosts[num] {
|
||||
num = int(time.Now().UnixNano()) % len(options.hosts)
|
||||
}
|
||||
checkedHosts[num] = struct{}{}
|
||||
}
|
||||
customDialLock.RLock()
|
||||
cd := customDial
|
||||
customDialLock.RUnlock()
|
||||
switch {
|
||||
case options.secure:
|
||||
if cd != nil {
|
||||
conn, err = cd("tcp", options.hosts[num], options.connTimeout, tlsConfig)
|
||||
} else {
|
||||
conn, err = tls.DialWithDialer(
|
||||
&net.Dialer{
|
||||
Timeout: options.connTimeout,
|
||||
},
|
||||
"tcp",
|
||||
options.hosts[num],
|
||||
tlsConfig,
|
||||
)
|
||||
}
|
||||
default:
|
||||
if cd != nil {
|
||||
conn, err = cd("tcp", options.hosts[num], options.connTimeout, nil)
|
||||
} else {
|
||||
conn, err = net.DialTimeout("tcp", options.hosts[num], options.connTimeout)
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
options.logf(
|
||||
"[dial] secure=%t, skip_verify=%t, strategy=%s, ident=%d, server=%d -> %s",
|
||||
options.secure,
|
||||
options.skipVerify,
|
||||
options.openStrategy,
|
||||
ident,
|
||||
num,
|
||||
conn.RemoteAddr(),
|
||||
)
|
||||
if tcp, ok := conn.(*net.TCPConn); ok {
|
||||
err = tcp.SetNoDelay(options.noDelay) // Disable or enable the Nagle Algorithm for this tcp socket
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return &connect{
|
||||
Conn: conn,
|
||||
logf: options.logf,
|
||||
ident: ident,
|
||||
buffer: bufio.NewReader(conn),
|
||||
readTimeout: options.readTimeout,
|
||||
writeTimeout: options.writeTimeout,
|
||||
}, nil
|
||||
} else {
|
||||
options.logf(
|
||||
"[dial err] secure=%t, skip_verify=%t, strategy=%s, ident=%d, addr=%s\n%#v",
|
||||
options.secure,
|
||||
options.skipVerify,
|
||||
options.openStrategy,
|
||||
ident,
|
||||
options.hosts[num],
|
||||
err,
|
||||
)
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
type connect struct {
|
||||
net.Conn
|
||||
logf func(string, ...interface{})
|
||||
ident int
|
||||
buffer *bufio.Reader
|
||||
closed bool
|
||||
readTimeout time.Duration
|
||||
writeTimeout time.Duration
|
||||
lastReadDeadlineTime time.Time
|
||||
lastWriteDeadlineTime time.Time
|
||||
}
|
||||
|
||||
func (conn *connect) Read(b []byte) (int, error) {
|
||||
var (
|
||||
n int
|
||||
err error
|
||||
total int
|
||||
dstLen = len(b)
|
||||
)
|
||||
if currentTime := now(); conn.readTimeout != 0 && currentTime.Sub(conn.lastReadDeadlineTime) > (conn.readTimeout>>2) {
|
||||
conn.SetReadDeadline(time.Now().Add(conn.readTimeout))
|
||||
conn.lastReadDeadlineTime = currentTime
|
||||
}
|
||||
for total < dstLen {
|
||||
if n, err = conn.buffer.Read(b[total:]); err != nil {
|
||||
conn.logf("[connect] read error: %v", err)
|
||||
conn.Close()
|
||||
return n, driver.ErrBadConn
|
||||
}
|
||||
total += n
|
||||
}
|
||||
return total, nil
|
||||
}
|
||||
|
||||
func (conn *connect) Write(b []byte) (int, error) {
|
||||
var (
|
||||
n int
|
||||
err error
|
||||
total int
|
||||
srcLen = len(b)
|
||||
)
|
||||
if currentTime := now(); conn.writeTimeout != 0 && currentTime.Sub(conn.lastWriteDeadlineTime) > (conn.writeTimeout>>2) {
|
||||
conn.SetWriteDeadline(time.Now().Add(conn.writeTimeout))
|
||||
conn.lastWriteDeadlineTime = currentTime
|
||||
}
|
||||
for total < srcLen {
|
||||
if n, err = conn.Conn.Write(b[total:]); err != nil {
|
||||
conn.logf("[connect] write error: %v", err)
|
||||
conn.Close()
|
||||
return n, driver.ErrBadConn
|
||||
}
|
||||
total += n
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (conn *connect) Close() error {
|
||||
if !conn.closed {
|
||||
conn.closed = true
|
||||
return conn.Conn.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
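For reference, the `RegisterDial` hook deleted above is used like this (a minimal sketch; this dialer simply delegates to `net.DialTimeout` and ignores the TLS config, which a real dialer for secure connections would need to honor):

```go
package main

import (
    "crypto/tls"
    "net"
    "time"

    "github.com/ClickHouse/clickhouse-go"
)

func init() {
    // Route every driver connection through a custom dialer, e.g. to go
    // via a proxy or to tag connections. The signature matches DialFunc.
    clickhouse.RegisterDial(func(network, address string, timeout time.Duration, config *tls.Config) (net.Conn, error) {
        return net.DialTimeout(network, address, timeout)
    })
}

func main() {}
```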
57 vendor/github.com/ClickHouse/clickhouse-go/connect_check.go generated vendored
@ -1,57 +0,0 @@
|
||||
// +build linux darwin dragonfly freebsd netbsd openbsd solaris illumos
|
||||
|
||||
package clickhouse
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
var errUnexpectedRead = errors.New("unexpected read from socket")
|
||||
|
||||
func (conn *connect) connCheck() error {
|
||||
var sysErr error
|
||||
|
||||
sysConn, ok := conn.Conn.(syscall.Conn)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
rawConn, err := sysConn.SyscallConn()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// If this connection has a ReadTimeout which we've been setting on
|
||||
// reads, reset it to zero value before we attempt a non-blocking
|
||||
// read, otherwise we may get os.ErrDeadlineExceeded for the cached
|
||||
// connection from the pool with an expired timeout.
|
||||
if conn.readTimeout != 0 {
|
||||
err = conn.SetReadDeadline(time.Time{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("set read deadline: %w", err)
|
||||
}
|
||||
conn.lastReadDeadlineTime = time.Time{}
|
||||
}
|
||||
err = rawConn.Read(func(fd uintptr) bool {
|
||||
var buf [1]byte
|
||||
n, err := syscall.Read(int(fd), buf[:])
|
||||
switch {
|
||||
case n == 0 && err == nil:
|
||||
sysErr = io.EOF
|
||||
case n > 0:
|
||||
sysErr = errUnexpectedRead
|
||||
case err == syscall.EAGAIN || err == syscall.EWOULDBLOCK:
|
||||
sysErr = nil
|
||||
default:
|
||||
sysErr = err
|
||||
}
|
||||
return true
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return sysErr
|
||||
}
|
7 vendor/github.com/ClickHouse/clickhouse-go/connect_check_dummy.go generated vendored
@ -1,7 +0,0 @@
|
||||
// +build !linux,!darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!illumos
|
||||
|
||||
package clickhouse
|
||||
|
||||
func (conn *connect) connCheck() error {
|
||||
return nil
|
||||
}
|
9 vendor/github.com/ClickHouse/clickhouse-go/docker-compose.yml generated vendored
@ -1,9 +0,0 @@
|
||||
---
|
||||
version: '3'
|
||||
services:
|
||||
clickhouse:
|
||||
image: yandex/clickhouse-server
|
||||
ports:
|
||||
- 127.0.0.1:8123:8123
|
||||
- 127.0.0.1:9000:9000
|
||||
- 127.0.0.1:9009:9009
|
12 vendor/github.com/ClickHouse/clickhouse-go/go.test.sh generated vendored
@ -1,12 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
echo "" > coverage.txt
|
||||
|
||||
for d in $(go list ./... | grep -v vendor | grep -v examples); do
|
||||
go test -race -coverprofile=profile.out -covermode=atomic $d
|
||||
if [ -f profile.out ]; then
|
||||
cat profile.out >> coverage.txt
|
||||
rm profile.out
|
||||
fi
|
||||
done
|
144
vendor/github.com/ClickHouse/clickhouse-go/helpers.go
generated
vendored
@@ -1,144 +0,0 @@
package clickhouse

import (
	"bytes"
	"database/sql/driver"
	"fmt"
	"reflect"
	"regexp"
	"strings"
	"time"
)

func numInput(query string) int {

	var (
		count         int
		args          = make(map[string]struct{})
		reader        = bytes.NewReader([]byte(query))
		quote, gravis bool
		escape        bool
		keyword       bool
		inBetween     bool
		like          = newMatcher("like")
		limit         = newMatcher("limit")
		offset        = newMatcher("offset")
		between       = newMatcher("between")
		in            = newMatcher("in")
		and           = newMatcher("and")
		from          = newMatcher("from")
		join          = newMatcher("join")
		subSelect     = newMatcher("select")
	)
	for {
		if char, _, err := reader.ReadRune(); err == nil {
			if escape {
				escape = false
				continue
			}
			switch char {
			case '\\':
				if gravis || quote {
					escape = true
				}
			case '\'':
				if !gravis {
					quote = !quote
				}
			case '`':
				if !quote {
					gravis = !gravis
				}
			}
			if quote || gravis {
				continue
			}
			switch {
			case char == '?' && keyword:
				count++
			case char == '@':
				if param := paramParser(reader); len(param) != 0 {
					if _, found := args[param]; !found {
						args[param] = struct{}{}
						count++
					}
				}
			case
				char == '=',
				char == '<',
				char == '>',
				char == '(',
				char == ',',
				char == '[',
				char == '%':
				keyword = true
			default:
				if limit.matchRune(char) || offset.matchRune(char) || like.matchRune(char) ||
					in.matchRune(char) || from.matchRune(char) || join.matchRune(char) || subSelect.matchRune(char) {
					keyword = true
				} else if between.matchRune(char) {
					keyword = true
					inBetween = true
				} else if inBetween && and.matchRune(char) {
					keyword = true
					inBetween = false
				} else {
					keyword = keyword && (char == ' ' || char == '\t' || char == '\n')
				}
			}
		} else {
			break
		}
	}
	return count
}

func paramParser(reader *bytes.Reader) string {
	var name bytes.Buffer
	for {
		if char, _, err := reader.ReadRune(); err == nil {
			if char == '_' || char >= '0' && char <= '9' || 'a' <= char && char <= 'z' || 'A' <= char && char <= 'Z' {
				name.WriteRune(char)
			} else {
				reader.UnreadRune()
				break
			}
		} else {
			break
		}
	}
	return name.String()
}

var selectRe = regexp.MustCompile(`\s+SELECT\s+`)

func isInsert(query string) bool {
	if f := strings.Fields(query); len(f) > 2 {
		return strings.EqualFold("INSERT", f[0]) && strings.EqualFold("INTO", f[1]) && !selectRe.MatchString(strings.ToUpper(query))
	}
	return false
}

func quote(v driver.Value) string {
	switch v := reflect.ValueOf(v); v.Kind() {
	case reflect.Slice:
		values := make([]string, 0, v.Len())
		for i := 0; i < v.Len(); i++ {
			values = append(values, quote(v.Index(i).Interface()))
		}
		return strings.Join(values, ", ")
	}
	switch v := v.(type) {
	case string:
		return "'" + strings.NewReplacer(`\`, `\\`, `'`, `\'`).Replace(v) + "'"
	case time.Time:
		return formatTime(v)
	case nil:
		return "null"
	}
	return fmt.Sprint(v)
}

func formatTime(v time.Time) string {
	return v.Format("toDateTime('2006-01-02 15:04:05', '" + v.Location().String() + "')")
}
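The escaping rules quote() applies to strings are easy to get wrong by hand. A self-contained illustration that reproduces them (quote itself is unexported from package clickhouse, so this mirrors the logic rather than calling it):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Same replacer as quote(): backslash doubled, single quote backslash-escaped.
	escape := strings.NewReplacer(`\`, `\\`, `'`, `\'`)
	fmt.Println("'" + escape.Replace(`O'Brien \ Co`) + "'")
	// Output: 'O\'Brien \\ Co'
}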
107
vendor/github.com/ClickHouse/clickhouse-go/lib/binary/compress_reader.go
generated
vendored
@@ -1,107 +0,0 @@
// +build !clz4

package binary

import (
	"encoding/binary"
	"fmt"
	"io"

	"github.com/ClickHouse/clickhouse-go/lib/lz4"
)

type compressReader struct {
	reader io.Reader
	// data uncompressed
	data []byte
	// data position
	pos int
	// data compressed
	zdata []byte
	// lz4 headers
	header []byte
}

// NewCompressReader wrap the io.Reader
func NewCompressReader(r io.Reader) *compressReader {
	p := &compressReader{
		reader: r,
		header: make([]byte, HeaderSize),
	}
	p.data = make([]byte, BlockMaxSize, BlockMaxSize)

	zlen := lz4.CompressBound(BlockMaxSize) + HeaderSize
	p.zdata = make([]byte, zlen, zlen)

	p.pos = len(p.data)
	return p
}

func (cr *compressReader) Read(buf []byte) (n int, err error) {
	var bytesRead = 0
	n = len(buf)

	if cr.pos < len(cr.data) {
		copyedSize := copy(buf, cr.data[cr.pos:])

		bytesRead += copyedSize
		cr.pos += copyedSize
	}

	for bytesRead < n {
		if err = cr.readCompressedData(); err != nil {
			return bytesRead, err
		}
		copyedSize := copy(buf[bytesRead:], cr.data)

		bytesRead += copyedSize
		cr.pos = copyedSize
	}
	return n, nil
}

func (cr *compressReader) readCompressedData() (err error) {
	cr.pos = 0
	var n int
	n, err = cr.reader.Read(cr.header)
	if err != nil {
		return
	}
	if n != len(cr.header) {
		return fmt.Errorf("Lz4 decompression header EOF")
	}

	compressedSize := int(binary.LittleEndian.Uint32(cr.header[17:])) - 9
	decompressedSize := int(binary.LittleEndian.Uint32(cr.header[21:]))

	if compressedSize > cap(cr.zdata) {
		cr.zdata = make([]byte, compressedSize)
	}
	if decompressedSize > cap(cr.data) {
		cr.data = make([]byte, decompressedSize)
	}

	cr.zdata = cr.zdata[:compressedSize]
	cr.data = cr.data[:decompressedSize]

	// @TODO checksum
	if cr.header[16] == LZ4 {
		n, err = cr.reader.Read(cr.zdata)
		if err != nil {
			return
		}

		if n != len(cr.zdata) {
			return fmt.Errorf("Decompress read size not match")
		}

		_, err = lz4.Decode(cr.data, cr.zdata)
		if err != nil {
			return
		}
	} else {
		return fmt.Errorf("Unknown compression method: 0x%02x ", cr.header[16])
	}

	return nil
}
107
vendor/github.com/ClickHouse/clickhouse-go/lib/binary/compress_reader_clz4.go
generated
vendored
@@ -1,107 +0,0 @@
// +build clz4

package binary

import (
	"encoding/binary"
	"fmt"
	"io"

	lz4 "github.com/cloudflare/golz4"
)

type compressReader struct {
	reader io.Reader
	// data uncompressed
	data []byte
	// data position
	pos int
	// data compressed
	zdata []byte
	// lz4 headers
	header []byte
}

// NewCompressReader wrap the io.Reader
func NewCompressReader(r io.Reader) *compressReader {
	p := &compressReader{
		reader: r,
		header: make([]byte, HeaderSize),
	}
	p.data = make([]byte, BlockMaxSize, BlockMaxSize)

	zlen := lz4.CompressBound(p.data) + HeaderSize
	p.zdata = make([]byte, zlen, zlen)

	p.pos = len(p.data)
	return p
}

func (cr *compressReader) Read(buf []byte) (n int, err error) {
	var bytesRead = 0
	n = len(buf)

	if cr.pos < len(cr.data) {
		copyedSize := copy(buf, cr.data[cr.pos:])

		bytesRead += copyedSize
		cr.pos += copyedSize
	}

	for bytesRead < n {
		if err = cr.readCompressedData(); err != nil {
			return bytesRead, err
		}
		copyedSize := copy(buf[bytesRead:], cr.data)

		bytesRead += copyedSize
		cr.pos = copyedSize
	}
	return n, nil
}

func (cr *compressReader) readCompressedData() (err error) {
	cr.pos = 0
	var n int
	n, err = cr.reader.Read(cr.header)
	if err != nil {
		return
	}
	if n != len(cr.header) {
		return fmt.Errorf("Lz4 decompression header EOF")
	}

	compressedSize := int(binary.LittleEndian.Uint32(cr.header[17:])) - 9
	decompressedSize := int(binary.LittleEndian.Uint32(cr.header[21:]))

	if compressedSize > cap(cr.zdata) {
		cr.zdata = make([]byte, compressedSize)
	}
	if decompressedSize > cap(cr.data) {
		cr.data = make([]byte, decompressedSize)
	}

	cr.zdata = cr.zdata[:compressedSize]
	cr.data = cr.data[:decompressedSize]

	// @TODO checksum
	if cr.header[16] == LZ4 {
		n, err = cr.reader.Read(cr.zdata)
		if err != nil {
			return
		}

		if n != len(cr.zdata) {
			return fmt.Errorf("Decompress read size not match")
		}

		err = lz4.Uncompress(cr.zdata, cr.data)
		if err != nil {
			return
		}
	} else {
		return fmt.Errorf("Unknown compression method: 0x%02x ", cr.header[16])
	}

	return nil
}
21
vendor/github.com/ClickHouse/clickhouse-go/lib/binary/compress_settings.go
generated
vendored
@@ -1,21 +0,0 @@
package binary

type CompressionMethodByte byte

const (
	NONE CompressionMethodByte = 0x02
	LZ4                        = 0x82
	ZSTD                       = 0x90
)

const (
	// ChecksumSize is 128bits for cityhash102 checksum
	ChecksumSize = 16
	// CompressHeader magic + compressed_size + uncompressed_size
	CompressHeaderSize = 1 + 4 + 4

	// HeaderSize
	HeaderSize = ChecksumSize + CompressHeaderSize
	// BlockMaxSize 1MB
	BlockMaxSize = 1 << 20
)
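These constants fix the on-wire block framing, and they are where the magic offsets 16, 17 and 21 in compress_reader.go and compress_writer.go come from. A small derivation, mirroring the constants above with local names:

package main

import "fmt"

// Local mirrors of the constants above, to make the offset arithmetic visible.
const (
	checksumSize       = 16        // cityhash102 128-bit checksum
	compressHeaderSize = 1 + 4 + 4 // method byte + compressed size + uncompressed size
	headerSize         = checksumSize + compressHeaderSize
)

func main() {
	fmt.Println("method byte at:", checksumSize)           // 16
	fmt.Println("compressed size at:", checksumSize+1)     // 17
	fmt.Println("uncompressed size at:", checksumSize+1+4) // 21
	fmt.Println("payload starts at:", headerSize)          // 25
}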
79
vendor/github.com/ClickHouse/clickhouse-go/lib/binary/compress_writer.go
generated
vendored
@@ -1,79 +0,0 @@
// +build !clz4

package binary

import (
	"encoding/binary"
	"io"

	"github.com/ClickHouse/clickhouse-go/lib/cityhash102"
	"github.com/ClickHouse/clickhouse-go/lib/lz4"
)

type compressWriter struct {
	writer io.Writer
	// data uncompressed
	data []byte
	// data position
	pos int
	// data compressed
	zdata []byte
}

// NewCompressWriter wrap the io.Writer
func NewCompressWriter(w io.Writer) *compressWriter {
	p := &compressWriter{writer: w}
	p.data = make([]byte, BlockMaxSize, BlockMaxSize)

	zlen := lz4.CompressBound(BlockMaxSize) + HeaderSize
	p.zdata = make([]byte, zlen, zlen)
	return p
}

func (cw *compressWriter) Write(buf []byte) (int, error) {
	var n int
	for len(buf) > 0 {
		// Accumulate the data to be compressed.
		m := copy(cw.data[cw.pos:], buf)
		cw.pos += m
		buf = buf[m:]

		if cw.pos == len(cw.data) {
			err := cw.Flush()
			if err != nil {
				return n, err
			}
		}
		n += m
	}
	return n, nil
}

func (cw *compressWriter) Flush() (err error) {
	if cw.pos == 0 {
		return
	}

	// write the headers
	compressedSize, err := lz4.Encode(cw.zdata[HeaderSize:], cw.data[:cw.pos])
	if err != nil {
		return err
	}
	compressedSize += CompressHeaderSize
	// fill the header, compressed_size_32 + uncompressed_size_32
	cw.zdata[16] = LZ4
	binary.LittleEndian.PutUint32(cw.zdata[17:], uint32(compressedSize))
	binary.LittleEndian.PutUint32(cw.zdata[21:], uint32(cw.pos))

	// fill the checksum
	checkSum := cityhash102.CityHash128(cw.zdata[16:], uint32(compressedSize))
	binary.LittleEndian.PutUint64(cw.zdata[0:], checkSum.Lower64())
	binary.LittleEndian.PutUint64(cw.zdata[8:], checkSum.Higher64())

	cw.writer.Write(cw.zdata[:compressedSize+ChecksumSize])
	if w, ok := cw.writer.(WriteFlusher); ok {
		err = w.Flush()
	}
	cw.pos = 0
	return
}
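Note that the checksum covers zdata[16 : 16+compressedSize], i.e. the method byte, both size fields, and the LZ4 payload together. A minimal usage sketch of this writer, assuming the pre-removal vendored import path:

package main

import (
	"bytes"
	"fmt"

	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

func main() {
	var out bytes.Buffer
	cw := binary.NewCompressWriter(&out) // buffers up to 1 MiB, frames on Flush
	cw.Write([]byte("hello clickhouse"))
	cw.Flush() // emits: 16-byte checksum + 9-byte compress header + LZ4 payload
	fmt.Println("framed block size:", out.Len())
}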
78
vendor/github.com/ClickHouse/clickhouse-go/lib/binary/compress_writer_clz4.go
generated
vendored
@@ -1,78 +0,0 @@
// +build clz4

package binary

import (
	"encoding/binary"
	"io"

	lz4 "github.com/cloudflare/golz4"
	"github.com/ClickHouse/clickhouse-go/lib/cityhash102"
)

type compressWriter struct {
	writer io.Writer
	// data uncompressed
	data []byte
	// data position
	pos int
	// data compressed
	zdata []byte
}

// NewCompressWriter wrap the io.Writer
func NewCompressWriter(w io.Writer) *compressWriter {
	p := &compressWriter{writer: w}
	p.data = make([]byte, BlockMaxSize, BlockMaxSize)

	zlen := lz4.CompressBound(p.data) + HeaderSize
	p.zdata = make([]byte, zlen, zlen)
	return p
}

func (cw *compressWriter) Write(buf []byte) (int, error) {
	var n int
	for len(buf) > 0 {
		// Accumulate the data to be compressed.
		m := copy(cw.data[cw.pos:], buf)
		cw.pos += m
		buf = buf[m:]

		if cw.pos == len(cw.data) {
			err := cw.Flush()
			if err != nil {
				return n, err
			}
		}
		n += m
	}
	return n, nil
}

func (cw *compressWriter) Flush() (err error) {
	if cw.pos == 0 {
		return
	}
	// write the headers
	compressedSize, err := lz4.Compress(cw.data[:cw.pos], cw.zdata[HeaderSize:])
	if err != nil {
		return err
	}
	compressedSize += CompressHeaderSize
	// fill the header, compressed_size_32 + uncompressed_size_32
	cw.zdata[16] = LZ4
	binary.LittleEndian.PutUint32(cw.zdata[17:], uint32(compressedSize))
	binary.LittleEndian.PutUint32(cw.zdata[21:], uint32(cw.pos))

	// fill the checksum
	checkSum := cityhash102.CityHash128(cw.zdata[16:], uint32(compressedSize))
	binary.LittleEndian.PutUint64(cw.zdata[0:], checkSum.Lower64())
	binary.LittleEndian.PutUint64(cw.zdata[8:], checkSum.Higher64())

	cw.writer.Write(cw.zdata[:compressedSize+ChecksumSize])
	if w, ok := cw.writer.(WriteFlusher); ok {
		err = w.Flush()
	}
	cw.pos = 0
	return
}
177
vendor/github.com/ClickHouse/clickhouse-go/lib/binary/decoder.go
generated
vendored
@@ -1,177 +0,0 @@
package binary

import (
	"encoding/binary"
	"io"
	"math"
)

func NewDecoder(input io.Reader) *Decoder {
	return &Decoder{
		input: input,
	}
}

func NewDecoderWithCompress(input io.Reader) *Decoder {
	return &Decoder{
		input:         input,
		compressInput: NewCompressReader(input),
	}
}

type Decoder struct {
	compress      bool
	input         io.Reader
	compressInput io.Reader
	scratch       [binary.MaxVarintLen64]byte
}

func (decoder *Decoder) SelectCompress(compress bool) {
	decoder.compress = compress
}

func (decoder *Decoder) Get() io.Reader {
	if decoder.compress && decoder.compressInput != nil {
		return decoder.compressInput
	}
	return decoder.input
}

func (decoder *Decoder) Bool() (bool, error) {
	v, err := decoder.ReadByte()
	if err != nil {
		return false, err
	}
	return v == 1, nil
}

func (decoder *Decoder) Uvarint() (uint64, error) {
	return binary.ReadUvarint(decoder)
}

func (decoder *Decoder) Int8() (int8, error) {
	v, err := decoder.ReadByte()
	if err != nil {
		return 0, err
	}
	return int8(v), nil
}

func (decoder *Decoder) Int16() (int16, error) {
	v, err := decoder.UInt16()
	if err != nil {
		return 0, err
	}
	return int16(v), nil
}

func (decoder *Decoder) Int32() (int32, error) {
	v, err := decoder.UInt32()
	if err != nil {
		return 0, err
	}
	return int32(v), nil
}

func (decoder *Decoder) Int64() (int64, error) {
	v, err := decoder.UInt64()
	if err != nil {
		return 0, err
	}
	return int64(v), nil
}

func (decoder *Decoder) UInt8() (uint8, error) {
	v, err := decoder.ReadByte()
	if err != nil {
		return 0, err
	}
	return uint8(v), nil
}

func (decoder *Decoder) UInt16() (uint16, error) {
	if _, err := decoder.Get().Read(decoder.scratch[:2]); err != nil {
		return 0, err
	}
	return uint16(decoder.scratch[0]) | uint16(decoder.scratch[1])<<8, nil
}

func (decoder *Decoder) UInt32() (uint32, error) {
	if _, err := decoder.Get().Read(decoder.scratch[:4]); err != nil {
		return 0, err
	}
	return uint32(decoder.scratch[0]) |
		uint32(decoder.scratch[1])<<8 |
		uint32(decoder.scratch[2])<<16 |
		uint32(decoder.scratch[3])<<24, nil
}

func (decoder *Decoder) UInt64() (uint64, error) {
	if _, err := decoder.Get().Read(decoder.scratch[:8]); err != nil {
		return 0, err
	}
	return uint64(decoder.scratch[0]) |
		uint64(decoder.scratch[1])<<8 |
		uint64(decoder.scratch[2])<<16 |
		uint64(decoder.scratch[3])<<24 |
		uint64(decoder.scratch[4])<<32 |
		uint64(decoder.scratch[5])<<40 |
		uint64(decoder.scratch[6])<<48 |
		uint64(decoder.scratch[7])<<56, nil
}

func (decoder *Decoder) Float32() (float32, error) {
	v, err := decoder.UInt32()
	if err != nil {
		return 0, err
	}
	return math.Float32frombits(v), nil
}

func (decoder *Decoder) Float64() (float64, error) {
	v, err := decoder.UInt64()
	if err != nil {
		return 0, err
	}
	return math.Float64frombits(v), nil
}

func (decoder *Decoder) Fixed(ln int) ([]byte, error) {
	if reader, ok := decoder.Get().(FixedReader); ok {
		return reader.Fixed(ln)
	}
	buf := make([]byte, ln)
	if _, err := decoder.Get().Read(buf); err != nil {
		return nil, err
	}
	return buf, nil
}

func (decoder *Decoder) String() (string, error) {
	strlen, err := decoder.Uvarint()
	if err != nil {
		return "", err
	}
	str, err := decoder.Fixed(int(strlen))
	if err != nil {
		return "", err
	}
	return string(str), nil
}

func (decoder *Decoder) Decimal128() ([]byte, error) {
	bytes := make([]byte, 16)
	_, err := decoder.Get().Read(bytes)
	return bytes, err
}

func (decoder *Decoder) ReadByte() (byte, error) {
	if _, err := decoder.Get().Read(decoder.scratch[:1]); err != nil {
		return 0x0, err
	}
	return decoder.scratch[0], nil
}

type FixedReader interface {
	Fixed(ln int) ([]byte, error)
}
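Uvarint() can delegate straight to encoding/binary.ReadUvarint because Decoder's ReadByte method makes it an io.ByteReader. A compile-checkable illustration, assuming the pre-removal vendored import path:

package main

import (
	"bytes"
	"fmt"
	"io"

	chbin "github.com/ClickHouse/clickhouse-go/lib/binary"
)

// Compile-time evidence: Decoder satisfies io.ByteReader, which ReadUvarint requires.
var _ io.ByteReader = (*chbin.Decoder)(nil)

func main() {
	dec := chbin.NewDecoder(bytes.NewReader([]byte{0xAC, 0x02})) // varint encoding of 300
	n, _ := dec.Uvarint()
	fmt.Println(n) // 300
}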
199
vendor/github.com/ClickHouse/clickhouse-go/lib/binary/encoder.go
generated
vendored
@@ -1,199 +0,0 @@
package binary

import (
	"encoding/binary"
	"io"
	"math"
	"reflect"
	"unsafe"
)

func NewEncoder(w io.Writer) *Encoder {
	return &Encoder{
		output: w,
	}
}

func NewEncoderWithCompress(w io.Writer) *Encoder {
	return &Encoder{
		output:         w,
		compressOutput: NewCompressWriter(w),
	}
}

type Encoder struct {
	compress       bool
	output         io.Writer
	compressOutput io.Writer
	scratch        [binary.MaxVarintLen64]byte
}

func (enc *Encoder) SelectCompress(compress bool) {
	if enc.compressOutput == nil {
		return
	}
	if enc.compress && !compress {
		enc.Flush()
	}
	enc.compress = compress
}

func (enc *Encoder) Get() io.Writer {
	if enc.compress && enc.compressOutput != nil {
		return enc.compressOutput
	}
	return enc.output
}

func (enc *Encoder) Uvarint(v uint64) error {
	ln := binary.PutUvarint(enc.scratch[:binary.MaxVarintLen64], v)
	if _, err := enc.Get().Write(enc.scratch[0:ln]); err != nil {
		return err
	}
	return nil
}

func (enc *Encoder) Bool(v bool) error {
	if v {
		return enc.UInt8(1)
	}
	return enc.UInt8(0)
}

func (enc *Encoder) Int8(v int8) error {
	return enc.UInt8(uint8(v))
}

func (enc *Encoder) Int16(v int16) error {
	return enc.UInt16(uint16(v))
}

func (enc *Encoder) Int32(v int32) error {
	return enc.UInt32(uint32(v))
}

func (enc *Encoder) Int64(v int64) error {
	return enc.UInt64(uint64(v))
}

func (enc *Encoder) UInt8(v uint8) error {
	enc.scratch[0] = v
	if _, err := enc.Get().Write(enc.scratch[:1]); err != nil {
		return err
	}
	return nil
}

func (enc *Encoder) UInt16(v uint16) error {
	enc.scratch[0] = byte(v)
	enc.scratch[1] = byte(v >> 8)
	if _, err := enc.Get().Write(enc.scratch[:2]); err != nil {
		return err
	}
	return nil
}

func (enc *Encoder) UInt32(v uint32) error {
	enc.scratch[0] = byte(v)
	enc.scratch[1] = byte(v >> 8)
	enc.scratch[2] = byte(v >> 16)
	enc.scratch[3] = byte(v >> 24)
	if _, err := enc.Get().Write(enc.scratch[:4]); err != nil {
		return err
	}
	return nil
}

func (enc *Encoder) UInt64(v uint64) error {
	enc.scratch[0] = byte(v)
	enc.scratch[1] = byte(v >> 8)
	enc.scratch[2] = byte(v >> 16)
	enc.scratch[3] = byte(v >> 24)
	enc.scratch[4] = byte(v >> 32)
	enc.scratch[5] = byte(v >> 40)
	enc.scratch[6] = byte(v >> 48)
	enc.scratch[7] = byte(v >> 56)
	if _, err := enc.Get().Write(enc.scratch[:8]); err != nil {
		return err
	}
	return nil
}

func (enc *Encoder) Float32(v float32) error {
	return enc.UInt32(math.Float32bits(v))
}

func (enc *Encoder) Float64(v float64) error {
	return enc.UInt64(math.Float64bits(v))
}

func (enc *Encoder) String(v string) error {
	str := Str2Bytes(v)
	if err := enc.Uvarint(uint64(len(str))); err != nil {
		return err
	}
	if _, err := enc.Get().Write(str); err != nil {
		return err
	}
	return nil
}

func (enc *Encoder) RawString(str []byte) error {
	if err := enc.Uvarint(uint64(len(str))); err != nil {
		return err
	}
	if _, err := enc.Get().Write(str); err != nil {
		return err
	}
	return nil
}

func (enc *Encoder) Decimal128(bytes []byte) error {
	_, err := enc.Get().Write(bytes)
	return err
}

func (enc *Encoder) Write(b []byte) (int, error) {
	return enc.Get().Write(b)
}

func (enc *Encoder) Flush() error {
	if w, ok := enc.Get().(WriteFlusher); ok {
		return w.Flush()
	}
	return nil
}

type WriteFlusher interface {
	Flush() error
}

func Str2Bytes(str string) []byte {
	// Copied from https://github.com/m3db/m3/blob/master/src/x/unsafe/string.go#L62
	if len(str) == 0 {
		return nil
	}

	// We need to declare a real byte slice so internally the compiler
	// knows to use an unsafe.Pointer to keep track of the underlying memory so that
	// once the slice's array pointer is updated with the pointer to the string's
	// underlying bytes, the compiler won't prematurely GC the memory when the string
	// goes out of scope.
	var b []byte
	byteHeader := (*reflect.SliceHeader)(unsafe.Pointer(&b))

	// This makes sure that even if GC relocates the string's underlying
	// memory after this assignment, the corresponding unsafe.Pointer in the internal
	// slice struct will be updated accordingly to reflect the memory relocation.
	byteHeader.Data = (*reflect.StringHeader)(unsafe.Pointer(&str)).Data

	// It is important that we access str after we assign the Data
	// pointer of the string header to the Data pointer of the slice header to
	// make sure the string (and the underlying bytes backing the string) don't get
	// GC'ed before the assignment happens.
	l := len(str)
	byteHeader.Len = l
	byteHeader.Cap = l

	return b
}
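Note that Str2Bytes returns a slice that aliases the string's memory, so callers must treat it as read-only; the encoder only ever writes it out. A round-trip sketch for the Encoder/Decoder pair, assuming the pre-removal vendored import path:

package main

import (
	"bytes"
	"fmt"

	chbin "github.com/ClickHouse/clickhouse-go/lib/binary"
)

func main() {
	var buf bytes.Buffer
	enc := chbin.NewEncoder(&buf)
	enc.Uvarint(300)         // varint-prefixed values
	enc.String("clickhouse") // length-prefixed string
	enc.UInt32(0xDEADBEEF)   // fixed-width little-endian

	dec := chbin.NewDecoder(&buf)
	n, _ := dec.Uvarint()
	s, _ := dec.String()
	u, _ := dec.UInt32()
	fmt.Println(n, s, u) // 300 clickhouse 3735928559
}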
45
vendor/github.com/ClickHouse/clickhouse-go/lib/cityhash102/city64.go
generated
vendored
@@ -1,45 +0,0 @@
package cityhash102

import (
	"encoding/binary"
	"hash"
)

type City64 struct {
	s []byte
}

var _ hash.Hash64 = (*City64)(nil)
var _ hash.Hash = (*City64)(nil)

func New64() hash.Hash64 {
	return &City64{}
}

func (this *City64) Sum(b []byte) []byte {
	b2 := make([]byte, 8)
	binary.BigEndian.PutUint64(b2, this.Sum64())
	b = append(b, b2...)
	return b
}

func (this *City64) Sum64() uint64 {
	return CityHash64(this.s, uint32(len(this.s)))
}

func (this *City64) Reset() {
	this.s = this.s[0:0]
}

func (this *City64) BlockSize() int {
	return 1
}

func (this *City64) Write(s []byte) (n int, err error) {
	this.s = append(this.s, s...)
	return len(s), nil
}

func (this *City64) Size() int {
	return 8
}
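This wrapper buffers every written byte and hashes the whole accumulation on Sum64, which is why BlockSize is 1. A minimal usage sketch, assuming the pre-removal vendored import path:

package main

import (
	"fmt"

	"github.com/ClickHouse/clickhouse-go/lib/cityhash102"
)

func main() {
	h := cityhash102.New64() // standard hash.Hash64 interface
	h.Write([]byte("clickhouse"))
	fmt.Println(h.Sum64())
}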
383
vendor/github.com/ClickHouse/clickhouse-go/lib/cityhash102/cityhash.go
generated
vendored
@@ -1,383 +0,0 @@
/*
 * Go implementation of Google city hash (MIT license)
 * https://code.google.com/p/cityhash/
 *
 * MIT License http://www.opensource.org/licenses/mit-license.php
 *
 * I don't even want to pretend to understand the details of city hash.
 * I am only reproducing the logic in Go as faithfully as I can.
 *
 */

package cityhash102

import (
	"encoding/binary"
)

const (
	k0 uint64 = 0xc3a5c85c97cb3127
	k1 uint64 = 0xb492b66fbe98f273
	k2 uint64 = 0x9ae16a3b2f90404f
	k3 uint64 = 0xc949d7c7509e6557

	kMul uint64 = 0x9ddfea08eb382d69
)

func fetch64(p []byte) uint64 {
	return binary.LittleEndian.Uint64(p)
	//return uint64InExpectedOrder(unalignedLoad64(p))
}

func fetch32(p []byte) uint32 {
	return binary.LittleEndian.Uint32(p)
	//return uint32InExpectedOrder(unalignedLoad32(p))
}

func rotate64(val uint64, shift uint32) uint64 {
	if shift != 0 {
		return ((val >> shift) | (val << (64 - shift)))
	}

	return val
}

func rotate32(val uint32, shift uint32) uint32 {
	if shift != 0 {
		return ((val >> shift) | (val << (32 - shift)))
	}

	return val
}

func swap64(a, b *uint64) {
	*a, *b = *b, *a
}

func swap32(a, b *uint32) {
	*a, *b = *b, *a
}

func permute3(a, b, c *uint32) {
	swap32(a, b)
	swap32(a, c)
}

func rotate64ByAtLeast1(val uint64, shift uint32) uint64 {
	return (val >> shift) | (val << (64 - shift))
}

func shiftMix(val uint64) uint64 {
	return val ^ (val >> 47)
}

type Uint128 [2]uint64

func (this *Uint128) setLower64(l uint64) {
	this[0] = l
}

func (this *Uint128) setHigher64(h uint64) {
	this[1] = h
}

func (this Uint128) Lower64() uint64 {
	return this[0]
}

func (this Uint128) Higher64() uint64 {
	return this[1]
}

func (this Uint128) Bytes() []byte {
	b := make([]byte, 16)
	binary.LittleEndian.PutUint64(b, this[0])
	binary.LittleEndian.PutUint64(b[8:], this[1])
	return b
}

func hash128to64(x Uint128) uint64 {
	// Murmur-inspired hashing.
	var a = (x.Lower64() ^ x.Higher64()) * kMul
	a ^= (a >> 47)
	var b = (x.Higher64() ^ a) * kMul
	b ^= (b >> 47)
	b *= kMul
	return b
}

func hashLen16(u, v uint64) uint64 {
	return hash128to64(Uint128{u, v})
}

func hashLen16_3(u, v, mul uint64) uint64 {
	// Murmur-inspired hashing.
	var a = (u ^ v) * mul
	a ^= (a >> 47)
	var b = (v ^ a) * mul
	b ^= (b >> 47)
	b *= mul
	return b
}

func hashLen0to16(s []byte, length uint32) uint64 {
	if length > 8 {
		var a = fetch64(s)
		var b = fetch64(s[length-8:])

		return hashLen16(a, rotate64ByAtLeast1(b+uint64(length), length)) ^ b
	}

	if length >= 4 {
		var a = fetch32(s)
		return hashLen16(uint64(length)+(uint64(a)<<3), uint64(fetch32(s[length-4:])))
	}

	if length > 0 {
		var a uint8 = uint8(s[0])
		var b uint8 = uint8(s[length>>1])
		var c uint8 = uint8(s[length-1])

		var y uint32 = uint32(a) + (uint32(b) << 8)
		var z uint32 = length + (uint32(c) << 2)

		return shiftMix(uint64(y)*k2^uint64(z)*k3) * k2
	}

	return k2
}

// This probably works well for 16-byte strings as well, but it may be overkill
func hashLen17to32(s []byte, length uint32) uint64 {
	var a = fetch64(s) * k1
	var b = fetch64(s[8:])
	var c = fetch64(s[length-8:]) * k2
	var d = fetch64(s[length-16:]) * k0

	return hashLen16(rotate64(a-b, 43)+rotate64(c, 30)+d,
		a+rotate64(b^k3, 20)-c+uint64(length))
}

func weakHashLen32WithSeeds(w, x, y, z, a, b uint64) Uint128 {
	a += w
	b = rotate64(b+a+z, 21)
	var c uint64 = a
	a += x
	a += y
	b += rotate64(a, 44)
	return Uint128{a + z, b + c}
}

func weakHashLen32WithSeeds_3(s []byte, a, b uint64) Uint128 {
	return weakHashLen32WithSeeds(fetch64(s), fetch64(s[8:]), fetch64(s[16:]), fetch64(s[24:]), a, b)
}

func hashLen33to64(s []byte, length uint32) uint64 {
	var z uint64 = fetch64(s[24:])
	var a uint64 = fetch64(s) + (uint64(length)+fetch64(s[length-16:]))*k0
	var b uint64 = rotate64(a+z, 52)
	var c uint64 = rotate64(a, 37)

	a += fetch64(s[8:])
	c += rotate64(a, 7)
	a += fetch64(s[16:])

	var vf uint64 = a + z
	var vs = b + rotate64(a, 31) + c

	a = fetch64(s[16:]) + fetch64(s[length-32:])
	z = fetch64(s[length-8:])
	b = rotate64(a+z, 52)
	c = rotate64(a, 37)
	a += fetch64(s[length-24:])
	c += rotate64(a, 7)
	a += fetch64(s[length-16:])

	wf := a + z
	ws := b + rotate64(a, 31) + c
	r := shiftMix((vf+ws)*k2 + (wf+vs)*k0)
	return shiftMix(r*k0+vs) * k2
}

func CityHash64(s []byte, length uint32) uint64 {
	if length <= 32 {
		if length <= 16 {
			return hashLen0to16(s, length)
		} else {
			return hashLen17to32(s, length)
		}
	} else if length <= 64 {
		return hashLen33to64(s, length)
	}

	var x uint64 = fetch64(s)
	var y uint64 = fetch64(s[length-16:]) ^ k1
	var z uint64 = fetch64(s[length-56:]) ^ k0

	var v Uint128 = weakHashLen32WithSeeds_3(s[length-64:], uint64(length), y)
	var w Uint128 = weakHashLen32WithSeeds_3(s[length-32:], uint64(length)*k1, k0)

	z += shiftMix(v.Higher64()) * k1
	x = rotate64(z+x, 39) * k1
	y = rotate64(y, 33) * k1

	length = (length - 1) & ^uint32(63)
	for {
		x = rotate64(x+y+v.Lower64()+fetch64(s[16:]), 37) * k1
		y = rotate64(y+v.Higher64()+fetch64(s[48:]), 42) * k1

		x ^= w.Higher64()
		y ^= v.Lower64()

		z = rotate64(z^w.Lower64(), 33)
		v = weakHashLen32WithSeeds_3(s, v.Higher64()*k1, x+w.Lower64())
		w = weakHashLen32WithSeeds_3(s[32:], z+w.Higher64(), y)

		swap64(&z, &x)
		s = s[64:]
		length -= 64

		if length == 0 {
			break
		}
	}

	return hashLen16(hashLen16(v.Lower64(), w.Lower64())+shiftMix(y)*k1+z, hashLen16(v.Higher64(), w.Higher64())+x)
}

func CityHash64WithSeed(s []byte, length uint32, seed uint64) uint64 {
	return CityHash64WithSeeds(s, length, k2, seed)
}

func CityHash64WithSeeds(s []byte, length uint32, seed0, seed1 uint64) uint64 {
	return hashLen16(CityHash64(s, length)-seed0, seed1)
}

func cityMurmur(s []byte, length uint32, seed Uint128) Uint128 {
	var a uint64 = seed.Lower64()
	var b uint64 = seed.Higher64()
	var c uint64 = 0
	var d uint64 = 0
	var l int32 = int32(length) - 16

	if l <= 0 { // len <= 16
		a = shiftMix(a*k1) * k1
		c = b*k1 + hashLen0to16(s, length)

		if length >= 8 {
			d = shiftMix(a + fetch64(s))
		} else {
			d = shiftMix(a + c)
		}

	} else { // len > 16
		c = hashLen16(fetch64(s[length-8:])+k1, a)
		d = hashLen16(b+uint64(length), c+fetch64(s[length-16:]))
		a += d

		for {
			a ^= shiftMix(fetch64(s)*k1) * k1
			a *= k1
			b ^= a
			c ^= shiftMix(fetch64(s[8:])*k1) * k1
			c *= k1
			d ^= c
			s = s[16:]
			l -= 16

			if l <= 0 {
				break
			}
		}
	}
	a = hashLen16(a, c)
	b = hashLen16(d, b)
	return Uint128{a ^ b, hashLen16(b, a)}
}

func CityHash128WithSeed(s []byte, length uint32, seed Uint128) Uint128 {
	if length < 128 {
		return cityMurmur(s, length, seed)
	}

	// We expect length >= 128 to be the common case. Keep 56 bytes of state:
	// v, w, x, y, and z.
	var v, w Uint128
	var x uint64 = seed.Lower64()
	var y uint64 = seed.Higher64()
	var z uint64 = uint64(length) * k1

	var pos uint32
	var t = s

	v.setLower64(rotate64(y^k1, 49)*k1 + fetch64(s))
	v.setHigher64(rotate64(v.Lower64(), 42)*k1 + fetch64(s[8:]))
	w.setLower64(rotate64(y+z, 35)*k1 + x)
	w.setHigher64(rotate64(x+fetch64(s[88:]), 53) * k1)

	// This is the same inner loop as CityHash64(), manually unrolled.
	for {
		x = rotate64(x+y+v.Lower64()+fetch64(s[16:]), 37) * k1
		y = rotate64(y+v.Higher64()+fetch64(s[48:]), 42) * k1

		x ^= w.Higher64()
		y ^= v.Lower64()
		z = rotate64(z^w.Lower64(), 33)
		v = weakHashLen32WithSeeds_3(s, v.Higher64()*k1, x+w.Lower64())
		w = weakHashLen32WithSeeds_3(s[32:], z+w.Higher64(), y)
		swap64(&z, &x)
		s = s[64:]
		pos += 64

		x = rotate64(x+y+v.Lower64()+fetch64(s[16:]), 37) * k1
		y = rotate64(y+v.Higher64()+fetch64(s[48:]), 42) * k1
		x ^= w.Higher64()
		y ^= v.Lower64()
		z = rotate64(z^w.Lower64(), 33)
		v = weakHashLen32WithSeeds_3(s, v.Higher64()*k1, x+w.Lower64())
		w = weakHashLen32WithSeeds_3(s[32:], z+w.Higher64(), y)
		swap64(&z, &x)
		s = s[64:]
		pos += 64
		length -= 128

		if length < 128 {
			break
		}
	}

	y += rotate64(w.Lower64(), 37)*k0 + z
	x += rotate64(v.Lower64()+z, 49) * k0

	// If 0 < length < 128, hash up to 4 chunks of 32 bytes each from the end of s.
	var tailDone uint32
	for tailDone = 0; tailDone < length; {
		tailDone += 32
		y = rotate64(y-x, 42)*k0 + v.Higher64()

		//TODO why not use origin_len ?
		w.setLower64(w.Lower64() + fetch64(t[pos+length-tailDone+16:]))
		x = rotate64(x, 49)*k0 + w.Lower64()
		w.setLower64(w.Lower64() + v.Lower64())
		v = weakHashLen32WithSeeds_3(t[pos+length-tailDone:], v.Lower64(), v.Higher64())
	}
	// At this point our 48 bytes of state should contain more than
	// enough information for a strong 128-bit hash. We use two
	// different 48-byte-to-8-byte hashes to get a 16-byte final result.
	x = hashLen16(x, v.Lower64())
	y = hashLen16(y, w.Lower64())

	return Uint128{hashLen16(x+v.Higher64(), w.Higher64()) + y,
		hashLen16(x+w.Higher64(), y+v.Higher64())}
}

func CityHash128(s []byte, length uint32) (result Uint128) {
	if length >= 16 {
		result = CityHash128WithSeed(s[16:length], length-16, Uint128{fetch64(s) ^ k3, fetch64(s[8:])})
	} else if length >= 8 {
		result = CityHash128WithSeed(nil, 0, Uint128{fetch64(s) ^ (uint64(length) * k0), fetch64(s[length-8:]) ^ k1})
	} else {
		result = CityHash128WithSeed(s, length, Uint128{k0, k1})
	}
	return
}
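CityHash128 is what the compression framing uses for its block checksum (see compress_writer.go above): the 128-bit digest is split into two uint64 halves and written little-endian. A minimal sketch of that usage, assuming the pre-removal vendored import path:

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/ClickHouse/clickhouse-go/lib/cityhash102"
)

func main() {
	block := []byte("framed block bytes")
	sum := cityhash102.CityHash128(block, uint32(len(block)))

	// Same layout as the first 16 bytes of a compressed block frame.
	checksum := make([]byte, 16)
	binary.LittleEndian.PutUint64(checksum[0:], sum.Lower64())
	binary.LittleEndian.PutUint64(checksum[8:], sum.Higher64())
	fmt.Printf("% x\n", checksum)
}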
5
vendor/github.com/ClickHouse/clickhouse-go/lib/cityhash102/doc.go
generated
vendored
@@ -1,5 +0,0 @@
/** COPY from https://github.com/zentures/cityhash/

NOTE: The code is modified to be compatible with CityHash128 used in ClickHouse
*/
package cityhash102
270
vendor/github.com/ClickHouse/clickhouse-go/lib/column/array.go
generated
vendored
@@ -1,270 +0,0 @@
package column

import (
	"errors"
	"fmt"
	"net"
	"reflect"
	"strings"
	"time"

	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

type columnDecoder func() (interface{}, error)

var unsupportedArrayTypeErrTemp = "unsupported Array type '%s'"

// If you add Nullable type, that can be used in Array(Nullable(T)) add this type to ../codegen/nullable_appender/main.go in structure values.Types.
// Run code generation.
//go:generate go run ../codegen/nullable_appender -package $GOPACKAGE -file nullable_appender.go
type Array struct {
	base
	depth    int
	column   Column
	nullable bool
}

func (array *Array) Read(decoder *binary.Decoder, isNull bool) (interface{}, error) {
	return nil, fmt.Errorf("do not use Read method for Array(T) column")
}

func (array *Array) WriteNull(nulls, encoder *binary.Encoder, v interface{}) error {
	if array.nullable {
		column, ok := array.column.(*Nullable)
		if !ok {
			return fmt.Errorf("cannot convert to nullable type")
		}
		return column.WriteNull(nulls, encoder, v)
	}
	return fmt.Errorf("write null to not nullable array")
}

func (array *Array) Write(encoder *binary.Encoder, v interface{}) error {
	return array.column.Write(encoder, v)
}

func (array *Array) ReadArray(decoder *binary.Decoder, rows int) (_ []interface{}, err error) {
	var (
		offsets = make([][]uint64, array.depth)
		values  = make([]interface{}, rows)
	)

	// Read offsets
	lastOffset := uint64(rows)
	for i := 0; i < array.depth; i++ {
		offset := make([]uint64, lastOffset)
		for j := uint64(0); j < lastOffset; j++ {
			if offset[j], err = decoder.UInt64(); err != nil {
				return nil, err
			}
		}
		offsets[i] = offset
		lastOffset = 0
		if len(offset) > 0 {
			lastOffset = offset[len(offset)-1]
		}
	}

	var cd columnDecoder

	switch column := array.column.(type) {
	case *Nullable:
		nullRows, err := column.ReadNull(decoder, int(lastOffset))
		if err != nil {
			return nil, err
		}
		cd = func(rows []interface{}) columnDecoder {
			i := 0
			return func() (interface{}, error) {
				if i > len(rows) {
					return nil, errors.New("not enough rows to return while parsing Null column")
				}
				ret := rows[i]
				i++
				return ret, nil
			}
		}(nullRows)
	case *Tuple:
		tupleRows, err := column.ReadTuple(decoder, int(lastOffset))
		if err != nil {
			return nil, err
		}
		// closure to return fully assembled tuple values as if they
		// were decoded one at a time
		cd = func(rows []interface{}) columnDecoder {
			i := 0
			return func() (interface{}, error) {
				if i > len(rows) {
					return nil, errors.New("not enough rows to return while parsing Tuple column")
				}
				ret := rows[i]
				i++
				return ret, nil
			}
		}(tupleRows)
	default:
		cd = func(decoder *binary.Decoder) columnDecoder {
			return func() (interface{}, error) { return array.column.Read(decoder, array.nullable) }
		}(decoder)
	}

	// Read values
	for i := 0; i < rows; i++ {
		if values[i], err = array.read(cd, offsets, uint64(i), 0); err != nil {
			return nil, err
		}
	}
	return values, nil
}

func (array *Array) read(readColumn columnDecoder, offsets [][]uint64, index uint64, level int) (interface{}, error) {
	end := offsets[level][index]
	start := uint64(0)
	if index > 0 {
		start = offsets[level][index-1]
	}

	scanT := array.column.ScanType()
	slice := reflect.MakeSlice(array.arrayType(level), 0, int(end-start))
	for i := start; i < end; i++ {
		var (
			value interface{}
			err   error
		)
		if level == array.depth-1 {
			value, err = readColumn()
		} else {
			value, err = array.read(readColumn, offsets, i, level+1)
		}
		if err != nil {
			return nil, err
		}
		if array.nullable && level == array.depth-1 {
			f, ok := nullableAppender[scanT.String()]
			if !ok {
				return nil, fmt.Errorf(unsupportedArrayTypeErrTemp, scanT.String())
			}

			cSlice, err := f(value, slice)
			if err != nil {
				return nil, err
			}

			slice = cSlice
		} else {
			slice = reflect.Append(slice, reflect.ValueOf(value))
		}

	}
	return slice.Interface(), nil
}

func (array *Array) arrayType(level int) reflect.Type {
	t := array.column.ScanType()
	for i := 0; i < array.depth-level; i++ {
		t = reflect.SliceOf(t)
	}
	return t
}

func (array *Array) Depth() int {
	return array.depth
}

func parseArray(name, chType string, timezone *time.Location) (*Array, error) {
	if len(chType) < 11 {
		return nil, fmt.Errorf("invalid Array column type: %s", chType)
	}
	var (
		depth      int
		columnType = chType
	)

loop:
	for _, str := range strings.Split(chType, "Array(") {
		switch {
		case len(str) == 0:
			depth++
		default:
			chType = str[:len(str)-depth]
			break loop
		}
	}
	column, err := Factory(name, chType, timezone)
	if err != nil {
		return nil, fmt.Errorf("Array(T): %v", err)
	}

	var scanType interface{}
	switch t := column.ScanType(); t {
	case arrayBaseTypes[int8(0)]:
		scanType = []int8{}
	case arrayBaseTypes[int16(0)]:
		scanType = []int16{}
	case arrayBaseTypes[int32(0)]:
		scanType = []int32{}
	case arrayBaseTypes[int64(0)]:
		scanType = []int64{}
	case arrayBaseTypes[uint8(0)]:
		scanType = []uint8{}
	case arrayBaseTypes[uint16(0)]:
		scanType = []uint16{}
	case arrayBaseTypes[uint32(0)]:
		scanType = []uint32{}
	case arrayBaseTypes[uint64(0)]:
		scanType = []uint64{}
	case arrayBaseTypes[float32(0)]:
		scanType = []float32{}
	case arrayBaseTypes[float64(0)]:
		scanType = []float64{}
	case arrayBaseTypes[string("")]:
		scanType = []string{}
	case arrayBaseTypes[time.Time{}]:
		scanType = []time.Time{}
	case arrayBaseTypes[IPv4{}], arrayBaseTypes[IPv6{}]:
		scanType = []net.IP{}
	case reflect.ValueOf([]interface{}{}).Type():
		scanType = [][]interface{}{}

	//nullable
	case arrayBaseTypes[ptrInt8T]:
		scanType = []*int8{}
	case arrayBaseTypes[ptrInt16T]:
		scanType = []*int16{}
	case arrayBaseTypes[ptrInt32T]:
		scanType = []*int32{}
	case arrayBaseTypes[ptrInt64T]:
		scanType = []*int64{}
	case arrayBaseTypes[ptrUInt8T]:
		scanType = []*uint8{}
	case arrayBaseTypes[ptrUInt16T]:
		scanType = []*uint16{}
	case arrayBaseTypes[ptrUInt32T]:
		scanType = []*uint32{}
	case arrayBaseTypes[ptrUInt64T]:
		scanType = []*uint64{}
	case arrayBaseTypes[ptrFloat32]:
		scanType = []*float32{}
	case arrayBaseTypes[ptrFloat64]:
		scanType = []*float64{}
	case arrayBaseTypes[ptrString]:
		scanType = []*string{}
	case arrayBaseTypes[ptrTime]:
		scanType = []*time.Time{}
	case arrayBaseTypes[ptrIPv4], arrayBaseTypes[ptrIPv6]:
		scanType = []*net.IP{}
	default:
		return nil, fmt.Errorf(unsupportedArrayTypeErrTemp, column.ScanType().Name())
	}
	return &Array{
		base: base{
			name:    name,
			chType:  columnType,
			valueOf: reflect.ValueOf(scanType),
		},
		depth:    depth,
		column:   column,
		nullable: strings.HasPrefix(column.CHType(), "Nullable"),
	}, nil
}
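ReadArray first decodes one cumulative-offsets ladder per nesting level, then pulls the flat values; read() slices each level with start = offsets[level][i-1] and end = offsets[level][i]. A worked example with illustrative values (not from the source):

// column: Array(Array(Int32)), 2 rows
//   row 0: [[1, 2], [3]]
//   row 1: [[4]]
//
// depth-0 offsets (one per row, cumulative count of inner arrays):  [2, 3]
// depth-1 offsets (one per inner array, cumulative count of values): [2, 3, 4]
// flat values stream:                                                [1, 2, 3, 4]
//
// Row 0 spans inner arrays [0, 2); row 1 spans [2, 3). Inner array 0 spans
// values [0, 2), inner array 1 spans [2, 3), inner array 2 spans [3, 4).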
205
vendor/github.com/ClickHouse/clickhouse-go/lib/column/column.go
generated
vendored
@@ -1,205 +0,0 @@
package column

import (
	"fmt"
	"reflect"
	"strings"
	"time"

	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

type Column interface {
	Name() string
	CHType() string
	ScanType() reflect.Type
	Read(*binary.Decoder, bool) (interface{}, error)
	Write(*binary.Encoder, interface{}) error
	defaultValue() interface{}
	Depth() int
}

func Factory(name, chType string, timezone *time.Location) (Column, error) {
	switch chType {
	case "Int8":
		return &Int8{
			base: base{
				name:    name,
				chType:  chType,
				valueOf: columnBaseTypes[int8(0)],
			},
		}, nil
	case "Int16":
		return &Int16{
			base: base{
				name:    name,
				chType:  chType,
				valueOf: columnBaseTypes[int16(0)],
			},
		}, nil
	case "Int32":
		return &Int32{
			base: base{
				name:    name,
				chType:  chType,
				valueOf: columnBaseTypes[int32(0)],
			},
		}, nil
	case "Int64":
		return &Int64{
			base: base{
				name:    name,
				chType:  chType,
				valueOf: columnBaseTypes[int64(0)],
			},
		}, nil
	case "UInt8":
		return &UInt8{
			base: base{
				name:    name,
				chType:  chType,
				valueOf: columnBaseTypes[uint8(0)],
			},
		}, nil
	case "UInt16":
		return &UInt16{
			base: base{
				name:    name,
				chType:  chType,
				valueOf: columnBaseTypes[uint16(0)],
			},
		}, nil
	case "UInt32":
		return &UInt32{
			base: base{
				name:    name,
				chType:  chType,
				valueOf: columnBaseTypes[uint32(0)],
			},
		}, nil
	case "UInt64":
		return &UInt64{
			base: base{
				name:    name,
				chType:  chType,
				valueOf: columnBaseTypes[uint64(0)],
			},
		}, nil
	case "Float32":
		return &Float32{
			base: base{
				name:    name,
				chType:  chType,
				valueOf: columnBaseTypes[float32(0)],
			},
		}, nil
	case "Float64":
		return &Float64{
			base: base{
				name:    name,
				chType:  chType,
				valueOf: columnBaseTypes[float64(0)],
			},
		}, nil
	case "String":
		return &String{
			base: base{
				name:    name,
				chType:  chType,
				valueOf: columnBaseTypes[string("")],
			},
		}, nil
	case "UUID":
		return &UUID{
			base: base{
				name:    name,
				chType:  chType,
				valueOf: columnBaseTypes[string("")],
			},
		}, nil
	case "Date":
		_, offset := time.Unix(0, 0).In(timezone).Zone()
		return &Date{
			base: base{
				name:    name,
				chType:  chType,
				valueOf: columnBaseTypes[time.Time{}],
			},
			Timezone: timezone,
			offset:   int64(offset),
		}, nil
	case "IPv4":
		return &IPv4{
			base: base{
				name:    name,
				chType:  chType,
				valueOf: columnBaseTypes[IPv4{}],
			},
		}, nil
	case "IPv6":
		return &IPv6{
			base: base{
				name:    name,
				chType:  chType,
				valueOf: columnBaseTypes[IPv6{}],
			},
		}, nil
	}
	switch {
	case strings.HasPrefix(chType, "DateTime") && !strings.HasPrefix(chType, "DateTime64"):
		return &DateTime{
			base: base{
				name:    name,
				chType:  "DateTime",
				valueOf: columnBaseTypes[time.Time{}],
			},
			Timezone: timezone,
		}, nil
	case strings.HasPrefix(chType, "DateTime64"):
		return &DateTime64{
			base: base{
				name:    name,
				chType:  chType,
				valueOf: columnBaseTypes[time.Time{}],
			},
			Timezone: timezone,
		}, nil
	case strings.HasPrefix(chType, "Array"):
		return parseArray(name, chType, timezone)
	case strings.HasPrefix(chType, "Nullable"):
		return parseNullable(name, chType, timezone)
	case strings.HasPrefix(chType, "FixedString"):
		return parseFixedString(name, chType)
	case strings.HasPrefix(chType, "Enum8"), strings.HasPrefix(chType, "Enum16"):
		return parseEnum(name, chType)
	case strings.HasPrefix(chType, "Decimal"):
		return parseDecimal(name, chType)
	case strings.HasPrefix(chType, "SimpleAggregateFunction"):
		if nestedType, err := getNestedType(chType, "SimpleAggregateFunction"); err != nil {
			return nil, err
		} else {
			return Factory(name, nestedType, timezone)
		}
	case strings.HasPrefix(chType, "Tuple"):
		return parseTuple(name, chType, timezone)
	}
	return nil, fmt.Errorf("column: unhandled type %v", chType)
}

func getNestedType(chType string, wrapType string) (string, error) {
	prefixLen := len(wrapType) + 1
	suffixLen := 1

	if len(chType) > prefixLen+suffixLen {
		nested := strings.Split(chType[prefixLen:len(chType)-suffixLen], ",")
		if len(nested) == 2 {
			return strings.TrimSpace(nested[1]), nil
		}

		if len(nested) == 3 {
			return strings.TrimSpace(strings.Join(nested[1:], ",")), nil
		}
	}

	return "", fmt.Errorf("column: invalid %s type (%s)", wrapType, chType)
}
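Factory is the single entry point that maps a ClickHouse type string to a column handler, recursing through wrappers like Array, Nullable, and SimpleAggregateFunction. A minimal usage sketch, assuming the pre-removal vendored import path:

package main

import (
	"fmt"
	"time"

	"github.com/ClickHouse/clickhouse-go/lib/column"
)

func main() {
	// "Nullable(DateTime)" is routed through parseNullable, which in turn
	// calls Factory again for the inner DateTime.
	col, err := column.Factory("created_at", "Nullable(DateTime)", time.UTC)
	if err != nil {
		panic(err)
	}
	fmt.Println(col.Name(), col.CHType(), col.ScanType())
}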
115
vendor/github.com/ClickHouse/clickhouse-go/lib/column/common.go
generated
vendored
@@ -1,115 +0,0 @@
package column

import (
	"fmt"
	"net"
	"reflect"
	"time"
)

type ErrUnexpectedType struct {
	Column Column
	T      interface{}
}

func (err *ErrUnexpectedType) Error() string {
	return fmt.Sprintf("%s: unexpected type %T", err.Column, err.T)
}

var columnBaseTypes = map[interface{}]reflect.Value{
	int8(0):     reflect.ValueOf(int8(0)),
	int16(0):    reflect.ValueOf(int16(0)),
	int32(0):    reflect.ValueOf(int32(0)),
	int64(0):    reflect.ValueOf(int64(0)),
	uint8(0):    reflect.ValueOf(uint8(0)),
	uint16(0):   reflect.ValueOf(uint16(0)),
	uint32(0):   reflect.ValueOf(uint32(0)),
	uint64(0):   reflect.ValueOf(uint64(0)),
	float32(0):  reflect.ValueOf(float32(0)),
	float64(0):  reflect.ValueOf(float64(0)),
	string(""):  reflect.ValueOf(string("")),
	time.Time{}: reflect.ValueOf(time.Time{}),
	IPv4{}:      reflect.ValueOf(net.IPv4zero),
	IPv6{}:      reflect.ValueOf(net.IPv6unspecified),
}

type ptrTo uint8

const (
	ptrInt8T ptrTo = iota
	ptrInt16T
	ptrInt32T
	ptrInt64T
	ptrUInt8T
	ptrUInt16T
	ptrUInt32T
	ptrUInt64T
	ptrFloat32
	ptrFloat64
	ptrString
	ptrTime
	ptrIPv4
	ptrIPv6
)

var arrayBaseTypes = map[interface{}]reflect.Type{
	int8(0):     reflect.ValueOf(int8(0)).Type(),
	int16(0):    reflect.ValueOf(int16(0)).Type(),
	int32(0):    reflect.ValueOf(int32(0)).Type(),
	int64(0):    reflect.ValueOf(int64(0)).Type(),
	uint8(0):    reflect.ValueOf(uint8(0)).Type(),
	uint16(0):   reflect.ValueOf(uint16(0)).Type(),
	uint32(0):   reflect.ValueOf(uint32(0)).Type(),
	uint64(0):   reflect.ValueOf(uint64(0)).Type(),
	float32(0):  reflect.ValueOf(float32(0)).Type(),
	float64(0):  reflect.ValueOf(float64(0)).Type(),
	string(""):  reflect.ValueOf(string("")).Type(),
	time.Time{}: reflect.ValueOf(time.Time{}).Type(),
	IPv4{}:      reflect.ValueOf(net.IPv4zero).Type(),
	IPv6{}:      reflect.ValueOf(net.IPv6unspecified).Type(),

	// nullable
	ptrInt8T:   reflect.PtrTo(reflect.ValueOf(int8(0)).Type()),
	ptrInt16T:  reflect.PtrTo(reflect.ValueOf(int16(0)).Type()),
	ptrInt32T:  reflect.PtrTo(reflect.ValueOf(int32(0)).Type()),
	ptrInt64T:  reflect.PtrTo(reflect.ValueOf(int64(0)).Type()),
	ptrUInt8T:  reflect.PtrTo(reflect.ValueOf(uint8(0)).Type()),
	ptrUInt16T: reflect.PtrTo(reflect.ValueOf(uint16(0)).Type()),
	ptrUInt32T: reflect.PtrTo(reflect.ValueOf(uint32(0)).Type()),
	ptrUInt64T: reflect.PtrTo(reflect.ValueOf(uint64(0)).Type()),
	ptrFloat32: reflect.PtrTo(reflect.ValueOf(float32(0)).Type()),
	ptrFloat64: reflect.PtrTo(reflect.ValueOf(float64(0)).Type()),
	ptrString:  reflect.PtrTo(reflect.ValueOf(string("")).Type()),
	ptrTime:    reflect.PtrTo(reflect.ValueOf(time.Time{}).Type()),
	ptrIPv4:    reflect.PtrTo(reflect.ValueOf(net.IPv4zero).Type()),
	ptrIPv6:    reflect.PtrTo(reflect.ValueOf(net.IPv6unspecified).Type()),
}

type base struct {
	name, chType string
	valueOf      reflect.Value
}

func (base *base) Name() string {
	return base.name
}

func (base *base) CHType() string {
	return base.chType
}

func (base *base) ScanType() reflect.Type {
	return base.valueOf.Type()
}

func (base *base) defaultValue() interface{} {
	return base.valueOf.Interface()
}

func (base *base) String() string {
	return fmt.Sprintf("%s (%s)", base.name, base.chType)
}

func (base *base) Depth() int {
	return 0
}
84
vendor/github.com/ClickHouse/clickhouse-go/lib/column/date.go
generated
vendored
@ -1,84 +0,0 @@
package column

import (
	"time"

	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

type Date struct {
	base
	Timezone *time.Location
	offset   int64
}

func (dt *Date) Read(decoder *binary.Decoder, isNull bool) (interface{}, error) {
	sec, err := decoder.Int16()
	if err != nil {
		return nil, err
	}
	return time.Unix(int64(sec)*24*3600-dt.offset, 0).In(dt.Timezone), nil
}

func (dt *Date) Write(encoder *binary.Encoder, v interface{}) error {
	var timestamp int64
	switch value := v.(type) {
	case time.Time:
		_, offset := value.Zone()
		timestamp = value.Unix() + int64(offset)
	case int16:
		return encoder.Int16(value)
	case int32:
		timestamp = int64(value) + dt.offset
	case uint32:
		timestamp = int64(value) + dt.offset
	case uint64:
		timestamp = int64(value) + dt.offset
	case int64:
		timestamp = value + dt.offset
	case string:
		var err error
		timestamp, err = dt.parse(value)
		if err != nil {
			return err
		}

	// this relies on Nullable never sending nil values through
	case *time.Time:
		_, offset := value.Zone()
		timestamp = (*value).Unix() + int64(offset)
	case *int16:
		return encoder.Int16(*value)
	case *int32:
		timestamp = int64(*value) + dt.offset
	case *int64:
		timestamp = *value + dt.offset
	case *string:
		var err error
		timestamp, err = dt.parse(*value)
		if err != nil {
			return err
		}

	default:
		return &ErrUnexpectedType{
			T:      v,
			Column: dt,
		}
	}

	return encoder.Int16(int16(timestamp / 24 / 3600))
}

func (dt *Date) parse(value string) (int64, error) {
	tv, err := time.Parse("2006-01-02", value)
	if err != nil {
		return 0, err
	}
	return time.Date(
		time.Time(tv).Year(),
		time.Time(tv).Month(),
		time.Time(tv).Day(),
		0, 0, 0, 0, time.UTC,
	).Unix(), nil
}
87
vendor/github.com/ClickHouse/clickhouse-go/lib/column/datetime.go
generated
vendored
@ -1,87 +0,0 @@
package column

import (
	"time"

	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

type DateTime struct {
	base
	Timezone *time.Location
}

func (dt *DateTime) Read(decoder *binary.Decoder, isNull bool) (interface{}, error) {
	sec, err := decoder.Int32()
	if err != nil {
		return nil, err
	}
	return time.Unix(int64(sec), 0).In(dt.Timezone), nil
}

func (dt *DateTime) Write(encoder *binary.Encoder, v interface{}) error {
	var timestamp int64
	switch value := v.(type) {
	case time.Time:
		if !value.IsZero() {
			timestamp = value.Unix()
		}
	case int16:
		timestamp = int64(value)
	case int32:
		timestamp = int64(value)
	case uint32:
		timestamp = int64(value)
	case uint64:
		timestamp = int64(value)
	case int64:
		timestamp = value
	case string:
		var err error
		timestamp, err = dt.parse(value)
		if err != nil {
			return err
		}

	case *time.Time:
		if value != nil && !(*value).IsZero() {
			timestamp = (*value).Unix()
		}
	case *int16:
		timestamp = int64(*value)
	case *int32:
		timestamp = int64(*value)
	case *int64:
		timestamp = *value
	case *string:
		var err error
		timestamp, err = dt.parse(*value)
		if err != nil {
			return err
		}

	default:
		return &ErrUnexpectedType{
			T:      v,
			Column: dt,
		}
	}

	return encoder.Int32(int32(timestamp))
}

func (dt *DateTime) parse(value string) (int64, error) {
	tv, err := time.Parse("2006-01-02 15:04:05", value)
	if err != nil {
		return 0, err
	}
	return time.Date(
		time.Time(tv).Year(),
		time.Time(tv).Month(),
		time.Time(tv).Day(),
		time.Time(tv).Hour(),
		time.Time(tv).Minute(),
		time.Time(tv).Second(),
		0, time.Local, // use the local timezone when inserting into ClickHouse
	).Unix(), nil
}
100
vendor/github.com/ClickHouse/clickhouse-go/lib/column/datetime64.go
generated
vendored
@ -1,100 +0,0 @@
package column

import (
	"math"
	"strconv"
	"strings"
	"time"

	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

type DateTime64 struct {
	base
	Timezone *time.Location
}

func (dt *DateTime64) Read(decoder *binary.Decoder, isNull bool) (interface{}, error) {
	value, err := decoder.Int64()
	if err != nil {
		return nil, err
	}

	precision, err := dt.getPrecision()
	if err != nil {
		return nil, err
	}

	var nano int64
	if precision < 19 {
		nano = value * int64(math.Pow10(9-precision))
	}

	sec := nano / int64(10e8)
	nsec := nano - sec*10e8

	return time.Unix(sec, nsec).In(dt.Timezone), nil
}

func (dt *DateTime64) Write(encoder *binary.Encoder, v interface{}) error {
	var timestamp int64
	switch value := v.(type) {
	case time.Time:
		if !value.IsZero() {
			timestamp = value.UnixNano()
		}
	case uint64:
		timestamp = int64(value)
	case int64:
		timestamp = value
	case string:
		var err error
		timestamp, err = dt.parse(value)
		if err != nil {
			return err
		}
	case *time.Time:
		if value != nil && !(*value).IsZero() {
			timestamp = (*value).UnixNano()
		}
	case *int64:
		timestamp = *value
	case *string:
		var err error
		timestamp, err = dt.parse(*value)
		if err != nil {
			return err
		}
	default:
		return &ErrUnexpectedType{
			T:      v,
			Column: dt,
		}
	}

	precision, err := dt.getPrecision()
	if err != nil {
		return err
	}

	timestamp = timestamp / int64(math.Pow10(9-precision))

	return encoder.Int64(timestamp)
}

func (dt *DateTime64) parse(value string) (int64, error) {
	tv, err := time.Parse("2006-01-02 15:04:05.999", value)
	if err != nil {
		return 0, err
	}
	return tv.UnixNano(), nil
}

func (dt *DateTime64) getPrecision() (int, error) {
	dtParams := dt.base.chType[11 : len(dt.base.chType)-1]
	precision, err := strconv.Atoi(strings.Split(dtParams, ",")[0])
	if err != nil {
		return 0, err
	}
	return precision, nil
}
377
vendor/github.com/ClickHouse/clickhouse-go/lib/column/decimal.go
generated
vendored
@ -1,377 +0,0 @@
package column

import (
	b "encoding/binary"
	"errors"
	"fmt"
	"math"
	"reflect"
	"strconv"
	"strings"

	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

// Table of powers of 10 for fast casting from floating types to decimal type
// representations.
var factors10 = []float64{
	1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11, 1e12, 1e13,
	1e14, 1e15, 1e16, 1e17, 1e18,
}

// Decimal represents the ClickHouse Decimal(P, S) type. Decimal is represented
// as an integral value. Floating-point types are also supported for query
// parameters.
//
// Since there is no support for int128 in Golang, decimals with precision 19
// through 38 are represented as 16 little-endian bytes.
type Decimal struct {
	base
	nobits    int // its domain is {32, 64}
	precision int
	scale     int
}

func (d *Decimal) Read(decoder *binary.Decoder, isNull bool) (interface{}, error) {
	switch d.nobits {
	case 32:
		return decoder.Int32()
	case 64:
		return decoder.Int64()
	case 128:
		return decoder.Decimal128()
	default:
		return nil, errors.New("unachievable execution path")
	}
}

func (d *Decimal) Write(encoder *binary.Encoder, v interface{}) error {
	switch d.nobits {
	case 32:
		return d.write32(encoder, v)
	case 64:
		return d.write64(encoder, v)
	case 128:
		return d.write128(encoder, v)
	default:
		return errors.New("unachievable execution path")
	}
}

func (d *Decimal) float2int32(floating float64) int32 {
	fixed := int32(floating * factors10[d.scale])
	return fixed
}

func (d *Decimal) float2int64(floating float64) int64 {
	fixed := int64(floating * factors10[d.scale])
	return fixed
}

func (d *Decimal) write32(encoder *binary.Encoder, v interface{}) error {
	switch v := v.(type) {
	case int8:
		return encoder.Int32(int32(v))
	case int16:
		return encoder.Int32(int32(v))
	case int32:
		return encoder.Int32(int32(v))
	case int64:
		if v > math.MaxInt32 || v < math.MinInt32 {
			return errors.New("overflow when narrowing type conversion from int64 to int32")
		}
		return encoder.Int32(int32(v))

	case uint8:
		return encoder.Int32(int32(v))
	case uint16:
		return encoder.Int32(int32(v))
	case uint32:
		if v > math.MaxInt32 {
			return errors.New("overflow when narrowing type conversion from uint32 to int32")
		}
		return encoder.Int32(int32(v))
	case uint64:
		if v > math.MaxInt32 {
			return errors.New("overflow when narrowing type conversion from uint64 to int32")
		}
		return encoder.Int32(int32(v))

	case float32:
		fixed := d.float2int32(float64(v))
		return encoder.Int32(fixed)
	case float64:
		fixed := d.float2int32(float64(v))
		return encoder.Int32(fixed)

	// this relies on Nullable never sending nil values through
	case *int8:
		return encoder.Int32(int32(*v))
	case *int16:
		return encoder.Int32(int32(*v))
	case *int32:
		return encoder.Int32(int32(*v))
	case *int64:
		if *v > math.MaxInt32 || *v < math.MinInt32 {
			return errors.New("overflow when narrowing type conversion from int64 to int32")
		}
		return encoder.Int32(int32(*v))

	case *uint8:
		return encoder.Int32(int32(*v))
	case *uint16:
		return encoder.Int32(int32(*v))
	case *uint32:
		if *v > math.MaxInt32 {
			return errors.New("overflow when narrowing type conversion from uint32 to int32")
		}
		return encoder.Int32(int32(*v))
	case *uint64:
		if *v > math.MaxInt32 {
			return errors.New("overflow when narrowing type conversion from uint64 to int32")
		}
		return encoder.Int32(int32(*v))

	case *float32:
		fixed := d.float2int32(float64(*v))
		return encoder.Int32(fixed)
	case *float64:
		fixed := d.float2int32(float64(*v))
		return encoder.Int32(fixed)
	}

	return &ErrUnexpectedType{
		T:      v,
		Column: d,
	}
}

func (d *Decimal) write64(encoder *binary.Encoder, v interface{}) error {
	switch v := v.(type) {
	case int:
		return encoder.Int64(int64(v))
	case int8:
		return encoder.Int64(int64(v))
	case int16:
		return encoder.Int64(int64(v))
	case int32:
		return encoder.Int64(int64(v))
	case int64:
		return encoder.Int64(int64(v))

	case uint8:
		return encoder.Int64(int64(v))
	case uint16:
		return encoder.Int64(int64(v))
	case uint32:
		return encoder.Int64(int64(v))
	case uint64:
		if v > math.MaxInt64 {
			return errors.New("overflow when narrowing type conversion from uint64 to int64")
		}
		return encoder.Int64(int64(v))

	case float32:
		fixed := d.float2int64(float64(v))
		return encoder.Int64(fixed)
	case float64:
		fixed := d.float2int64(float64(v))
		return encoder.Int64(fixed)

	// this relies on Nullable never sending nil values through
	case *int:
		return encoder.Int64(int64(*v))
	case *int8:
		return encoder.Int64(int64(*v))
	case *int16:
		return encoder.Int64(int64(*v))
	case *int32:
		return encoder.Int64(int64(*v))
	case *int64:
		return encoder.Int64(int64(*v))

	case *uint8:
		return encoder.Int64(int64(*v))
	case *uint16:
		return encoder.Int64(int64(*v))
	case *uint32:
		return encoder.Int64(int64(*v))
	case *uint64:
		if *v > math.MaxInt64 {
			return errors.New("overflow when narrowing type conversion from uint64 to int64")
		}
		return encoder.Int64(int64(*v))

	case *float32:
		fixed := d.float2int64(float64(*v))
		return encoder.Int64(fixed)
	case *float64:
		fixed := d.float2int64(float64(*v))
		return encoder.Int64(fixed)
	}

	return &ErrUnexpectedType{
		T:      v,
		Column: d,
	}
}

// Turns an int64 into 16 little-endian bytes.
func int64ToDecimal128(v int64) []byte {
	bytes := make([]byte, 16)
	b.LittleEndian.PutUint64(bytes[:8], uint64(v))
	sign := 0
	if v < 0 {
		sign = -1
	}
	b.LittleEndian.PutUint64(bytes[8:], uint64(sign))
	return bytes
}

// Turns a uint64 into 16 little-endian bytes.
func uint64ToDecimal128(v uint64) []byte {
	bytes := make([]byte, 16)
	b.LittleEndian.PutUint64(bytes[:8], uint64(v))
	return bytes
}

func (d *Decimal) write128(encoder *binary.Encoder, v interface{}) error {
	switch v := v.(type) {
	case int:
		return encoder.Decimal128(int64ToDecimal128(int64(v)))
	case int8:
		return encoder.Decimal128(int64ToDecimal128(int64(v)))
	case int16:
		return encoder.Decimal128(int64ToDecimal128(int64(v)))
	case int32:
		return encoder.Decimal128(int64ToDecimal128(int64(v)))
	case int64:
		return encoder.Decimal128(int64ToDecimal128(v))

	case uint8:
		return encoder.Decimal128(uint64ToDecimal128(uint64(v)))
	case uint16:
		return encoder.Decimal128(uint64ToDecimal128(uint64(v)))
	case uint32:
		return encoder.Decimal128(uint64ToDecimal128(uint64(v)))
	case uint64:
		return encoder.Decimal128(uint64ToDecimal128(v))

	case float32:
		fixed := d.float2int64(float64(v))
		return encoder.Decimal128(int64ToDecimal128(fixed))
	case float64:
		fixed := d.float2int64(float64(v))
		return encoder.Decimal128(int64ToDecimal128(fixed))

	case []byte:
		if len(v) != 16 {
			return errors.New("expected 16 bytes")
		}
		return encoder.Decimal128(v)

	// this relies on Nullable never sending nil values through
	case *int:
		return encoder.Decimal128(int64ToDecimal128(int64(*v)))
	case *int8:
		return encoder.Decimal128(int64ToDecimal128(int64(*v)))
	case *int16:
		return encoder.Decimal128(int64ToDecimal128(int64(*v)))
	case *int32:
		return encoder.Decimal128(int64ToDecimal128(int64(*v)))
	case *int64:
		return encoder.Decimal128(int64ToDecimal128(*v))

	case *uint8:
		return encoder.Decimal128(uint64ToDecimal128(uint64(*v)))
	case *uint16:
		return encoder.Decimal128(uint64ToDecimal128(uint64(*v)))
	case *uint32:
		return encoder.Decimal128(uint64ToDecimal128(uint64(*v)))
	case *uint64:
		return encoder.Decimal128(uint64ToDecimal128(*v))

	case *float32:
		fixed := d.float2int64(float64(*v))
		return encoder.Decimal128(int64ToDecimal128(fixed))
	case *float64:
		fixed := d.float2int64(float64(*v))
		return encoder.Decimal128(int64ToDecimal128(fixed))

	case *[]byte:
		if len(*v) != 16 {
			return errors.New("expected 16 bytes")
		}
		return encoder.Decimal128(*v)
	}

	return &ErrUnexpectedType{
		T:      v,
		Column: d,
	}
}

func parseDecimal(name, chType string) (Column, error) {
	switch {
	case len(chType) < 12:
		fallthrough
	case !strings.HasPrefix(chType, "Decimal"):
		fallthrough
	case chType[7] != '(':
		fallthrough
	case chType[len(chType)-1] != ')':
		return nil, fmt.Errorf("invalid Decimal format: '%s'", chType)
	}

	var params = strings.Split(chType[8:len(chType)-1], ",")

	if len(params) != 2 {
		return nil, fmt.Errorf("invalid Decimal format: '%s'", chType)
	}

	params[0] = strings.TrimSpace(params[0])
	params[1] = strings.TrimSpace(params[1])

	var err error
	var decimal = &Decimal{
		base: base{
			name:   name,
			chType: chType,
		},
	}

	if decimal.precision, err = strconv.Atoi(params[0]); err != nil {
		return nil, fmt.Errorf("'%s' is not Decimal type: %s", chType, err)
	} else if decimal.precision < 1 {
		return nil, errors.New("wrong precision of Decimal type")
	}

	if decimal.scale, err = strconv.Atoi(params[1]); err != nil {
		return nil, fmt.Errorf("'%s' is not Decimal type: %s", chType, err)
	} else if decimal.scale < 0 || decimal.scale > decimal.precision {
		return nil, errors.New("wrong scale of Decimal type")
	}

	switch {
	case decimal.precision <= 9:
		decimal.nobits = 32
		decimal.valueOf = columnBaseTypes[int32(0)]
	case decimal.precision <= 18:
		decimal.nobits = 64
		decimal.valueOf = columnBaseTypes[int64(0)]
	case decimal.precision <= 38:
		decimal.nobits = 128
		decimal.valueOf = reflect.ValueOf([]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
	default:
		return nil, errors.New("precision of Decimal exceeds max bound")
	}

	return decimal, nil
}

func (d *Decimal) GetPrecision() int {
	return d.precision
}

func (d *Decimal) GetScale() int {
	return d.scale
}
175
vendor/github.com/ClickHouse/clickhouse-go/lib/column/enum.go
generated
vendored
@ -1,175 +0,0 @@
package column

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

type Enum struct {
	iv map[string]interface{}
	vi map[interface{}]string
	base
	baseType interface{}
}

func (enum *Enum) Read(decoder *binary.Decoder, isNull bool) (interface{}, error) {
	var (
		err   error
		ident interface{}
	)
	switch enum.baseType.(type) {
	case int16:
		if ident, err = decoder.Int16(); err != nil {
			return nil, err
		}
	default:
		if ident, err = decoder.Int8(); err != nil {
			return nil, err
		}
	}
	if ident, found := enum.vi[ident]; found || isNull {
		return ident, nil
	}
	return nil, fmt.Errorf("invalid Enum value: %v", ident)
}

func (enum *Enum) Write(encoder *binary.Encoder, v interface{}) error {
	switch v := v.(type) {
	case string:
		return enum.encodeFromString(v, encoder)
	case uint8:
		if _, ok := enum.baseType.(int8); ok {
			return encoder.Int8(int8(v))
		}
	case int8:
		if _, ok := enum.baseType.(int8); ok {
			return encoder.Int8(v)
		}
	case uint16:
		if _, ok := enum.baseType.(int16); ok {
			return encoder.Int16(int16(v))
		}
	case int16:
		if _, ok := enum.baseType.(int16); ok {
			return encoder.Int16(v)
		}
	case int64:
		switch enum.baseType.(type) {
		case int8:
			return encoder.Int8(int8(v))
		case int16:
			return encoder.Int16(int16(v))
		}
	// nullable enums
	case *string:
		return enum.encodeFromString(*v, encoder)
	case *uint8:
		if _, ok := enum.baseType.(int8); ok {
			return encoder.Int8(int8(*v))
		}
	case *int8:
		if _, ok := enum.baseType.(int8); ok {
			return encoder.Int8(*v)
		}
	case *uint16:
		if _, ok := enum.baseType.(int16); ok {
			return encoder.Int16(int16(*v))
		}
	case *int16:
		if _, ok := enum.baseType.(int16); ok {
			return encoder.Int16(*v)
		}
	case *int64:
		switch enum.baseType.(type) {
		case int8:
			return encoder.Int8(int8(*v))
		case int16:
			return encoder.Int16(int16(*v))
		}
	}
	return &ErrUnexpectedType{
		T:      v,
		Column: enum,
	}
}

func (enum *Enum) encodeFromString(v string, encoder *binary.Encoder) error {
	ident, found := enum.iv[v]
	if !found {
		return fmt.Errorf("invalid Enum ident: %s", v)
	}
	switch ident := ident.(type) {
	case int8:
		return encoder.Int8(ident)
	case int16:
		return encoder.Int16(ident)
	default:
		return &ErrUnexpectedType{
			T:      ident,
			Column: enum,
		}
	}
}

func (enum *Enum) defaultValue() interface{} {
	return enum.baseType
}

func parseEnum(name, chType string) (*Enum, error) {
	var (
		data     string
		isEnum16 bool
	)
	if len(chType) < 8 {
		return nil, fmt.Errorf("invalid Enum format: %s", chType)
	}
	switch {
	case strings.HasPrefix(chType, "Enum8"):
		data = chType[6:]
	case strings.HasPrefix(chType, "Enum16"):
		data = chType[7:]
		isEnum16 = true
	default:
		return nil, fmt.Errorf("'%s' is not Enum type", chType)
	}
	enum := Enum{
		base: base{
			name:    name,
			chType:  chType,
			valueOf: columnBaseTypes[string("")],
		},
		iv: make(map[string]interface{}),
		vi: make(map[interface{}]string),
	}
	for _, block := range strings.Split(data[:len(data)-1], ",") {
		parts := strings.Split(block, "=")
		if len(parts) != 2 {
			return nil, fmt.Errorf("invalid Enum format: %s", chType)
		}
		var (
			ident      = strings.TrimSpace(parts[0])
			value, err = strconv.ParseInt(strings.TrimSpace(parts[1]), 10, 16)
		)
		if err != nil {
			return nil, fmt.Errorf("invalid Enum value: %v", chType)
		}
		{
			var (
				ident             = ident[1 : len(ident)-1]
				value interface{} = int16(value)
			)
			if !isEnum16 {
				value = int8(value.(int16))
			}
			if enum.baseType == nil {
				enum.baseType = value
			}
			enum.iv[ident] = value
			enum.vi[value] = ident
		}
	}
	return &enum, nil
}
71
vendor/github.com/ClickHouse/clickhouse-go/lib/column/fixed_string.go
generated
vendored
@ -1,71 +0,0 @@
package column

import (
	"encoding"
	"fmt"
	"reflect"

	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

type FixedString struct {
	base
	len      int
	scanType reflect.Type
}

func (str *FixedString) Read(decoder *binary.Decoder, isNull bool) (interface{}, error) {
	v, err := decoder.Fixed(str.len)
	if err != nil {
		return "", err
	}
	return string(v), nil
}

func (str *FixedString) Write(encoder *binary.Encoder, v interface{}) error {
	var fixedString []byte
	switch v := v.(type) {
	case string:
		fixedString = binary.Str2Bytes(v)
	case []byte:
		fixedString = v
	case encoding.BinaryMarshaler:
		bytes, err := v.MarshalBinary()
		if err != nil {
			return err
		}
		fixedString = bytes
	default:
		return &ErrUnexpectedType{
			T:      v,
			Column: str,
		}
	}
	switch {
	case len(fixedString) > str.len:
		return fmt.Errorf("too large value '%s' (expected %d, got %d)", fixedString, str.len, len(fixedString))
	case len(fixedString) < str.len:
		tmp := make([]byte, str.len)
		copy(tmp, fixedString)
		fixedString = tmp
	}
	if _, err := encoder.Write(fixedString); err != nil {
		return err
	}
	return nil
}

func parseFixedString(name, chType string) (*FixedString, error) {
	var strLen int
	if _, err := fmt.Sscanf(chType, "FixedString(%d)", &strLen); err != nil {
		return nil, err
	}
	return &FixedString{
		base: base{
			name:    name,
			chType:  chType,
			valueOf: columnBaseTypes[string("")],
		},
		len: strLen,
	}, nil
}
35
vendor/github.com/ClickHouse/clickhouse-go/lib/column/float32.go
generated
vendored
@ -1,35 +0,0 @@
package column

import (
	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

type Float32 struct{ base }

func (Float32) Read(decoder *binary.Decoder, isNull bool) (interface{}, error) {
	v, err := decoder.Float32()
	if err != nil {
		return float32(0), err
	}
	return v, nil
}

func (float *Float32) Write(encoder *binary.Encoder, v interface{}) error {
	switch v := v.(type) {
	case float32:
		return encoder.Float32(v)
	case float64:
		return encoder.Float32(float32(v))

	// this relies on Nullable never sending nil values through
	case *float32:
		return encoder.Float32(*v)
	case *float64:
		return encoder.Float32(float32(*v))
	}

	return &ErrUnexpectedType{
		T:      v,
		Column: float,
	}
}
35
vendor/github.com/ClickHouse/clickhouse-go/lib/column/float64.go
generated
vendored
@ -1,35 +0,0 @@
package column

import (
	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

type Float64 struct{ base }

func (Float64) Read(decoder *binary.Decoder, isNull bool) (interface{}, error) {
	v, err := decoder.Float64()
	if err != nil {
		return float64(0), err
	}
	return v, nil
}

func (float *Float64) Write(encoder *binary.Encoder, v interface{}) error {
	switch v := v.(type) {
	case float32:
		return encoder.Float64(float64(v))
	case float64:
		return encoder.Float64(v)

	// this relies on Nullable never sending nil values through
	case *float32:
		return encoder.Float64(float64(*v))
	case *float64:
		return encoder.Float64(*v)
	}

	return &ErrUnexpectedType{
		T:      v,
		Column: float,
	}
}
39
vendor/github.com/ClickHouse/clickhouse-go/lib/column/int16.go
generated
vendored
@ -1,39 +0,0 @@
package column

import (
	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

type Int16 struct{ base }

func (Int16) Read(decoder *binary.Decoder, isNull bool) (interface{}, error) {
	v, err := decoder.Int16()
	if err != nil {
		return int16(0), err
	}
	return v, nil
}

func (i *Int16) Write(encoder *binary.Encoder, v interface{}) error {
	switch v := v.(type) {
	case int16:
		return encoder.Int16(v)
	case int64:
		return encoder.Int16(int16(v))
	case int:
		return encoder.Int16(int16(v))

	// this relies on Nullable never sending nil values through
	case *int16:
		return encoder.Int16(*v)
	case *int64:
		return encoder.Int16(int16(*v))
	case *int:
		return encoder.Int16(int16(*v))
	}

	return &ErrUnexpectedType{
		T:      v,
		Column: i,
	}
}
39
vendor/github.com/ClickHouse/clickhouse-go/lib/column/int32.go
generated
vendored
@ -1,39 +0,0 @@
package column

import (
	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

type Int32 struct{ base }

func (Int32) Read(decoder *binary.Decoder, isNull bool) (interface{}, error) {
	v, err := decoder.Int32()
	if err != nil {
		return int32(0), err
	}
	return v, nil
}

func (i *Int32) Write(encoder *binary.Encoder, v interface{}) error {
	switch v := v.(type) {
	case int32:
		return encoder.Int32(v)
	case int64:
		return encoder.Int32(int32(v))
	case int:
		return encoder.Int32(int32(v))

	// this relies on Nullable never sending nil values through
	case *int32:
		return encoder.Int32(*v)
	case *int64:
		return encoder.Int32(int32(*v))
	case *int:
		return encoder.Int32(int32(*v))
	}

	return &ErrUnexpectedType{
		T:      v,
		Column: i,
	}
}
40
vendor/github.com/ClickHouse/clickhouse-go/lib/column/int64.go
generated
vendored
@ -1,40 +0,0 @@
package column

import (
	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

type Int64 struct{ base }

func (Int64) Read(decoder *binary.Decoder, isNull bool) (interface{}, error) {
	v, err := decoder.Int64()
	if err != nil {
		return int64(0), err
	}
	return v, nil
}

func (i *Int64) Write(encoder *binary.Encoder, v interface{}) error {
	switch v := v.(type) {
	case int:
		return encoder.Int64(int64(v))
	case int64:
		return encoder.Int64(v)
	case []byte:
		if _, err := encoder.Write(v); err != nil {
			return err
		}
		return nil

	// this relies on Nullable never sending nil values through
	case *int:
		return encoder.Int64(int64(*v))
	case *int64:
		return encoder.Int64(*v)
	}

	return &ErrUnexpectedType{
		T:      v,
		Column: i,
	}
}
49
vendor/github.com/ClickHouse/clickhouse-go/lib/column/int8.go
generated
vendored
@ -1,49 +0,0 @@
package column

import (
	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

type Int8 struct{ base }

func (Int8) Read(decoder *binary.Decoder, isNull bool) (interface{}, error) {
	v, err := decoder.Int8()
	if err != nil {
		return int8(0), err
	}
	return v, nil
}

func (i *Int8) Write(encoder *binary.Encoder, v interface{}) error {
	switch v := v.(type) {
	case int8:
		return encoder.Int8(v)
	case int64:
		return encoder.Int8(int8(v))
	case int:
		return encoder.Int8(int8(v))
	case bool:
		if v {
			return encoder.Int8(int8(1))
		}
		return encoder.Int8(int8(0))

	// this relies on Nullable never sending nil values through
	case *int8:
		return encoder.Int8(*v)
	case *int64:
		return encoder.Int8(int8(*v))
	case *int:
		return encoder.Int8(int8(*v))
	case *bool:
		if *v {
			return encoder.Int8(int8(1))
		}
		return encoder.Int8(int8(0))
	}

	return &ErrUnexpectedType{
		T:      v,
		Column: i,
	}
}
84
vendor/github.com/ClickHouse/clickhouse-go/lib/column/ip.go
generated
vendored
@ -1,84 +0,0 @@
/*
IP type support for ClickHouse as FixedString(16)
*/

package column

import (
	"database/sql/driver"
	"errors"
	"net"
	"strings"
)

var (
	errInvalidScanType  = errors.New("Invalid scan types")
	errInvalidScanValue = errors.New("Invalid scan value")
)

// IP column type
type IP net.IP

// Value implements the driver.Valuer interface; the address is
// right-aligned within the 16-byte buffer.
func (ip IP) Value() (driver.Value, error) {
	return ip.MarshalBinary()
}

func (ip IP) MarshalBinary() ([]byte, error) {
	if len(ip) < 16 {
		var (
			buff = make([]byte, 16)
			j    = 0
		)
		for i := 16 - len(ip); i < 16; i++ {
			buff[i] = ip[j]
			j++
		}
		for i := 0; i < 16-len(ip); i++ {
			buff[i] = '\x00'
		}
		if len(ip) == 4 {
			buff[11] = '\xff'
			buff[10] = '\xff'
		}
		return buff, nil
	}
	return []byte(ip), nil
}

// Scan implements the sql.Scanner interface
func (ip *IP) Scan(value interface{}) (err error) {
	switch v := value.(type) {
	case []byte:
		if len(v) == 4 || len(v) == 16 {
			*ip = IP(v)
		} else {
			err = errInvalidScanValue
		}
	case string:
		if v == "" {
			err = errInvalidScanValue
			return
		}
		if (len(v) == 4 || len(v) == 16) && !strings.Contains(v, ".") && !strings.Contains(v, ":") {
			*ip = IP([]byte(v))
			return
		}
		if strings.Contains(v, ":") {
			*ip = IP(net.ParseIP(v))
			return
		}
		*ip = IP(net.ParseIP(v).To4())
	case net.IP:
		*ip = IP(v)
	default:
		err = errInvalidScanType
	}
	return
}

// String implements the fmt.Stringer interface
func (ip IP) String() string {
	return net.IP(ip).String()
}
54
vendor/github.com/ClickHouse/clickhouse-go/lib/column/ipv4.go
generated
vendored
@ -1,54 +0,0 @@
package column

import (
	"net"

	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

type IPv4 struct {
	base
}

func (*IPv4) Read(decoder *binary.Decoder, isNull bool) (interface{}, error) {
	v, err := decoder.Fixed(4)
	if err != nil {
		return nil, err
	}
	return net.IPv4(v[3], v[2], v[1], v[0]), nil
}

func (ip *IPv4) Write(encoder *binary.Encoder, v interface{}) error {
	var netIP net.IP
	switch v.(type) {
	case string:
		netIP = net.ParseIP(v.(string))
	case net.IP:
		netIP = v.(net.IP)
	case *net.IP:
		netIP = *(v.(*net.IP))
	default:
		return &ErrUnexpectedType{
			T:      v,
			Column: ip,
		}
	}

	if netIP == nil {
		return &ErrUnexpectedType{
			T:      v,
			Column: ip,
		}
	}
	ip4 := netIP.To4()
	if ip4 == nil {
		return &ErrUnexpectedType{
			T:      v,
			Column: ip,
		}
	}
	if _, err := encoder.Write([]byte{ip4[3], ip4[2], ip4[1], ip4[0]}); err != nil {
		return err
	}
	return nil
}
47
vendor/github.com/ClickHouse/clickhouse-go/lib/column/ipv6.go
generated
vendored
@ -1,47 +0,0 @@
package column

import (
	"net"

	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

type IPv6 struct {
	base
}

func (*IPv6) Read(decoder *binary.Decoder, isNull bool) (interface{}, error) {
	v, err := decoder.Fixed(16)
	if err != nil {
		return nil, err
	}
	return net.IP(v), nil
}

func (ip *IPv6) Write(encoder *binary.Encoder, v interface{}) error {
	var netIP net.IP
	switch v.(type) {
	case string:
		netIP = net.ParseIP(v.(string))
	case net.IP:
		netIP = v.(net.IP)
	case *net.IP:
		netIP = *(v.(*net.IP))
	default:
		return &ErrUnexpectedType{
			T:      v,
			Column: ip,
		}
	}

	if netIP == nil {
		return &ErrUnexpectedType{
			T:      v,
			Column: ip,
		}
	}
	if _, err := encoder.Write([]byte(netIP.To16())); err != nil {
		return err
	}
	return nil
}
96
vendor/github.com/ClickHouse/clickhouse-go/lib/column/nullable.go
generated
vendored
@ -1,96 +0,0 @@
package column

import (
	"fmt"
	"reflect"
	"time"

	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

type Nullable struct {
	base
	column Column
}

func (null *Nullable) ScanType() reflect.Type {
	return reflect.PtrTo(null.column.ScanType())
}

func (null *Nullable) Read(decoder *binary.Decoder, isNull bool) (interface{}, error) {
	return null.column.Read(decoder, isNull)
}

func (null *Nullable) Write(encoder *binary.Encoder, v interface{}) error {
	return nil
}

func (null *Nullable) ReadNull(decoder *binary.Decoder, rows int) (_ []interface{}, err error) {
	var (
		isNull byte
		value  interface{}
		nulls  = make([]byte, rows)
		values = make([]interface{}, rows)
	)
	for i := 0; i < rows; i++ {
		if isNull, err = decoder.ReadByte(); err != nil {
			return nil, err
		}
		nulls[i] = isNull
	}
	for i, isNull := range nulls {
		switch value, err = null.column.Read(decoder, isNull != 0); true {
		case err != nil:
			return nil, err
		case isNull == 0:
			values[i] = value
		default:
			values[i] = nil
		}
	}
	return values, nil
}

func (null *Nullable) WriteNull(nulls, encoder *binary.Encoder, v interface{}) error {
	if isNil(v) {
		if _, err := nulls.Write([]byte{1}); err != nil {
			return err
		}
		return null.column.Write(encoder, null.column.defaultValue())
	}
	if _, err := nulls.Write([]byte{0}); err != nil {
		return err
	}
	return null.column.Write(encoder, v)
}

func parseNullable(name, chType string, timezone *time.Location) (*Nullable, error) {
	if len(chType) < 14 {
		return nil, fmt.Errorf("invalid Nullable column type: %s", chType)
	}
	column, err := Factory(name, chType[9:][:len(chType)-10], timezone)
	if err != nil {
		return nil, fmt.Errorf("Nullable(T): %v", err)
	}
	return &Nullable{
		base: base{
			name:   name,
			chType: chType,
		},
		column: column,
	}, nil
}

func (null *Nullable) GetColumn() Column {
	return null.column
}

func isNil(v interface{}) bool {
	if v == nil {
		return true
	}
	switch val := reflect.ValueOf(v); val.Type().Kind() {
	case reflect.Array, reflect.Chan, reflect.Map, reflect.Ptr, reflect.Slice:
		return val.IsNil()
	}
	return false
}
173
vendor/github.com/ClickHouse/clickhouse-go/lib/column/nullable_appender.go
generated
vendored
@ -1,173 +0,0 @@
// DANGER! This code was autogenerated from template by clickhouse-go/lib/codegen/nullable_appender.
// You shouldn't change it manually.
// For more info check clickhouse-go/lib/codegen/nullable_appender/main.go

package column

import (
	"fmt"
	"net"
	"reflect"
	"time"
)

var nullableAppender = map[string]func(v interface{}, slice reflect.Value) (reflect.Value, error){

	"*int8": func(v interface{}, slice reflect.Value) (reflect.Value, error) {
		if v != nil {
			v, ok := v.(int8)
			if !ok {
				return slice, fmt.Errorf("cannot assert to type int8")
			}
			return reflect.Append(slice, reflect.ValueOf(&v)), nil
		}
		var vNil *int8
		return reflect.Append(slice, reflect.ValueOf(vNil)), nil
	},

	"*int16": func(v interface{}, slice reflect.Value) (reflect.Value, error) {
		if v != nil {
			v, ok := v.(int16)
			if !ok {
				return slice, fmt.Errorf("cannot assert to type int16")
			}
			return reflect.Append(slice, reflect.ValueOf(&v)), nil
		}
		var vNil *int16
		return reflect.Append(slice, reflect.ValueOf(vNil)), nil
	},

	"*int32": func(v interface{}, slice reflect.Value) (reflect.Value, error) {
		if v != nil {
			v, ok := v.(int32)
			if !ok {
				return slice, fmt.Errorf("cannot assert to type int32")
			}
			return reflect.Append(slice, reflect.ValueOf(&v)), nil
		}
		var vNil *int32
		return reflect.Append(slice, reflect.ValueOf(vNil)), nil
	},

	"*int64": func(v interface{}, slice reflect.Value) (reflect.Value, error) {
		if v != nil {
			v, ok := v.(int64)
			if !ok {
				return slice, fmt.Errorf("cannot assert to type int64")
			}
			return reflect.Append(slice, reflect.ValueOf(&v)), nil
		}
		var vNil *int64
		return reflect.Append(slice, reflect.ValueOf(vNil)), nil
	},

	"*uint8": func(v interface{}, slice reflect.Value) (reflect.Value, error) {
		if v != nil {
			v, ok := v.(uint8)
			if !ok {
				return slice, fmt.Errorf("cannot assert to type uint8")
			}
			return reflect.Append(slice, reflect.ValueOf(&v)), nil
		}
		var vNil *uint8
		return reflect.Append(slice, reflect.ValueOf(vNil)), nil
	},

	"*uint16": func(v interface{}, slice reflect.Value) (reflect.Value, error) {
		if v != nil {
			v, ok := v.(uint16)
			if !ok {
				return slice, fmt.Errorf("cannot assert to type uint16")
			}
			return reflect.Append(slice, reflect.ValueOf(&v)), nil
		}
		var vNil *uint16
		return reflect.Append(slice, reflect.ValueOf(vNil)), nil
	},

	"*uint32": func(v interface{}, slice reflect.Value) (reflect.Value, error) {
		if v != nil {
			v, ok := v.(uint32)
			if !ok {
				return slice, fmt.Errorf("cannot assert to type uint32")
			}
			return reflect.Append(slice, reflect.ValueOf(&v)), nil
		}
		var vNil *uint32
		return reflect.Append(slice, reflect.ValueOf(vNil)), nil
	},

	"*uint64": func(v interface{}, slice reflect.Value) (reflect.Value, error) {
		if v != nil {
			v, ok := v.(uint64)
			if !ok {
				return slice, fmt.Errorf("cannot assert to type uint64")
			}
			return reflect.Append(slice, reflect.ValueOf(&v)), nil
		}
		var vNil *uint64
		return reflect.Append(slice, reflect.ValueOf(vNil)), nil
	},

	"*float32": func(v interface{}, slice reflect.Value) (reflect.Value, error) {
		if v != nil {
			v, ok := v.(float32)
			if !ok {
				return slice, fmt.Errorf("cannot assert to type float32")
			}
			return reflect.Append(slice, reflect.ValueOf(&v)), nil
		}
		var vNil *float32
		return reflect.Append(slice, reflect.ValueOf(vNil)), nil
	},

	"*float64": func(v interface{}, slice reflect.Value) (reflect.Value, error) {
		if v != nil {
			v, ok := v.(float64)
			if !ok {
				return slice, fmt.Errorf("cannot assert to type float64")
			}
			return reflect.Append(slice, reflect.ValueOf(&v)), nil
		}
		var vNil *float64
		return reflect.Append(slice, reflect.ValueOf(vNil)), nil
	},

	"*string": func(v interface{}, slice reflect.Value) (reflect.Value, error) {
		if v != nil {
			v, ok := v.(string)
			if !ok {
				return slice, fmt.Errorf("cannot assert to type string")
			}
			return reflect.Append(slice, reflect.ValueOf(&v)), nil
		}
		var vNil *string
		return reflect.Append(slice, reflect.ValueOf(vNil)), nil
	},

	"*time.Time": func(v interface{}, slice reflect.Value) (reflect.Value, error) {
		if v != nil {
			v, ok := v.(time.Time)
			if !ok {
				return slice, fmt.Errorf("cannot assert to type time.Time")
			}
			return reflect.Append(slice, reflect.ValueOf(&v)), nil
		}
		var vNil *time.Time
		return reflect.Append(slice, reflect.ValueOf(vNil)), nil
	},

	"*net.IP": func(v interface{}, slice reflect.Value) (reflect.Value, error) {
		if v != nil {
			v, ok := v.(net.IP)
			if !ok {
				return slice, fmt.Errorf("cannot assert to type net.IP")
			}
			return reflect.Append(slice, reflect.ValueOf(&v)), nil
		}
		var vNil *net.IP
		return reflect.Append(slice, reflect.ValueOf(vNil)), nil
	},
}
35
vendor/github.com/ClickHouse/clickhouse-go/lib/column/string.go
generated
vendored
@ -1,35 +0,0 @@
package column

import (
	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

type String struct{ base }

func (String) Read(decoder *binary.Decoder, isNull bool) (interface{}, error) {
	v, err := decoder.String()
	if err != nil {
		return "", err
	}
	return v, nil
}

func (str *String) Write(encoder *binary.Encoder, v interface{}) error {
	switch v := v.(type) {
	case string:
		return encoder.String(v)
	case []byte:
		return encoder.RawString(v)

	// this relies on Nullable never sending nil values through
	case *string:
		return encoder.String(*v)
	case *[]byte:
		return encoder.RawString(*v)
	}

	return &ErrUnexpectedType{
		T:      v,
		Column: str,
	}
}
110
vendor/github.com/ClickHouse/clickhouse-go/lib/column/tuple.go
generated
vendored
@ -1,110 +0,0 @@
package column

import (
	"fmt"
	"reflect"
	"strconv"
	"time"

	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

type Tuple struct {
	base
	columns []Column
}

func (tuple *Tuple) Read(decoder *binary.Decoder, isNull bool) (interface{}, error) {
	return nil, fmt.Errorf("do not use Read method for Tuple(T) column")
}

func (tuple *Tuple) ReadTuple(decoder *binary.Decoder, rows int) ([]interface{}, error) {
	var values = make([][]interface{}, rows)

	for _, c := range tuple.columns {
		switch column := c.(type) {
		case *Array:
			cols, err := column.ReadArray(decoder, rows)
			if err != nil {
				return nil, err
			}
			for i := 0; i < rows; i++ {
				values[i] = append(values[i], cols[i])
			}

		case *Nullable:
			cols, err := column.ReadNull(decoder, rows)
			if err != nil {
				return nil, err
			}
			for i := 0; i < rows; i++ {
				values[i] = append(values[i], cols[i])
			}

		case *Tuple:
			cols, err := column.ReadTuple(decoder, rows)
			if err != nil {
				return nil, err
			}
			for i := 0; i < rows; i++ {
				values[i] = append(values[i], cols[i])
			}

		default:
			for i := 0; i < rows; i++ {
				value, err := c.Read(decoder, false)
				if err != nil {
					return nil, err
				}
				values[i] = append(values[i], value)
			}
		}
	}

	var ret = make([]interface{}, rows)
	for i := range values {
		ret[i] = values[i]
	}

	return ret, nil
}

func (tuple *Tuple) Write(encoder *binary.Encoder, v interface{}) (err error) {
	return fmt.Errorf("unsupported Tuple(T) type [%T]", v)
}

func parseTuple(name, chType string, timezone *time.Location) (Column, error) {
	var columnType = chType

	chType = chType[6 : len(chType)-1]
	var types []string
	var last, diff int
	for i, b := range chType + "," {
		if b == '(' {
			diff++
		} else if b == ')' {
			diff--
		} else if b == ',' && diff == 0 {
			types = append(types, chType[last:i])
			last = i + 2
		}
	}

	var columns = make([]Column, 0, len(types))
	for i, chType := range types {
		column, err := Factory(name+"."+strconv.Itoa(i+1), chType, timezone)
		if err != nil {
			return nil, fmt.Errorf("%s: %v", chType, err)
		}
		columns = append(columns, column)
	}

	return &Tuple{
		base: base{
			name:    name,
			chType:  columnType,
			valueOf: reflect.ValueOf([]interface{}{}),
		},
		columns: columns,
	}, nil
}
43
vendor/github.com/ClickHouse/clickhouse-go/lib/column/uint16.go
generated
vendored
@ -1,43 +0,0 @@
package column

import (
	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

type UInt16 struct{ base }

func (UInt16) Read(decoder *binary.Decoder, isNull bool) (interface{}, error) {
	v, err := decoder.UInt16()
	if err != nil {
		return uint16(0), err
	}
	return v, nil
}

func (u *UInt16) Write(encoder *binary.Encoder, v interface{}) error {
	switch v := v.(type) {
	case uint16:
		return encoder.UInt16(v)
	case int64:
		return encoder.UInt16(uint16(v))
	case uint64:
		return encoder.UInt16(uint16(v))
	case int:
		return encoder.UInt16(uint16(v))

	// this relies on Nullable never sending nil values through
	case *uint16:
		return encoder.UInt16(*v)
	case *int64:
		return encoder.UInt16(uint16(*v))
	case *uint64:
		return encoder.UInt16(uint16(*v))
	case *int:
		return encoder.UInt16(uint16(*v))
	}

	return &ErrUnexpectedType{
		T:      v,
		Column: u,
	}
}
43
vendor/github.com/ClickHouse/clickhouse-go/lib/column/uint32.go
generated
vendored
@ -1,43 +0,0 @@
package column

import (
	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

type UInt32 struct{ base }

func (UInt32) Read(decoder *binary.Decoder, isNull bool) (interface{}, error) {
	v, err := decoder.UInt32()
	if err != nil {
		return uint32(0), err
	}
	return v, nil
}

func (u *UInt32) Write(encoder *binary.Encoder, v interface{}) error {
	switch v := v.(type) {
	case uint32:
		return encoder.UInt32(v)
	case uint64:
		return encoder.UInt32(uint32(v))
	case int64:
		return encoder.UInt32(uint32(v))
	case int:
		return encoder.UInt32(uint32(v))

	// this relies on Nullable never sending nil values through
	case *uint64:
		return encoder.UInt32(uint32(*v))
	case *uint32:
		return encoder.UInt32(*v)
	case *int64:
		return encoder.UInt32(uint32(*v))
	case *int:
		return encoder.UInt32(uint32(*v))
	}

	return &ErrUnexpectedType{
		T:      v,
		Column: u,
	}
}
44
vendor/github.com/ClickHouse/clickhouse-go/lib/column/uint64.go
generated
vendored
@ -1,44 +0,0 @@
package column

import (
	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

type UInt64 struct{ base }

func (UInt64) Read(decoder *binary.Decoder, isNull bool) (interface{}, error) {
	v, err := decoder.UInt64()
	if err != nil {
		return uint64(0), err
	}
	return v, nil
}

func (u *UInt64) Write(encoder *binary.Encoder, v interface{}) error {
	switch v := v.(type) {
	case []byte:
		if _, err := encoder.Write(v); err != nil {
			return err
		}
		return nil
	case uint64:
		return encoder.UInt64(v)
	case int64:
		return encoder.UInt64(uint64(v))
	case int:
		return encoder.UInt64(uint64(v))

	// this relies on Nullable never sending nil values through
	case *uint64:
		return encoder.UInt64(*v)
	case *int64:
		return encoder.UInt64(uint64(*v))
	case *int:
		return encoder.UInt64(uint64(*v))
	}

	return &ErrUnexpectedType{
		T:      v,
		Column: u,
	}
}
47
vendor/github.com/ClickHouse/clickhouse-go/lib/column/uint8.go
generated
vendored
@ -1,47 +0,0 @@
package column

import (
	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

type UInt8 struct{ base }

func (UInt8) Read(decoder *binary.Decoder, isNull bool) (interface{}, error) {
	v, err := decoder.UInt8()
	if err != nil {
		return uint8(0), err
	}
	return v, nil
}

func (u *UInt8) Write(encoder *binary.Encoder, v interface{}) error {
	switch v := v.(type) {
	case bool:
		return encoder.Bool(v)
	case uint8:
		return encoder.UInt8(v)
	case int64:
		return encoder.UInt8(uint8(v))
	case uint64:
		return encoder.UInt8(uint8(v))
	case int:
		return encoder.UInt8(uint8(v))

	// this relies on Nullable never sending nil values through
	case *bool:
		return encoder.Bool(*v)
	case *uint8:
		return encoder.UInt8(*v)
	case *int64:
		return encoder.UInt8(uint8(*v))
	case *uint64:
		return encoder.UInt8(uint8(*v))
	case *int:
		return encoder.UInt8(uint8(*v))
	}

	return &ErrUnexpectedType{
		T:      v,
		Column: u,
	}
}
139
vendor/github.com/ClickHouse/clickhouse-go/lib/column/uuid.go
generated
vendored
@ -1,139 +0,0 @@
package column

import (
	"encoding/hex"
	"errors"
	"fmt"
	"reflect"

	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

const (
	UUIDLen  = 16
	NullUUID = "00000000-0000-0000-0000-000000000000"
)

var ErrInvalidUUIDFormat = errors.New("invalid UUID format")

type UUID struct {
	base
	scanType reflect.Type
}

func (*UUID) Read(decoder *binary.Decoder, isNull bool) (interface{}, error) {
	src, err := decoder.Fixed(UUIDLen)
	if err != nil {
		return "", err
	}

	src = swap(src)

	var uuid [36]byte
	{
		hex.Encode(uuid[:], src[:4])
		uuid[8] = '-'
		hex.Encode(uuid[9:13], src[4:6])
		uuid[13] = '-'
		hex.Encode(uuid[14:18], src[6:8])
		uuid[18] = '-'
		hex.Encode(uuid[19:23], src[8:10])
		uuid[23] = '-'
		hex.Encode(uuid[24:], src[10:])
	}
	return string(uuid[:]), nil
}

func (u *UUID) Write(encoder *binary.Encoder, v interface{}) (err error) {
	var uuid []byte
	switch v := v.(type) {
	case string:
		if uuid, err = uuid2bytes(v); err != nil {
			return err
		}
	case []byte:
		if len(v) != UUIDLen {
			return fmt.Errorf("invalid raw UUID len '%s' (expected %d, got %d)", uuid, UUIDLen, len(uuid))
		}
		uuid = make([]byte, 16)
		copy(uuid, v)
	default:
		return &ErrUnexpectedType{
			T:      v,
			Column: u,
		}
	}

	uuid = swap(uuid)

	if _, err := encoder.Write(uuid); err != nil {
		return err
	}
	return nil
}

func swap(src []byte) []byte {
	_ = src[15]
	src[0], src[7] = src[7], src[0]
	src[1], src[6] = src[6], src[1]
	src[2], src[5] = src[5], src[2]
	src[3], src[4] = src[4], src[3]
	src[8], src[15] = src[15], src[8]
	src[9], src[14] = src[14], src[9]
	src[10], src[13] = src[13], src[10]
	src[11], src[12] = src[12], src[11]
	return src
}

func uuid2bytes(str string) ([]byte, error) {
	var uuid [16]byte
	strLength := len(str)
	if strLength == 0 {
		str = NullUUID
	} else if strLength != 36 {
		return nil, ErrInvalidUUIDFormat
	}
	if str[8] != '-' || str[13] != '-' || str[18] != '-' || str[23] != '-' {
		return nil, ErrInvalidUUIDFormat
	}
	for i, x := range [16]int{
		0, 2, 4, 6,
		9, 11, 14, 16,
		19, 21, 24, 26,
		28, 30, 32, 34,
	} {
		if v, ok := xtob(str[x], str[x+1]); !ok {
			return nil, ErrInvalidUUIDFormat
		} else {
			uuid[i] = v
		}
	}
	return uuid[:], nil
}

// xvalues returns the value of a byte as a hexadecimal digit or 255.
var xvalues = [256]byte{
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
}

// xtob converts hex characters x1 and x2 into a byte.
func xtob(x1, x2 byte) (byte, bool) {
	b1 := xvalues[x1]
	b2 := xvalues[x2]
	return (b1 << 4) | b2, b1 != 255 && b2 != 255
}
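A note on the `swap` above: the native protocol transmits a UUID as two 64-bit halves, each with its bytes reversed relative to the textual order, so the same in-place swap converts in both directions. A minimal standalone sketch (the literal bytes are arbitrary sample data, not anything from the driver):

```go
package main

import (
	"encoding/hex"
	"fmt"
)

// swap mirrors the reordering in the column code above: reverse each
// 8-byte half of the 16-byte UUID.
func swap(src []byte) []byte {
	for i, j := 0, 7; i < j; i, j = i+1, j-1 {
		src[i], src[j] = src[j], src[i]
	}
	for i, j := 8, 15; i < j; i, j = i+1, j-1 {
		src[i], src[j] = src[j], src[i]
	}
	return src
}

func main() {
	// Arbitrary sample bytes in textual (big-endian) order.
	raw, _ := hex.DecodeString("0123456789abcdef0011223344556677")
	wire := swap(append([]byte(nil), raw...))
	fmt.Printf("text order: %x\n", raw)
	fmt.Printf("wire order: %x\n", wire)
	fmt.Printf("round trip: %x\n", swap(wire)) // applying swap twice restores the input
}
```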
326
vendor/github.com/ClickHouse/clickhouse-go/lib/data/block.go
generated
vendored
@ -1,326 +0,0 @@
package data

import (
	"bytes"
	"database/sql/driver"
	"fmt"
	"io"
	"reflect"
	"strings"

	"github.com/ClickHouse/clickhouse-go/lib/binary"
	"github.com/ClickHouse/clickhouse-go/lib/column"
)

type offset [][]int

type Block struct {
	Values     [][]interface{}
	Columns    []column.Column
	NumRows    uint64
	NumColumns uint64
	offsets    []offset
	buffers    []*buffer
	info       blockInfo
}

func (block *Block) Copy() *Block {
	return &Block{
		Columns:    block.Columns,
		NumColumns: block.NumColumns,
		info:       block.info,
	}
}

func (block *Block) ColumnNames() []string {
	names := make([]string, 0, len(block.Columns))
	for _, column := range block.Columns {
		names = append(names, column.Name())
	}
	return names
}

func (block *Block) Read(serverInfo *ServerInfo, decoder *binary.Decoder) (err error) {
	if serverInfo.Revision > 0 {
		if err = block.info.read(decoder); err != nil {
			return err
		}
	}

	if block.NumColumns, err = decoder.Uvarint(); err != nil {
		return err
	}
	if block.NumRows, err = decoder.Uvarint(); err != nil {
		return err
	}
	block.Values = make([][]interface{}, block.NumColumns)
	if block.NumRows > 10 {
		for i := 0; i < int(block.NumColumns); i++ {
			block.Values[i] = make([]interface{}, 0, block.NumRows)
		}
	}
	for i := 0; i < int(block.NumColumns); i++ {
		var (
			value      interface{}
			columnName string
			columnType string
		)
		if columnName, err = decoder.String(); err != nil {
			return err
		}
		if columnType, err = decoder.String(); err != nil {
			return err
		}
		c, err := column.Factory(columnName, columnType, serverInfo.Timezone)
		if err != nil {
			return err
		}
		block.Columns = append(block.Columns, c)
		switch column := c.(type) {
		case *column.Array:
			if block.Values[i], err = column.ReadArray(decoder, int(block.NumRows)); err != nil {
				return err
			}
		case *column.Nullable:
			if block.Values[i], err = column.ReadNull(decoder, int(block.NumRows)); err != nil {
				return err
			}
		case *column.Tuple:
			if block.Values[i], err = column.ReadTuple(decoder, int(block.NumRows)); err != nil {
				return err
			}
		default:
			for row := 0; row < int(block.NumRows); row++ {
				if value, err = column.Read(decoder, false); err != nil {
					return err
				}
				block.Values[i] = append(block.Values[i], value)
			}
		}
	}
	return nil
}

func (block *Block) writeArray(col column.Column, value Value, num, level int) error {
	if level > col.Depth() {
		arrColumn, ok := col.(*column.Array)
		if ok && strings.Contains(col.CHType(), "Nullable") {
			return arrColumn.WriteNull(block.buffers[num].Offset, block.buffers[num].Column, value.Interface())
		}
		return col.Write(block.buffers[num].Column, value.Interface())
	}

	switch {
	case value.Kind() == reflect.Slice:
		if len(block.offsets[num]) < level {
			block.offsets[num] = append(block.offsets[num], []int{value.Len()})
		} else {
			block.offsets[num][level-1] = append(
				block.offsets[num][level-1],
				block.offsets[num][level-1][len(block.offsets[num][level-1])-1]+value.Len(),
			)
		}
		for i := 0; i < value.Len(); i++ {
			if err := block.writeArray(col, value.Index(i), num, level+1); err != nil {
				return err
			}
		}
	default:
		if err := col.Write(block.buffers[num].Column, value.Interface()); err != nil {
			return err
		}
	}
	return nil
}

func (block *Block) AppendRow(args []driver.Value) error {
	if len(block.Columns) != len(args) {
		return fmt.Errorf("block: expected %d arguments (columns: %s), got %d", len(block.Columns), strings.Join(block.ColumnNames(), ", "), len(args))
	}
	block.Reserve()
	{
		block.NumRows++
	}
	for num, c := range block.Columns {
		switch column := c.(type) {
		case *column.Array:
			if args[num] == nil {
				return fmt.Errorf("unsupported [nil] value is passed in argument %d, column is not Nullable", num)
			}
			value := reflect.ValueOf(args[num])
			if value.Kind() != reflect.Slice {
				return fmt.Errorf("unsupported Array(T) type [%T]", value.Interface())
			}
			if err := block.writeArray(c, newValue(value), num, 1); err != nil {
				return err
			}
		case *column.Nullable:
			if err := column.WriteNull(block.buffers[num].Offset, block.buffers[num].Column, args[num]); err != nil {
				return err
			}
		default:
			if err := column.Write(block.buffers[num].Column, args[num]); err != nil {
				return err
			}
		}
	}
	return nil
}

func (block *Block) Reserve() {
	if len(block.buffers) == 0 {
		block.buffers = make([]*buffer, len(block.Columns))
		block.offsets = make([]offset, len(block.Columns))
		for i := 0; i < len(block.Columns); i++ {
			var (
				offsetBuffer = new(bytes.Buffer)
				columnBuffer = new(bytes.Buffer)
			)
			block.buffers[i] = &buffer{
				Offset:       binary.NewEncoder(offsetBuffer),
				Column:       binary.NewEncoder(columnBuffer),
				offsetBuffer: offsetBuffer,
				columnBuffer: columnBuffer,
			}
		}
	}
}

func (block *Block) Reset() {
	block.NumRows = 0
	block.NumColumns = 0
	block.Values = block.Values[:0]
	block.Columns = block.Columns[:0]
	block.info.reset()
	for _, buffer := range block.buffers {
		buffer.reset()
	}
	{
		block.offsets = nil
		block.buffers = nil
	}
}

func (block *Block) Write(serverInfo *ServerInfo, encoder *binary.Encoder) error {
	if serverInfo.Revision > 0 {
		if err := block.info.write(encoder); err != nil {
			return err
		}
	}
	if err := encoder.Uvarint(block.NumColumns); err != nil {
		return err
	}
	encoder.Uvarint(block.NumRows)
	defer func() {
		block.NumRows = 0
		for i := range block.offsets {
			block.offsets[i] = offset{}
		}
	}()
	for i, column := range block.Columns {
		encoder.String(column.Name())
		encoder.String(column.CHType())
		if len(block.buffers) == len(block.Columns) {
			for _, offsets := range block.offsets[i] {
				for _, offset := range offsets {
					if err := encoder.UInt64(uint64(offset)); err != nil {
						return err
					}
				}
			}
			if _, err := block.buffers[i].WriteTo(encoder); err != nil {
				return err
			}
		}
	}
	return nil
}

type blockInfo struct {
	num1        uint64
	isOverflows bool
	num2        uint64
	bucketNum   int32
	num3        uint64
}

func (info *blockInfo) reset() {
	info.num1 = 0
	info.isOverflows = false
	info.num2 = 0
	info.bucketNum = 0
	info.num3 = 0
}

func (info *blockInfo) read(decoder *binary.Decoder) error {
	var err error
	if info.num1, err = decoder.Uvarint(); err != nil {
		return err
	}
	if info.isOverflows, err = decoder.Bool(); err != nil {
		return err
	}
	if info.num2, err = decoder.Uvarint(); err != nil {
		return err
	}
	if info.bucketNum, err = decoder.Int32(); err != nil {
		return err
	}
	if info.num3, err = decoder.Uvarint(); err != nil {
		return err
	}
	return nil
}

func (info *blockInfo) write(encoder *binary.Encoder) error {
	if err := encoder.Uvarint(1); err != nil {
		return err
	}
	if err := encoder.Bool(info.isOverflows); err != nil {
		return err
	}
	if err := encoder.Uvarint(2); err != nil {
		return err
	}
	if info.bucketNum == 0 {
		info.bucketNum = -1
	}
	if err := encoder.Int32(info.bucketNum); err != nil {
		return err
	}
	if err := encoder.Uvarint(0); err != nil {
		return err
	}
	return nil
}

type buffer struct {
	Offset       *binary.Encoder
	Column       *binary.Encoder
	offsetBuffer *bytes.Buffer
	columnBuffer *bytes.Buffer
}

func (buf *buffer) WriteTo(w io.Writer) (int64, error) {
	var size int64
	{
		ln, err := buf.offsetBuffer.WriteTo(w)
		if err != nil {
			return size, err
		}
		size += ln
	}
	{
		ln, err := buf.columnBuffer.WriteTo(w)
		if err != nil {
			return size, err
		}
		size += ln
	}
	return size, nil
}

func (buf *buffer) reset() {
	buf.offsetBuffer.Reset()
	buf.columnBuffer.Reset()
}
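For reference, the offset bookkeeping in `writeArray` follows the native Array(T) layout: one stream of cumulative end offsets per nesting level, then the flattened elements. A hedged standalone sketch of that accumulation for a single level:

```go
package main

import "fmt"

// One level of the accumulation writeArray performs: cumulative end
// offsets in one stream, flattened elements in the other.
func main() {
	rows := [][]int{{1, 2}, {}, {3, 4, 5}}

	var offsets []uint64
	var flat []int
	total := uint64(0)
	for _, row := range rows {
		total += uint64(len(row))
		offsets = append(offsets, total) // running total, not per-row lengths
		flat = append(flat, row...)
	}
	fmt.Println(offsets) // [2 2 5]
	fmt.Println(flat)    // [1 2 3 4 5]
}
```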
277
vendor/github.com/ClickHouse/clickhouse-go/lib/data/block_write_column.go
generated
vendored
@ -1,277 +0,0 @@
package data

import (
	"fmt"
	"github.com/ClickHouse/clickhouse-go/lib/column"
	"net"
	"reflect"
	"time"

	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

func (block *Block) WriteDate(c int, v time.Time) error {
	_, offset := v.Zone()
	nday := (v.Unix() + int64(offset)) / 24 / 3600
	return block.buffers[c].Column.UInt16(uint16(nday))
}

func (block *Block) WriteDateNullable(c int, v *time.Time) error {
	if err := block.buffers[c].Offset.Bool(v == nil); err != nil {
		return err
	}
	if v == nil {
		return block.buffers[c].Column.UInt16(0)
	}
	return block.WriteDate(c, *v)
}

func (block *Block) WriteDateTime(c int, v time.Time) error {
	return block.buffers[c].Column.UInt32(uint32(v.Unix()))
}

func (block *Block) WriteDateTimeNullable(c int, v *time.Time) error {
	if err := block.buffers[c].Offset.Bool(v == nil); err != nil {
		return err
	}
	if v == nil {
		return block.buffers[c].Column.UInt32(0)
	}
	return block.buffers[c].Column.UInt32(uint32(v.Unix()))
}

func (block *Block) WriteBool(c int, v bool) error {
	if v {
		return block.buffers[c].Column.UInt8(1)
	}
	return block.buffers[c].Column.UInt8(0)
}

func (block *Block) WriteBoolNullable(c int, v *bool) error {
	if err := block.buffers[c].Offset.Bool(v == nil); err != nil {
		return err
	}
	if v == nil || !(*v) {
		return block.buffers[c].Column.UInt8(0)
	}
	return block.buffers[c].Column.UInt8(1)
}

func (block *Block) WriteInt8(c int, v int8) error {
	return block.buffers[c].Column.Int8(v)
}

func (block *Block) WriteInt8Nullable(c int, v *int8) error {
	if err := block.buffers[c].Offset.Bool(v == nil); err != nil {
		return err
	}
	if v == nil {
		return block.buffers[c].Column.Int8(0)
	}
	return block.buffers[c].Column.Int8(*v)
}

func (block *Block) WriteInt16(c int, v int16) error {
	return block.buffers[c].Column.Int16(v)
}

func (block *Block) WriteInt16Nullable(c int, v *int16) error {
	if err := block.buffers[c].Offset.Bool(v == nil); err != nil {
		return err
	}
	if v == nil {
		return block.buffers[c].Column.Int16(0)
	}
	return block.buffers[c].Column.Int16(*v)
}

func (block *Block) WriteInt32(c int, v int32) error {
	return block.buffers[c].Column.Int32(v)
}

func (block *Block) WriteInt32Nullable(c int, v *int32) error {
	if err := block.buffers[c].Offset.Bool(v == nil); err != nil {
		return err
	}
	if v == nil {
		return block.buffers[c].Column.Int32(0)
	}
	return block.buffers[c].Column.Int32(*v)
}

func (block *Block) WriteInt64(c int, v int64) error {
	return block.buffers[c].Column.Int64(v)
}

func (block *Block) WriteInt64Nullable(c int, v *int64) error {
	if err := block.buffers[c].Offset.Bool(v == nil); err != nil {
		return err
	}
	if v == nil {
		return block.buffers[c].Column.Int64(0)
	}
	return block.buffers[c].Column.Int64(*v)
}

func (block *Block) WriteUInt8(c int, v uint8) error {
	return block.buffers[c].Column.UInt8(v)
}

func (block *Block) WriteUInt8Nullable(c int, v *uint8) error {
	if err := block.buffers[c].Offset.Bool(v == nil); err != nil {
		return err
	}
	if v == nil {
		return block.buffers[c].Column.UInt8(0)
	}
	return block.buffers[c].Column.UInt8(*v)
}

func (block *Block) WriteUInt16(c int, v uint16) error {
	return block.buffers[c].Column.UInt16(v)
}

func (block *Block) WriteUInt16Nullable(c int, v *uint16) error {
	if err := block.buffers[c].Offset.Bool(v == nil); err != nil {
		return err
	}
	if v == nil {
		return block.buffers[c].Column.UInt16(0)
	}
	return block.buffers[c].Column.UInt16(*v)
}

func (block *Block) WriteUInt32(c int, v uint32) error {
	return block.buffers[c].Column.UInt32(v)
}

func (block *Block) WriteUInt32Nullable(c int, v *uint32) error {
	if err := block.buffers[c].Offset.Bool(v == nil); err != nil {
		return err
	}
	if v == nil {
		return block.buffers[c].Column.UInt32(0)
	}
	return block.buffers[c].Column.UInt32(*v)
}

func (block *Block) WriteUInt64(c int, v uint64) error {
	return block.buffers[c].Column.UInt64(v)
}

func (block *Block) WriteUInt64Nullable(c int, v *uint64) error {
	if err := block.buffers[c].Offset.Bool(v == nil); err != nil {
		return err
	}
	if v == nil {
		return block.buffers[c].Column.UInt64(0)
	}
	return block.buffers[c].Column.UInt64(*v)
}

func (block *Block) WriteFloat32(c int, v float32) error {
	return block.buffers[c].Column.Float32(v)
}

func (block *Block) WriteFloat32Nullable(c int, v *float32) error {
	if err := block.buffers[c].Offset.Bool(v == nil); err != nil {
		return err
	}
	if v == nil {
		return block.buffers[c].Column.Float32(0)
	}
	return block.buffers[c].Column.Float32(*v)
}

func (block *Block) WriteFloat64(c int, v float64) error {
	return block.buffers[c].Column.Float64(v)
}

func (block *Block) WriteFloat64Nullable(c int, v *float64) error {
	if err := block.buffers[c].Offset.Bool(v == nil); err != nil {
		return err
	}
	if v == nil {
		return block.buffers[c].Column.Float64(0)
	}
	return block.buffers[c].Column.Float64(*v)
}

func (block *Block) WriteBytes(c int, v []byte) error {
	if err := block.buffers[c].Column.Uvarint(uint64(len(v))); err != nil {
		return err
	}
	if _, err := block.buffers[c].Column.Write(v); err != nil {
		return err
	}
	return nil
}

func (block *Block) WriteBytesNullable(c int, v *[]byte) error {
	if err := block.buffers[c].Offset.Bool(v == nil); err != nil {
		return err
	}
	if v == nil {
		return block.WriteBytes(c, []byte{})
	}
	return block.WriteBytes(c, *v)
}

func (block *Block) WriteString(c int, v string) error {
	if err := block.buffers[c].Column.Uvarint(uint64(len(v))); err != nil {
		return err
	}
	if _, err := block.buffers[c].Column.Write(binary.Str2Bytes(v)); err != nil {
		return err
	}
	return nil
}

func (block *Block) WriteStringNullable(c int, v *string) error {
	if err := block.buffers[c].Offset.Bool(v == nil); err != nil {
		return err
	}
	if v == nil {
		return block.WriteString(c, "")
	}
	return block.WriteString(c, *v)
}

func (block *Block) WriteFixedString(c int, v []byte) error {
	return block.Columns[c].Write(block.buffers[c].Column, v)
}

func (block *Block) WriteFixedStringNullable(c int, v *[]byte) error {
	writer := block.Columns[c].(*column.Nullable)
	return writer.WriteNull(block.buffers[c].Offset, block.buffers[c].Column, v)
}

func (block *Block) WriteIP(c int, v net.IP) error {
	return block.Columns[c].Write(block.buffers[c].Column, v)
}

func (block *Block) WriteIPNullable(c int, v net.IP) error {
	writer := block.Columns[c].(*column.Nullable)
	return writer.WriteNull(block.buffers[c].Offset, block.buffers[c].Column, v)
}

func (block *Block) WriteArray(c int, v interface{}) error {
	return block.WriteArrayWithValue(c, newValue(reflect.ValueOf(v)))
}

func (block *Block) WriteArrayWithValue(c int, value Value) error {
	if value.Kind() != reflect.Slice {
		return fmt.Errorf("unsupported Array(T) type [%T]", value.Interface())
	}
	return block.writeArray(block.Columns[c], value, c, 1)
}

func (block *Block) WriteArrayNullable(c int, v *interface{}) error {
	if err := block.buffers[c].Offset.Bool(v == nil); err != nil {
		return err
	}
	if v == nil {
		return block.Columns[c].Write(block.buffers[c].Column, []string{})
	}
	return block.WriteArray(c, *v)
}
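All the `Write*Nullable` helpers above share one discipline: the null flag goes to the offset stream while a zero placeholder still goes to the column stream, keeping the two row-aligned. A small sketch of that layout, with plain slices standing in for the encoders:

```go
package main

import "fmt"

func ptr(v uint32) *uint32 { return &v }

// Null flags and values travel as two row-aligned streams; a nil row
// still contributes a zero placeholder to the value stream.
func main() {
	vals := []*uint32{ptr(7), nil, ptr(9)}

	var nulls []uint8
	var values []uint32
	for _, v := range vals {
		if v == nil {
			nulls = append(nulls, 1)
			values = append(values, 0) // placeholder keeps the streams aligned
		} else {
			nulls = append(nulls, 0)
			values = append(values, *v)
		}
	}
	fmt.Println(nulls)  // [0 1 0]
	fmt.Println(values) // [7 0 9]
}
```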
29
vendor/github.com/ClickHouse/clickhouse-go/lib/data/client_info.go
generated
vendored
@ -1,29 +0,0 @@
package data

import (
	"fmt"

	"github.com/ClickHouse/clickhouse-go/lib/binary"
)

const ClientName = "Golang SQLDriver"

const (
	ClickHouseRevision         = 54213
	ClickHouseDBMSVersionMajor = 1
	ClickHouseDBMSVersionMinor = 1
)

type ClientInfo struct{}

func (ClientInfo) Write(encoder *binary.Encoder) error {
	encoder.String(ClientName)
	encoder.Uvarint(ClickHouseDBMSVersionMajor)
	encoder.Uvarint(ClickHouseDBMSVersionMinor)
	encoder.Uvarint(ClickHouseRevision)
	return nil
}

func (ClientInfo) String() string {
	return fmt.Sprintf("%s %d.%d.%d", ClientName, ClickHouseDBMSVersionMajor, ClickHouseDBMSVersionMinor, ClickHouseRevision)
}
47
vendor/github.com/ClickHouse/clickhouse-go/lib/data/server_info.go
generated
vendored
@ -1,47 +0,0 @@
package data

import (
	"fmt"
	//"io"
	"time"

	"github.com/ClickHouse/clickhouse-go/lib/binary"
	"github.com/ClickHouse/clickhouse-go/lib/protocol"
)

type ServerInfo struct {
	Name         string
	Revision     uint64
	MinorVersion uint64
	MajorVersion uint64
	Timezone     *time.Location
}

func (srv *ServerInfo) Read(decoder *binary.Decoder) (err error) {
	if srv.Name, err = decoder.String(); err != nil {
		return fmt.Errorf("could not read server name: %v", err)
	}
	if srv.MajorVersion, err = decoder.Uvarint(); err != nil {
		return fmt.Errorf("could not read server major version: %v", err)
	}
	if srv.MinorVersion, err = decoder.Uvarint(); err != nil {
		return fmt.Errorf("could not read server minor version: %v", err)
	}
	if srv.Revision, err = decoder.Uvarint(); err != nil {
		return fmt.Errorf("could not read server revision: %v", err)
	}
	if srv.Revision >= protocol.DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE {
		timezone, err := decoder.String()
		if err != nil {
			return fmt.Errorf("could not read server timezone: %v", err)
		}
		if srv.Timezone, err = time.LoadLocation(timezone); err != nil {
			return fmt.Errorf("could not load time location: %v", err)
		}
	}
	return nil
}

func (srv ServerInfo) String() string {
	return fmt.Sprintf("%s %d.%d.%d (%s)", srv.Name, srv.MajorVersion, srv.MinorVersion, srv.Revision, srv.Timezone)
}
33
vendor/github.com/ClickHouse/clickhouse-go/lib/data/value.go
generated
vendored
@ -1,33 +0,0 @@
package data

import "reflect"

// Value is a writable value.
type Value interface {
	// Kind returns value's Kind.
	Kind() reflect.Kind

	// Len returns value's length.
	// It panics if value's Kind is not Array, Chan, Map, Slice, or String.
	Len() int

	// Index returns value's i'th element.
	// It panics if value's Kind is not Array, Slice, or String or i is out of range.
	Index(i int) Value

	// Interface returns value's current value as an interface{}.
	Interface() interface{}
}

// value is a wrapper that wraps reflect.Value to comply with Value interface.
type value struct {
	reflect.Value
}

func newValue(v reflect.Value) Value {
	return value{Value: v}
}

func (v value) Index(i int) Value {
	return newValue(v.Value.Index(i))
}
23
vendor/github.com/ClickHouse/clickhouse-go/lib/lz4/LICENSE
generated
vendored
@ -1,23 +0,0 @@
Copyright 2011-2012 Branimir Karadzic. All rights reserved.
Copyright 2013 Damian Gryski. All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
7
vendor/github.com/ClickHouse/clickhouse-go/lib/lz4/doc.go
generated
vendored
@ -1,7 +0,0 @@
// Copyright 2011-2012 Branimir Karadzic. All rights reserved.
// Copyright 2013 Damian Gryski. All rights reserved.

// @LINK: https://github.com/bkaradzic/go-lz4
// @NOTE: The code is modified to be high performance and less memory usage

package lz4
23
vendor/github.com/ClickHouse/clickhouse-go/lib/lz4/fuzz.go
generated
vendored
@ -1,23 +0,0 @@
// +build gofuzz

package lz4

import "encoding/binary"

func Fuzz(data []byte) int {

	if len(data) < 4 {
		return 0
	}

	ln := binary.LittleEndian.Uint32(data)
	if ln > (1 << 21) {
		return 0
	}

	if _, err := Decode(nil, data); err != nil {
		return 0
	}

	return 1
}
179
vendor/github.com/ClickHouse/clickhouse-go/lib/lz4/reader.go
generated
vendored
@ -1,179 +0,0 @@
/*
 * Copyright 2011-2012 Branimir Karadzic. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

package lz4

import (
	"errors"
	"io"
)

var (
	// ErrCorrupt indicates the input was corrupt
	ErrCorrupt = errors.New("corrupt input")
)

const (
	mlBits  = 4
	mlMask  = (1 << mlBits) - 1
	runBits = 8 - mlBits
	runMask = (1 << runBits) - 1
)

type decoder struct {
	src  []byte
	dst  []byte
	spos uint32
	dpos uint32
	ref  uint32
}

func (d *decoder) readByte() (uint8, error) {
	if int(d.spos) == len(d.src) {
		return 0, io.EOF
	}
	b := d.src[d.spos]
	d.spos++
	return b, nil
}

func (d *decoder) getLen() (uint32, error) {

	length := uint32(0)
	ln, err := d.readByte()
	if err != nil {
		return 0, ErrCorrupt
	}
	for ln == 255 {
		length += 255
		ln, err = d.readByte()
		if err != nil {
			return 0, ErrCorrupt
		}
	}
	length += uint32(ln)

	return length, nil
}

func (d *decoder) cp(length, decr uint32) {

	if int(d.ref+length) < int(d.dpos) {
		copy(d.dst[d.dpos:], d.dst[d.ref:d.ref+length])
	} else {
		for ii := uint32(0); ii < length; ii++ {
			d.dst[d.dpos+ii] = d.dst[d.ref+ii]
		}
	}
	d.dpos += length
	d.ref += length - decr
}

func (d *decoder) finish(err error) error {
	if err == io.EOF {
		return nil
	}

	return err
}

// Decode returns the decoded form of src. The returned slice may be a
// subslice of dst if it was large enough to hold the entire decoded block.
func Decode(dst, src []byte) (int, error) {
	d := decoder{src: src, dst: dst, spos: 0}

	decr := []uint32{0, 3, 2, 3}

	for {
		code, err := d.readByte()
		if err != nil {
			return len(d.dst), d.finish(err)
		}

		length := uint32(code >> mlBits)
		if length == runMask {
			ln, err := d.getLen()
			if err != nil {
				return 0, ErrCorrupt
			}
			length += ln
		}

		if int(d.spos+length) > len(d.src) || int(d.dpos+length) > len(d.dst) {
			return 0, ErrCorrupt
		}

		for ii := uint32(0); ii < length; ii++ {
			d.dst[d.dpos+ii] = d.src[d.spos+ii]
		}

		d.spos += length
		d.dpos += length

		if int(d.spos) == len(d.src) {
			return len(d.dst), nil
		}

		if int(d.spos+2) >= len(d.src) {
			return 0, ErrCorrupt
		}

		back := uint32(d.src[d.spos]) | uint32(d.src[d.spos+1])<<8

		if back > d.dpos {
			return 0, ErrCorrupt
		}

		d.spos += 2
		d.ref = d.dpos - back

		length = uint32(code & mlMask)
		if length == mlMask {
			ln, err := d.getLen()
			if err != nil {
				return 0, ErrCorrupt
			}
			length += ln
		}

		literal := d.dpos - d.ref

		if literal < 4 {
			if int(d.dpos+4) > len(d.dst) {
				return 0, ErrCorrupt
			}

			d.cp(4, decr[literal])
		} else {
			length += 4
		}

		if int(d.dpos+length) > len(d.dst) {
			return 0, ErrCorrupt
		}

		d.cp(length, 0)
	}
}
203
vendor/github.com/ClickHouse/clickhouse-go/lib/lz4/writer.go
generated
vendored
@ -1,203 +0,0 @@
/*
 * Copyright 2011-2012 Branimir Karadzic. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

package lz4

import (
	"errors"
	"sync"
)

const (
	minMatch              = 4
	hashLog               = 16
	hashTableSize         = 1 << hashLog
	hashShift             = (minMatch * 8) - hashLog
	incompressible uint32 = 128
	uninitHash            = 0x88888888

	mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes.
	// MaxInputSize is the largest buffer than can be compressed in a single block
	MaxInputSize = 0x7E000000
)

var (
	// ErrTooLarge indicates the input buffer was too large
	ErrTooLarge       = errors.New("input too large")
	ErrEncodeTooSmall = errors.New("encode buffer too small")

	hashPool = sync.Pool{
		New: func() interface{} {
			return make([]uint32, hashTableSize)
		},
	}
)

type encoder struct {
	src       []byte
	dst       []byte
	hashTable []uint32
	pos       uint32
	anchor    uint32
	dpos      uint32
}

// CompressBound returns the maximum length of a lz4 block
func CompressBound(isize int) int {
	if isize > MaxInputSize {
		return 0
	}
	return isize + ((isize) / 255) + 16
}

func (e *encoder) writeLiterals(length, mlLen, pos uint32) {

	ln := length

	var code byte
	if ln > runMask-1 {
		code = runMask
	} else {
		code = byte(ln)
	}

	if mlLen > mlMask-1 {
		e.dst[e.dpos] = (code << mlBits) + byte(mlMask)
	} else {
		e.dst[e.dpos] = (code << mlBits) + byte(mlLen)
	}
	e.dpos++

	if code == runMask {
		ln -= runMask
		for ; ln > 254; ln -= 255 {
			e.dst[e.dpos] = 255
			e.dpos++
		}

		e.dst[e.dpos] = byte(ln)
		e.dpos++
	}

	for ii := uint32(0); ii < length; ii++ {
		e.dst[e.dpos+ii] = e.src[pos+ii]
	}

	e.dpos += length
}

// Encode returns the encoded form of src. The returned array may be a
// sub-slice of dst if it was large enough to hold the entire output.
func Encode(dst, src []byte) (compressedSize int, error error) {
	if len(src) >= MaxInputSize {
		return 0, ErrTooLarge
	}

	if n := CompressBound(len(src)); len(dst) < n {
		return 0, ErrEncodeTooSmall
	}

	hashTable := hashPool.Get().([]uint32)
	for i := range hashTable {
		hashTable[i] = 0
	}
	e := encoder{src: src, dst: dst, hashTable: hashTable}
	defer func() {
		hashPool.Put(hashTable)
	}()
	// binary.LittleEndian.PutUint32(dst, uint32(len(src)))
	// e.dpos = 0

	var (
		step  uint32 = 1
		limit        = incompressible
	)

	for {
		if int(e.pos)+12 >= len(e.src) {
			e.writeLiterals(uint32(len(e.src))-e.anchor, 0, e.anchor)
			return int(e.dpos), nil
		}

		sequence := uint32(e.src[e.pos+3])<<24 | uint32(e.src[e.pos+2])<<16 | uint32(e.src[e.pos+1])<<8 | uint32(e.src[e.pos+0])

		hash := (sequence * 2654435761) >> hashShift
		ref := e.hashTable[hash] + uninitHash
		e.hashTable[hash] = e.pos - uninitHash

		if ((e.pos-ref)>>16) != 0 || uint32(e.src[ref+3])<<24|uint32(e.src[ref+2])<<16|uint32(e.src[ref+1])<<8|uint32(e.src[ref+0]) != sequence {
			if e.pos-e.anchor > limit {
				limit <<= 1
				step += 1 + (step >> 2)
			}
			e.pos += step
			continue
		}

		if step > 1 {
			e.hashTable[hash] = ref - uninitHash
			e.pos -= step - 1
			step = 1
			continue
		}
		limit = incompressible

		ln := e.pos - e.anchor
		back := e.pos - ref

		anchor := e.anchor

		e.pos += minMatch
		ref += minMatch
		e.anchor = e.pos

		for int(e.pos) < len(e.src)-5 && e.src[e.pos] == e.src[ref] {
			e.pos++
			ref++
		}

		mlLen := e.pos - e.anchor

		e.writeLiterals(ln, mlLen, anchor)
		e.dst[e.dpos] = uint8(back)
		e.dst[e.dpos+1] = uint8(back >> 8)
		e.dpos += 2

		if mlLen > mlMask-1 {
			mlLen -= mlMask
			for mlLen > 254 {
				mlLen -= 255

				e.dst[e.dpos] = 255
				e.dpos++
			}

			e.dst[e.dpos] = byte(mlLen)
			e.dpos++
		}

		e.anchor = e.pos
	}
}
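Taken together, `Encode` and `Decode` above round-trip a raw LZ4 block. Note the block itself carries no decompressed length, so the caller sizes the output buffer (the driver recovers that length from the compressed frame header). A hedged sketch, assuming the vendored import path were still available:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ClickHouse/clickhouse-go/lib/lz4"
)

func main() {
	src := []byte("clickhouse clickhouse clickhouse clickhouse")

	// Encode needs dst sized to at least CompressBound(len(src)).
	comp := make([]byte, lz4.CompressBound(len(src)))
	n, err := lz4.Encode(comp, src)
	if err != nil {
		panic(err)
	}

	// Decode writes into a caller-sized buffer: the block format has no
	// embedded decompressed length.
	dst := make([]byte, len(src))
	if _, err := lz4.Decode(dst, comp[:n]); err != nil {
		panic(err)
	}
	fmt.Printf("%d -> %d bytes, round trip ok: %v\n", len(src), n, bytes.Equal(dst, src))
}
```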
4
vendor/github.com/ClickHouse/clickhouse-go/lib/protocol/README.md
generated
vendored
@ -1,4 +0,0 @@
# ClickHouse Native protocol

# Handshake
35
vendor/github.com/ClickHouse/clickhouse-go/lib/protocol/protocol.go
generated
vendored
@ -1,35 +0,0 @@
package protocol

const (
	DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE          = 54058
	DBMS_MIN_REVISION_WITH_QUOTA_KEY_IN_CLIENT_INFO = 54060
)

const (
	ClientHello  = 0
	ClientQuery  = 1
	ClientData   = 2
	ClientCancel = 3
	ClientPing   = 4
)

const (
	CompressEnable  uint64 = 1
	CompressDisable uint64 = 0
)

const (
	StateComplete = 2
)

const (
	ServerHello       = 0
	ServerData        = 1
	ServerException   = 2
	ServerProgress    = 3
	ServerPong        = 4
	ServerEndOfStream = 5
	ServerProfileInfo = 6
	ServerTotals      = 7
	ServerExtremes    = 8
)
48
vendor/github.com/ClickHouse/clickhouse-go/lib/types/date.go
generated
vendored
@ -1,48 +0,0 @@
// Timezoneless date/datetime types

package types

import (
	"database/sql/driver"
	"time"
)

// Truncate timezone
//
// clickhouse.Date(time.Date(2017, 1, 1, 0, 0, 0, 0, time.Local)) -> time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC)
type Date time.Time

func (date Date) Value() (driver.Value, error) {
	return date.convert(), nil
}

func (date Date) convert() time.Time {
	return time.Date(time.Time(date).Year(), time.Time(date).Month(), time.Time(date).Day(), 0, 0, 0, 0, time.UTC)
}

// Truncate timezone
//
// clickhouse.DateTime(time.Date(2017, 1, 1, 0, 0, 0, 0, time.Local)) -> time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC)
type DateTime time.Time

func (datetime DateTime) Value() (driver.Value, error) {
	return datetime.convert(), nil
}

func (datetime DateTime) convert() time.Time {
	return time.Date(
		time.Time(datetime).Year(),
		time.Time(datetime).Month(),
		time.Time(datetime).Day(),
		time.Time(datetime).Hour(),
		time.Time(datetime).Minute(),
		time.Time(datetime).Second(),
		1,
		time.UTC,
	)
}

var (
	_ driver.Valuer = Date{}
	_ driver.Valuer = DateTime{}
)
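The effect of the two `convert` methods above, in miniature: both re-stamp the wall-clock reading as UTC, with `Date` additionally dropping the time of day. A sketch against the vendored package path:

```go
package main

import (
	"fmt"
	"time"

	"github.com/ClickHouse/clickhouse-go/lib/types"
)

func main() {
	local := time.Date(2017, 1, 1, 15, 4, 5, 0, time.Local)

	// Date drops the clock and re-stamps the day as UTC.
	d, _ := types.Date(local).Value()
	fmt.Println(d) // 2017-01-01 00:00:00 +0000 UTC

	// DateTime keeps the wall-clock reading but re-stamps it as UTC.
	dt, _ := types.DateTime(local).Value()
	fmt.Println(dt)
}
```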
99
vendor/github.com/ClickHouse/clickhouse-go/lib/types/uuid.go
generated
vendored
@ -1,99 +0,0 @@
package types

import (
	"database/sql/driver"
	"encoding/hex"
	"errors"
	"fmt"
)

var InvalidUUIDFormatError = errors.New("invalid UUID format")

// this type will be deprecated because the ClickHouse server (>=1.1.54276) has a built-in type UUID
type UUID string

func (str UUID) Value() (driver.Value, error) {
	return uuid2bytes(string(str))
}

func (str UUID) MarshalBinary() ([]byte, error) {
	return uuid2bytes(string(str))
}

func (str *UUID) Scan(v interface{}) error {
	var src []byte
	switch v := v.(type) {
	case string:
		src = []byte(v)
	case []byte:
		src = v
	}

	if len(src) != 16 {
		return fmt.Errorf("invalid UUID length: %d", len(src))
	}

	var uuid [36]byte
	{
		hex.Encode(uuid[:], src[:4])
		uuid[8] = '-'
		hex.Encode(uuid[9:13], src[4:6])
		uuid[13] = '-'
		hex.Encode(uuid[14:18], src[6:8])
		uuid[18] = '-'
		hex.Encode(uuid[19:23], src[8:10])
		uuid[23] = '-'
		hex.Encode(uuid[24:], src[10:])
	}
	*str = UUID(uuid[:])
	return nil
}

func uuid2bytes(str string) ([]byte, error) {
	var uuid [16]byte
	if str[8] != '-' || str[13] != '-' || str[18] != '-' || str[23] != '-' {
		return nil, InvalidUUIDFormatError
	}
	for i, x := range [16]int{
		0, 2, 4, 6,
		9, 11, 14, 16,
		19, 21, 24, 26,
		28, 30, 32, 34,
	} {
		if v, ok := xtob(str[x], str[x+1]); !ok {
			return nil, InvalidUUIDFormatError
		} else {
			uuid[i] = v
		}
	}
	return uuid[:], nil
}

// xvalues returns the value of a byte as a hexadecimal digit or 255.
var xvalues = [256]byte{
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
}

// xtob converts hex characters x1 and x2 into a byte.
func xtob(x1, x2 byte) (byte, bool) {
	b1 := xvalues[x1]
	b2 := xvalues[x2]
	return (b1 << 4) | b2, b1 != 255 && b2 != 255
}

var _ driver.Valuer = UUID("")
286
vendor/github.com/ClickHouse/clickhouse-go/query_settings.go
generated
vendored
@ -1,286 +0,0 @@
|
||||
package clickhouse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go/lib/binary"
|
||||
)
|
||||
|
||||
type querySettingType int
|
||||
|
||||
// all possible query setting's data type
|
||||
const (
|
||||
uintQS querySettingType = iota + 1
|
||||
intQS
|
||||
boolQS
|
||||
timeQS
|
||||
)
|
||||
|
||||
// description of single query setting
|
||||
type querySettingInfo struct {
|
||||
name string
|
||||
qsType querySettingType
|
||||
}
|
||||
|
||||
// all possible query settings
|
||||
var querySettingList = []querySettingInfo{
|
||||
{"min_compress_block_size", uintQS},
|
||||
{"max_compress_block_size", uintQS},
|
||||
{"max_block_size", uintQS},
|
||||
{"max_insert_block_size", uintQS},
|
||||
{"min_insert_block_size_rows", uintQS},
|
||||
{"min_insert_block_size_bytes", uintQS},
|
||||
{"max_read_buffer_size", uintQS},
|
||||
{"max_distributed_connections", uintQS},
|
||||
{"max_query_size", uintQS},
|
||||
{"interactive_delay", uintQS},
|
||||
{"poll_interval", uintQS},
|
||||
{"distributed_connections_pool_size", uintQS},
|
||||
{"connections_with_failover_max_tries", uintQS},
|
||||
{"background_pool_size", uintQS},
|
||||
{"background_schedule_pool_size", uintQS},
|
||||
{"replication_alter_partitions_sync", uintQS},
|
||||
{"replication_alter_columns_timeout", uintQS},
|
||||
{"min_count_to_compile", uintQS},
|
||||
{"min_count_to_compile_expression", uintQS},
|
||||
{"group_by_two_level_threshold", uintQS},
|
||||
{"group_by_two_level_threshold_bytes", uintQS},
|
||||
{"aggregation_memory_efficient_merge_threads", uintQS},
|
||||
{"max_parallel_replicas", uintQS},
|
||||
{"parallel_replicas_count", uintQS},
|
||||
{"parallel_replica_offset", uintQS},
|
||||
{"merge_tree_min_rows_for_concurrent_read", uintQS},
|
||||
{"merge_tree_min_bytes_for_concurrent_read", uintQS},
|
||||
{"merge_tree_min_rows_for_seek", uintQS},
|
||||
{"merge_tree_min_bytes_for_seek", uintQS},
|
||||
{"merge_tree_coarse_index_granularity", uintQS},
|
||||
{"merge_tree_max_rows_to_use_cache", uintQS},
|
||||
{"merge_tree_max_bytes_to_use_cache", uintQS},
|
||||
{"mysql_max_rows_to_insert", uintQS},
|
||||
{"optimize_min_equality_disjunction_chain_length", uintQS},
|
||||
{"min_bytes_to_use_direct_io", uintQS},
|
||||
{"mark_cache_min_lifetime", uintQS},
|
||||
{"priority", uintQS},
|
||||
{"log_queries_cut_to_length", uintQS},
|
||||
{"max_concurrent_queries_for_user", uintQS},
|
||||
{"insert_quorum", uintQS},
|
||||
{"select_sequential_consistency", uintQS},
|
||||
{"table_function_remote_max_addresses", uintQS},
|
||||
{"read_backoff_max_throughput", uintQS},
|
||||
{"read_backoff_min_events", uintQS},
|
||||
{"output_format_pretty_max_rows", uintQS},
|
||||
{"output_format_pretty_max_column_pad_width", uintQS},
|
||||
{"output_format_parquet_row_group_size", uintQS},
|
||||
{"http_headers_progress_interval_ms", uintQS},
|
||||
{"input_format_allow_errors_num", uintQS},
|
||||
{"preferred_block_size_bytes", uintQS},
|
||||
{"max_replica_delay_for_distributed_queries", uintQS},
|
||||
{"preferred_max_column_in_block_size_bytes", uintQS},
|
||||
{"insert_distributed_timeout", uintQS},
|
||||
{"odbc_max_field_size", uintQS},
|
||||
{"max_rows_to_read", uintQS},
|
||||
{"max_bytes_to_read", uintQS},
|
||||
{"max_rows_to_group_by", uintQS},
|
||||
{"max_bytes_before_external_group_by", uintQS},
|
||||
{"max_rows_to_sort", uintQS},
|
||||
{"max_bytes_to_sort", uintQS},
|
||||
{"max_bytes_before_external_sort", uintQS},
|
||||
{"max_bytes_before_remerge_sort", uintQS},
|
||||
{"max_result_rows", uintQS},
|
||||
{"max_result_bytes", uintQS},
|
||||
{"min_execution_speed", uintQS},
|
||||
{"max_execution_speed", uintQS},
|
||||
{"min_execution_speed_bytes", uintQS},
|
||||
{"max_execution_speed_bytes", uintQS},
|
||||
{"max_columns_to_read", uintQS},
|
||||
{"max_temporary_columns", uintQS},
|
||||
{"max_temporary_non_const_columns", uintQS},
|
||||
{"max_subquery_depth", uintQS},
|
||||
{"max_pipeline_depth", uintQS},
|
||||
{"max_ast_depth", uintQS},
|
||||
{"max_ast_elements", uintQS},
|
||||
{"max_expanded_ast_elements", uintQS},
|
||||
{"readonly", uintQS},
|
||||
{"max_rows_in_set", uintQS},
|
||||
{"max_bytes_in_set", uintQS},
|
||||
{"max_rows_in_join", uintQS},
|
||||
{"max_bytes_in_join", uintQS},
|
||||
{"max_rows_to_transfer", uintQS},
|
||||
{"max_bytes_to_transfer", uintQS},
|
||||
{"max_rows_in_distinct", uintQS},
|
||||
{"max_bytes_in_distinct", uintQS},
|
||||
{"max_memory_usage", uintQS},
|
||||
{"max_memory_usage_for_user", uintQS},
|
||||
{"max_memory_usage_for_all_queries", uintQS},
|
||||
{"max_network_bandwidth", uintQS},
|
||||
{"max_network_bytes", uintQS},
|
||||
{"max_network_bandwidth_for_user", uintQS},
|
||||
{"max_network_bandwidth_for_all_users", uintQS},
|
||||
{"low_cardinality_max_dictionary_size", uintQS},
|
||||
{"max_fetch_partition_retries_count", uintQS},
|
||||
{"http_max_multipart_form_data_size", uintQS},
|
||||
{"max_partitions_per_insert_block", uintQS},
|
||||
{"max_threads", uintQS},
|
||||
{"optimize_skip_unused_shards_nesting", uintQS},
|
||||
{"force_optimize_skip_unused_shards", uintQS},
|
||||
{"force_optimize_skip_unused_shards_nesting", uintQS},
|
||||
|
||||
{"network_zstd_compression_level", intQS},
|
||||
{"http_zlib_compression_level", intQS},
|
||||
{"distributed_ddl_task_timeout", intQS},
|
||||
|
||||
{"extremes", boolQS},
|
||||
{"use_uncompressed_cache", boolQS},
|
||||
{"replace_running_query", boolQS},
|
||||
{"distributed_directory_monitor_batch_inserts", boolQS},
|
||||
{"optimize_move_to_prewhere", boolQS},
|
||||
{"compile", boolQS},
|
||||
{"allow_suspicious_low_cardinality_types", boolQS},
|
||||
{"compile_expressions", boolQS},
|
||||
{"distributed_aggregation_memory_efficient", boolQS},
|
||||
{"skip_unavailable_shards", boolQS},
|
||||
{"distributed_group_by_no_merge", boolQS},
|
||||
{"optimize_skip_unused_shards", boolQS},
|
||||
{"merge_tree_uniform_read_distribution", boolQS},
|
||||
{"force_index_by_date", boolQS},
|
||||
{"force_primary_key", boolQS},
|
||||
{"log_queries", boolQS},
|
||||
{"insert_deduplicate", boolQS},
|
||||
{"enable_http_compression", boolQS},
|
||||
{"http_native_compression_disable_checksumming_on_decompress", boolQS},
|
||||
{"output_format_write_statistics", boolQS},
|
||||
{"add_http_cors_header", boolQS},
|
||||
{"input_format_skip_unknown_fields", boolQS},
|
||||
{"input_format_with_names_use_header", boolQS},
|
||||
{"input_format_import_nested_json", boolQS},
|
||||
{"input_format_defaults_for_omitted_fields", boolQS},
|
||||
{"input_format_values_interpret_expressions", boolQS},
|
||||
{"output_format_json_quote_64bit_integers", boolQS},
|
||||
{"output_format_json_quote_denormals", boolQS},
|
||||
{"output_format_json_escape_forward_slashes", boolQS},
|
||||
{"output_format_pretty_color", boolQS},
|
||||
{"use_client_time_zone", boolQS},
|
||||
{"send_progress_in_http_headers", boolQS},
|
||||
{"fsync_metadata", boolQS},
|
||||
{"join_use_nulls", boolQS},
|
||||
{"fallback_to_stale_replicas_for_distributed_queries", boolQS},
|
||||
{"insert_distributed_sync", boolQS},
|
||||
{"insert_allow_materialized_columns", boolQS},
|
||||
{"optimize_throw_if_noop", boolQS},
|
||||
{"use_index_for_in_with_subqueries", boolQS},
|
||||
{"empty_result_for_aggregation_by_empty_set", boolQS},
|
||||
{"allow_distributed_ddl", boolQS},
|
||||
{"join_any_take_last_row", boolQS},
|
||||
{"format_csv_allow_single_quotes", boolQS},
|
||||
{"format_csv_allow_double_quotes", boolQS},
|
||||
{"log_profile_events", boolQS},
|
||||
{"log_query_settings", boolQS},
|
||||
{"log_query_threads", boolQS},
|
||||
{"enable_optimize_predicate_expression", boolQS},
|
||||
{"low_cardinality_use_single_dictionary_for_part", boolQS},
|
||||
{"decimal_check_overflow", boolQS},
|
||||
{"prefer_localhost_replica", boolQS},
|
||||
//{"asterisk_left_columns_only", boolQS},
|
||||
{"calculate_text_stack_trace", boolQS},
|
||||
{"allow_ddl", boolQS},
|
||||
{"parallel_view_processing", boolQS},
|
||||
{"enable_debug_queries", boolQS},
|
||||
{"enable_unaligned_array_join", boolQS},
|
||||
{"low_cardinality_allow_in_native_format", boolQS},
|
||||
{"allow_experimental_multiple_joins_emulation", boolQS},
|
||||
{"allow_experimental_cross_to_join_conversion", boolQS},
|
||||
{"cancel_http_readonly_queries_on_client_close", boolQS},
|
||||
{"external_table_functions_use_nulls", boolQS},
|
||||
{"allow_experimental_data_skipping_indices", boolQS},
|
||||
{"allow_hyperscan", boolQS},
|
||||
{"allow_simdjson", boolQS},
|
||||
|
||||
{"connect_timeout", timeQS},
|
||||
{"connect_timeout_with_failover_ms", timeQS},
|
||||
{"receive_timeout", timeQS},
|
||||
{"send_timeout", timeQS},
|
||||
{"tcp_keep_alive_timeout", timeQS},
|
||||
{"queue_max_wait_ms", timeQS},
|
||||
{"distributed_directory_monitor_sleep_time_ms", timeQS},
|
||||
{"insert_quorum_timeout", timeQS},
|
||||
{"read_backoff_min_latency_ms", timeQS},
|
||||
{"read_backoff_min_interval_between_events_ms", timeQS},
|
||||
{"stream_flush_interval_ms", timeQS},
|
||||
{"stream_poll_timeout_ms", timeQS},
|
||||
{"http_connection_timeout", timeQS},
|
||||
{"http_send_timeout", timeQS},
|
||||
{"http_receive_timeout", timeQS},
|
||||
{"max_execution_time", timeQS},
|
||||
{"timeout_before_checking_execution_speed", timeQS},
|
||||
}
|
||||
|
||||
type querySettingValueEncoder func(enc *binary.Encoder) error
|
||||
|
||||
type querySettings struct {
|
||||
settings map[string]querySettingValueEncoder
|
||||
settingsStr string // used for debug output
|
||||
}
|
||||
|
||||
func makeQuerySettings(query url.Values) (*querySettings, error) {
|
||||
qs := &querySettings{
|
||||
settings: make(map[string]querySettingValueEncoder),
|
||||
settingsStr: "",
|
||||
}
|
||||
|
||||
for _, info := range querySettingList {
|
||||
valueStr := query.Get(info.name)
|
||||
if valueStr == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
switch info.qsType {
|
||||
case uintQS, intQS, timeQS:
|
||||
value, err := strconv.ParseUint(valueStr, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qs.settings[info.name] = func(enc *binary.Encoder) error { return enc.Uvarint(value) }
|
||||
|
||||
case boolQS:
|
||||
valueBool, err := strconv.ParseBool(valueStr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
value := uint64(0)
|
||||
if valueBool {
|
||||
value = 1
|
||||
}
|
||||
qs.settings[info.name] = func(enc *binary.Encoder) error { return enc.Uvarint(value) }
|
||||
|
||||
default:
|
||||
err := fmt.Errorf("query setting %s has unsupported data type", info.name)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if qs.settingsStr != "" {
|
||||
qs.settingsStr += "&"
|
||||
}
|
||||
qs.settingsStr += info.name + "=" + valueStr
|
||||
}
|
||||
|
||||
return qs, nil
|
||||
}
|
||||
|
||||
func (qs *querySettings) IsEmpty() bool {
|
||||
return len(qs.settings) == 0
|
||||
}
|
||||
|
||||
func (qs *querySettings) Serialize(enc *binary.Encoder) error {
|
||||
for name, fn := range qs.settings {
|
||||
if err := enc.String(name); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := fn(enc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
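For context on the deleted query_settings.go: callers never touched these encoders directly; settings arrived as DSN query parameters and were parsed by makeQuerySettings. A minimal sketch of how they were passed through database/sql (the host and setting values are placeholders):

package main

import (
	"database/sql"
	"log"

	_ "github.com/ClickHouse/clickhouse-go"
)

func main() {
	// join_use_nulls is a boolQS setting, max_execution_time a timeQS setting;
	// both are taken from the DSN query string by makeQuerySettings.
	connect, err := sql.Open("clickhouse", "tcp://127.0.0.1:9000?join_use_nulls=true&max_execution_time=60")
	if err != nil {
		log.Fatal(err)
	}
	defer connect.Close()
	if err := connect.Ping(); err != nil {
		log.Fatal(err)
	}
}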
8
vendor/github.com/ClickHouse/clickhouse-go/result.go
generated
vendored
@ -1,8 +0,0 @@
package clickhouse

import "errors"

type result struct{}

func (*result) LastInsertId() (int64, error) { return 0, errors.New("LastInsertId is not supported") }
func (*result) RowsAffected() (int64, error) { return 0, errors.New("RowsAffected is not supported") }
182
vendor/github.com/ClickHouse/clickhouse-go/rows.go
generated
vendored
@ -1,182 +0,0 @@
package clickhouse

import (
	"database/sql/driver"
	"fmt"
	"io"
	"reflect"
	"sync"
	"time"

	"github.com/ClickHouse/clickhouse-go/lib/column"
	"github.com/ClickHouse/clickhouse-go/lib/data"
	"github.com/ClickHouse/clickhouse-go/lib/protocol"
)

type rows struct {
	ch           *clickhouse
	err          error
	mutex        sync.RWMutex
	finish       func()
	offset       int
	block        *data.Block
	totals       *data.Block
	extremes     *data.Block
	stream       chan *data.Block
	columns      []string
	blockColumns []column.Column
}

func (rows *rows) Columns() []string {
	return rows.columns
}

func (rows *rows) ColumnTypeScanType(idx int) reflect.Type {
	return rows.blockColumns[idx].ScanType()
}

func (rows *rows) ColumnTypeDatabaseTypeName(idx int) string {
	return rows.blockColumns[idx].CHType()
}

func (rows *rows) Next(dest []driver.Value) error {
	if rows.block == nil || int(rows.block.NumRows) <= rows.offset {
		switch block, ok := <-rows.stream; true {
		case !ok:
			if err := rows.error(); err != nil {
				return err
			}
			return io.EOF
		default:
			rows.block = block
			rows.offset = 0
		}
	}
	for i := range dest {
		dest[i] = rows.block.Values[i][rows.offset]
	}
	rows.offset++
	return nil
}

func (rows *rows) HasNextResultSet() bool {
	return rows.totals != nil || rows.extremes != nil
}

func (rows *rows) NextResultSet() error {
	switch {
	case rows.totals != nil:
		rows.block = rows.totals
		rows.offset = 0
		rows.totals = nil
	case rows.extremes != nil:
		rows.block = rows.extremes
		rows.offset = 0
		rows.extremes = nil
	default:
		return io.EOF
	}
	return nil
}

func (rows *rows) receiveData() error {
	defer close(rows.stream)
	var (
		err         error
		packet      uint64
		progress    *progress
		profileInfo *profileInfo
	)
	for {
		if packet, err = rows.ch.decoder.Uvarint(); err != nil {
			return rows.setError(err)
		}
		switch packet {
		case protocol.ServerException:
			rows.ch.logf("[rows] <- exception")
			return rows.setError(rows.ch.exception())
		case protocol.ServerProgress:
			if progress, err = rows.ch.progress(); err != nil {
				return rows.setError(err)
			}
			rows.ch.logf("[rows] <- progress: rows=%d, bytes=%d, total rows=%d",
				progress.rows,
				progress.bytes,
				progress.totalRows,
			)
		case protocol.ServerProfileInfo:
			if profileInfo, err = rows.ch.profileInfo(); err != nil {
				return rows.setError(err)
			}
			rows.ch.logf("[rows] <- profiling: rows=%d, bytes=%d, blocks=%d", profileInfo.rows, profileInfo.bytes, profileInfo.blocks)
		case protocol.ServerData, protocol.ServerTotals, protocol.ServerExtremes:
			var (
				block *data.Block
				begin = time.Now()
			)
			if block, err = rows.ch.readBlock(); err != nil {
				return rows.setError(err)
			}
			rows.ch.logf("[rows] <- data: packet=%d, columns=%d, rows=%d, elapsed=%s", packet, block.NumColumns, block.NumRows, time.Since(begin))
			if block.NumRows == 0 {
				continue
			}
			switch packet {
			case protocol.ServerData:
				rows.stream <- block
			case protocol.ServerTotals:
				rows.totals = block
			case protocol.ServerExtremes:
				rows.extremes = block
			}
		case protocol.ServerEndOfStream:
			rows.ch.logf("[rows] <- end of stream")
			return nil
		default:
			rows.ch.conn.Close()
			rows.ch.logf("[rows] unexpected packet [%d]", packet)
			return rows.setError(fmt.Errorf("[rows] unexpected packet [%d] from server", packet))
		}
	}
}

func (rows *rows) Close() error {
	rows.ch.logf("[rows] close")
	rows.columns = nil
	for range rows.stream {
	}
	rows.finish()
	return nil
}

func (rows *rows) error() error {
	rows.mutex.RLock()
	defer rows.mutex.RUnlock()
	return rows.err
}

func (rows *rows) setError(err error) error {
	rows.mutex.Lock()
	rows.err = err
	rows.mutex.Unlock()
	return err
}

func (rows *rows) ColumnTypeNullable(idx int) (nullable, ok bool) {
	_, ok = rows.blockColumns[idx].(*column.Nullable)
	return ok, true
}

func (rows *rows) ColumnTypePrecisionScale(idx int) (precision, scale int64, ok bool) {
	decimalVal, ok := rows.blockColumns[idx].(*column.Decimal)
	if !ok {
		if nullable, nullOk := rows.blockColumns[idx].(*column.Nullable); nullOk {
			decimalVal, ok = nullable.GetColumn().(*column.Decimal)
		}
	}
	if ok {
		return int64(decimalVal.GetPrecision()), int64(decimalVal.GetScale()), ok
	}
	return 0, 0, false
}
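Nothing in rows.go was exported; the blocks streamed by receiveData surface through the standard database/sql rows interface. A minimal sketch of the consumer side, assuming a reachable server at a placeholder address (system.numbers is a built-in ClickHouse table):

package main

import (
	"database/sql"
	"log"

	_ "github.com/ClickHouse/clickhouse-go"
)

func main() {
	connect, err := sql.Open("clickhouse", "tcp://127.0.0.1:9000") // placeholder host
	if err != nil {
		log.Fatal(err)
	}
	defer connect.Close()

	// Each data block received by receiveData is drained row by row via Next.
	rows, err := connect.Query("SELECT number FROM system.numbers LIMIT 5")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		var n uint64
		if err := rows.Scan(&n); err != nil {
			log.Fatal(err)
		}
		log.Println(n)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}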
215
vendor/github.com/ClickHouse/clickhouse-go/stmt.go
generated
vendored
@ -1,215 +0,0 @@
package clickhouse

import (
	"bytes"
	"context"
	"database/sql/driver"
	"unicode"

	"github.com/ClickHouse/clickhouse-go/lib/data"
)

type stmt struct {
	ch       *clickhouse
	query    string
	counter  int
	numInput int
	isInsert bool
}

var emptyResult = &result{}

type key string

var queryIDKey key

// WithQueryID puts a query ID into the context; ExecContext and QueryContext
// will pass it on to the server.
func WithQueryID(ctx context.Context, queryID string) context.Context {
	return context.WithValue(ctx, queryIDKey, queryID)
}

func (stmt *stmt) NumInput() int {
	switch {
	case stmt.ch.block != nil:
		return len(stmt.ch.block.Columns)
	case stmt.numInput < 0:
		return 0
	}
	return stmt.numInput
}

func (stmt *stmt) Exec(args []driver.Value) (driver.Result, error) {
	return stmt.execContext(context.Background(), args)
}

func (stmt *stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
	dargs := make([]driver.Value, len(args))
	for i, nv := range args {
		dargs[i] = nv.Value
	}
	return stmt.execContext(ctx, dargs)
}

func (stmt *stmt) execContext(ctx context.Context, args []driver.Value) (driver.Result, error) {
	if stmt.isInsert {
		stmt.counter++
		if err := stmt.ch.block.AppendRow(args); err != nil {
			return nil, err
		}
		if (stmt.counter % stmt.ch.blockSize) == 0 {
			stmt.ch.logf("[exec] flush block")
			if err := stmt.ch.writeBlock(stmt.ch.block, ""); err != nil {
				return nil, err
			}
			if err := stmt.ch.encoder.Flush(); err != nil {
				return nil, err
			}
		}
		return emptyResult, nil
	}
	query, externalTables := stmt.bind(convertOldArgs(args))
	if err := stmt.ch.sendQuery(ctx, query, externalTables); err != nil {
		return nil, err
	}
	if err := stmt.ch.process(); err != nil {
		return nil, err
	}
	return emptyResult, nil
}

func (stmt *stmt) Query(args []driver.Value) (driver.Rows, error) {
	return stmt.queryContext(context.Background(), convertOldArgs(args))
}

func (stmt *stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
	return stmt.queryContext(ctx, args)
}

func (stmt *stmt) queryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
	finish := stmt.ch.watchCancel(ctx)
	query, externalTables := stmt.bind(args)
	if err := stmt.ch.sendQuery(ctx, query, externalTables); err != nil {
		finish()
		return nil, err
	}
	meta, err := stmt.ch.readMeta()
	if err != nil {
		finish()
		return nil, err
	}
	rows := rows{
		ch:           stmt.ch,
		finish:       finish,
		stream:       make(chan *data.Block, 50),
		columns:      meta.ColumnNames(),
		blockColumns: meta.Columns,
	}
	go rows.receiveData()
	return &rows, nil
}

func (stmt *stmt) Close() error {
	stmt.ch.logf("[stmt] close")
	return nil
}

func (stmt *stmt) bind(args []driver.NamedValue) (string, []ExternalTable) {
	var (
		buf            bytes.Buffer
		index          int
		keyword        bool
		inBetween      bool
		like           = newMatcher("like")
		limit          = newMatcher("limit")
		offset         = newMatcher("offset")
		between        = newMatcher("between")
		and            = newMatcher("and")
		in             = newMatcher("in")
		from           = newMatcher("from")
		join           = newMatcher("join")
		subSelect      = newMatcher("select")
		externalTables = make([]ExternalTable, 0)
	)
	switch {
	case stmt.NumInput() != 0:
		reader := bytes.NewReader([]byte(stmt.query))
		for {
			if char, _, err := reader.ReadRune(); err == nil {
				switch char {
				case '@':
					if param := paramParser(reader); len(param) != 0 {
						for _, v := range args {
							if len(v.Name) != 0 && v.Name == param {
								switch v := v.Value.(type) {
								case ExternalTable:
									buf.WriteString(v.Name)
									externalTables = append(externalTables, v)
								default:
									buf.WriteString(quote(v))
								}
							}
						}
					}
				case '?':
					if keyword && index < len(args) && len(args[index].Name) == 0 {
						switch v := args[index].Value.(type) {
						case ExternalTable:
							buf.WriteString(v.Name)
							externalTables = append(externalTables, v)
						default:
							buf.WriteString(quote(v))
						}
						index++
					} else {
						buf.WriteRune(char)
					}
				default:
					switch {
					case
						char == '=',
						char == '<',
						char == '>',
						char == '(',
						char == ',',
						char == '+',
						char == '-',
						char == '*',
						char == '/',
						char == '[':
						keyword = true
					default:
						if limit.matchRune(char) || offset.matchRune(char) || like.matchRune(char) ||
							in.matchRune(char) || from.matchRune(char) || join.matchRune(char) || subSelect.matchRune(char) {
							keyword = true
						} else if between.matchRune(char) {
							keyword = true
							inBetween = true
						} else if inBetween && and.matchRune(char) {
							keyword = true
							inBetween = false
						} else {
							keyword = keyword && unicode.IsSpace(char)
						}
					}
					buf.WriteRune(char)
				}
			} else {
				break
			}
		}
	default:
		buf.WriteString(stmt.query)
	}
	return buf.String(), externalTables
}

func convertOldArgs(args []driver.Value) []driver.NamedValue {
	dargs := make([]driver.NamedValue, len(args))
	for i, v := range args {
		dargs[i] = driver.NamedValue{
			Ordinal: i + 1,
			Value:   v,
		}
	}
	return dargs
}
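WithQueryID was the one exported helper in stmt.go. A minimal sketch of attaching a query ID, assuming a reachable server at a placeholder address:

package main

import (
	"context"
	"database/sql"
	"log"

	"github.com/ClickHouse/clickhouse-go"
)

func main() {
	connect, err := sql.Open("clickhouse", "tcp://127.0.0.1:9000") // placeholder host
	if err != nil {
		log.Fatal(err)
	}
	defer connect.Close()

	// WithQueryID stores the ID under the package-private context key,
	// so ExecContext/QueryContext can forward it with the query.
	ctx := clickhouse.WithQueryID(context.Background(), "example-query-id")
	if _, err := connect.ExecContext(ctx, "SELECT 1"); err != nil {
		log.Fatal(err)
	}
}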
44
vendor/github.com/ClickHouse/clickhouse-go/tls_config.go
generated
vendored
@ -1,44 +0,0 @@
package clickhouse

import (
	"crypto/tls"
	"sync"
)

// Based on the original implementation in the project go-sql-driver/mysql:
// https://github.com/go-sql-driver/mysql/blob/master/utils.go

var (
	tlsConfigLock     sync.RWMutex
	tlsConfigRegistry map[string]*tls.Config
)

// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open.
func RegisterTLSConfig(key string, config *tls.Config) error {
	tlsConfigLock.Lock()
	if tlsConfigRegistry == nil {
		tlsConfigRegistry = make(map[string]*tls.Config)
	}

	tlsConfigRegistry[key] = config
	tlsConfigLock.Unlock()
	return nil
}

// DeregisterTLSConfig removes the tls.Config associated with key.
func DeregisterTLSConfig(key string) {
	tlsConfigLock.Lock()
	if tlsConfigRegistry != nil {
		delete(tlsConfigRegistry, key)
	}
	tlsConfigLock.Unlock()
}

func getTLSConfigClone(key string) (config *tls.Config) {
	tlsConfigLock.RLock()
	if v, ok := tlsConfigRegistry[key]; ok {
		config = v.Clone()
	}
	tlsConfigLock.RUnlock()
	return
}
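RegisterTLSConfig mirrors the go-sql-driver/mysql pattern: register a named tls.Config, then reference it from the DSN. A sketch under the assumption that the driver reads the name from a tls_config DSN parameter (the DSN key, port, and host here are assumptions, not confirmed by this diff):

package main

import (
	"crypto/tls"
	"database/sql"
	"log"

	"github.com/ClickHouse/clickhouse-go"
)

func main() {
	// Register a named TLS config before opening the connection.
	if err := clickhouse.RegisterTLSConfig("custom", &tls.Config{MinVersion: tls.VersionTLS12}); err != nil {
		log.Fatal(err)
	}
	// The registered name is referenced from the DSN.
	connect, err := sql.Open("clickhouse", "tcp://127.0.0.1:9440?secure=true&tls_config=custom")
	if err != nil {
		log.Fatal(err)
	}
	defer connect.Close()
}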
31
vendor/github.com/ClickHouse/clickhouse-go/word_matcher.go
generated
vendored
@ -1,31 +0,0 @@
package clickhouse

import (
	"strings"
	"unicode"
)

// wordMatcher is a simple automaton that matches a single word (case-insensitively).
type wordMatcher struct {
	word     []rune
	position uint8
}

// newMatcher returns a matcher for the word needle.
func newMatcher(needle string) *wordMatcher {
	return &wordMatcher{
		word:     []rune(strings.ToUpper(needle)),
		position: 0,
	}
}

func (m *wordMatcher) matchRune(r rune) bool {
	if m.word[m.position] == unicode.ToUpper(r) {
		if m.position == uint8(len(m.word)-1) {
			m.position = 0
			return true
		}
		m.position++
	} else {
		m.position = 0
	}
	return false
}
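wordMatcher was package-private and driven one rune at a time by stmt.bind. A hypothetical in-package helper showing the automaton's behavior (exampleWordMatcher is illustrative, not part of the driver):

// exampleWordMatcher demonstrates that matchRune reports true only on the
// rune that completes the word, case-insensitively; any mismatch resets it.
func exampleWordMatcher() bool {
	m := newMatcher("limit")
	matched := false
	for _, r := range "SELECT 1 LIMIT" {
		if m.matchRune(r) {
			matched = true // fires on the final 'T' of "LIMIT"
		}
	}
	return matched
}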
66
vendor/github.com/ClickHouse/clickhouse-go/write_column.go
generated
vendored
@ -1,66 +0,0 @@
package clickhouse

import (
	"database/sql"
	"database/sql/driver"
	"time"

	"github.com/ClickHouse/clickhouse-go/lib/data"
)

// Clickhouse is the interface of the low-level ClickHouse driver connection.
type Clickhouse interface {
	Block() (*data.Block, error)
	Prepare(query string) (driver.Stmt, error)
	Begin() (driver.Tx, error)
	Commit() error
	Rollback() error
	Close() error
	WriteBlock(block *data.Block) error
}

// ColumnWriter is the interface of a Block that allows writes to individual columns.
type ColumnWriter interface {
	WriteDate(c int, v time.Time) error
	WriteDateNullable(c int, v *time.Time) error
	WriteDateTime(c int, v time.Time) error
	WriteDateTimeNullable(c int, v *time.Time) error
	WriteUInt8(c int, v uint8) error
	WriteUInt8Nullable(c int, v *uint8) error
	WriteUInt16(c int, v uint16) error
	WriteUInt16Nullable(c int, v *uint16) error
	WriteUInt32(c int, v uint32) error
	WriteUInt32Nullable(c int, v *uint32) error
	WriteUInt64(c int, v uint64) error
	WriteUInt64Nullable(c int, v *uint64) error
	WriteFloat32(c int, v float32) error
	WriteFloat32Nullable(c int, v *float32) error
	WriteFloat64(c int, v float64) error
	WriteFloat64Nullable(c int, v *float64) error
	WriteBytes(c int, v []byte) error
	WriteArray(c int, v interface{}) error
	WriteBytesNullable(c int, v *[]byte) error
	WriteArrayNullable(c int, v *interface{}) error
	WriteString(c int, v string) error
	WriteStringNullable(c int, v *string) error
	WriteFixedString(c int, v []byte) error
	WriteFixedStringNullable(c int, v *[]byte) error
}

func OpenDirect(dsn string) (Clickhouse, error) {
	return open(dsn)
}

func (ch *clickhouse) Block() (*data.Block, error) {
	if ch.block == nil {
		return nil, sql.ErrTxDone
	}
	return ch.block, nil
}

func (ch *clickhouse) WriteBlock(block *data.Block) error {
	if block == nil {
		return sql.ErrTxDone
	}
	return ch.writeBlock(block, "")
}
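OpenDirect plus ColumnWriter formed the low-level bulk-insert path. A rough sketch of the flow; the DSN, the example table, and the exact Block bookkeeping (Reserve/NumRows) are assumptions based on the driver's documented direct-access examples, not something this diff confirms:

package main

import (
	"log"

	"github.com/ClickHouse/clickhouse-go"
)

func main() {
	connect, err := clickhouse.OpenDirect("tcp://127.0.0.1:9000") // placeholder host
	if err != nil {
		log.Fatal(err)
	}
	defer connect.Close()

	// Begin/Prepare set up the server-side INSERT and the column layout.
	if _, err := connect.Begin(); err != nil {
		log.Fatal(err)
	}
	if _, err := connect.Prepare("INSERT INTO example (word) VALUES (?)"); err != nil {
		log.Fatal(err)
	}
	block, err := connect.Block()
	if err != nil {
		log.Fatal(err)
	}
	block.Reserve()
	block.NumRows = 1
	// ColumnWriter-style write straight into column 0.
	if err := block.WriteString(0, "hello"); err != nil {
		log.Fatal(err)
	}
	if err := connect.WriteBlock(block); err != nil {
		log.Fatal(err)
	}
	if err := connect.Commit(); err != nil {
		log.Fatal(err)
	}
}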
19
vendor/github.com/andybalholm/brotli/LICENSE
generated
vendored
@ -1,19 +0,0 @@
Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
7
vendor/github.com/andybalholm/brotli/README.md
generated
vendored
@ -1,7 +0,0 @@
This package is a brotli compressor and decompressor implemented in Go.
It was translated from the reference implementation (https://github.com/google/brotli)
with the `c2go` tool at https://github.com/andybalholm/c2go.

I am using it in production with https://github.com/andybalholm/redwood.

API documentation is found at https://pkg.go.dev/github.com/andybalholm/brotli?tab=doc.
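A minimal round-trip with this package's io.Writer/io.Reader wrappers (NewWriterLevel and NewReader are the package's public API; the payload is a placeholder):

package main

import (
	"bytes"
	"io"
	"log"

	"github.com/andybalholm/brotli"
)

func main() {
	var compressed bytes.Buffer

	// Compress through the io.Writer wrapper.
	w := brotli.NewWriterLevel(&compressed, brotli.BestCompression)
	if _, err := w.Write([]byte("hello, brotli")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	// Decompress through the io.Reader wrapper.
	r := brotli.NewReader(&compressed)
	out, err := io.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s", out)
}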
185
vendor/github.com/andybalholm/brotli/backward_references.go
generated
vendored
@ -1,185 +0,0 @@
package brotli

import (
	"sync"
)

/* Copyright 2013 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* Function to find backward reference copies. */

func computeDistanceCode(distance uint, max_distance uint, dist_cache []int) uint {
	if distance <= max_distance {
		var distance_plus_3 uint = distance + 3
		var offset0 uint = distance_plus_3 - uint(dist_cache[0])
		var offset1 uint = distance_plus_3 - uint(dist_cache[1])
		if distance == uint(dist_cache[0]) {
			return 0
		} else if distance == uint(dist_cache[1]) {
			return 1
		} else if offset0 < 7 {
			return (0x9750468 >> (4 * offset0)) & 0xF
		} else if offset1 < 7 {
			return (0xFDB1ACE >> (4 * offset1)) & 0xF
		} else if distance == uint(dist_cache[2]) {
			return 2
		} else if distance == uint(dist_cache[3]) {
			return 3
		}
	}

	return distance + numDistanceShortCodes - 1
}

var hasherSearchResultPool sync.Pool

func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher hasherHandle, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) {
	var max_backward_limit uint = maxBackwardLimit(params.lgwin)
	var insert_length uint = *last_insert_len
	var pos_end uint = position + num_bytes
	var store_end uint
	if num_bytes >= hasher.StoreLookahead() {
		store_end = position + num_bytes - hasher.StoreLookahead() + 1
	} else {
		store_end = position
	}
	var random_heuristics_window_size uint = literalSpreeLengthForSparseSearch(params)
	var apply_random_heuristics uint = position + random_heuristics_window_size
	var gap uint = 0
	/* Set maximum distance, see section 9.1. of the spec. */

	const kMinScore uint = scoreBase + 100

	/* For speed up heuristics for random data. */

	/* Minimum score to accept a backward reference. */
	hasher.PrepareDistanceCache(dist_cache)
	sr2, _ := hasherSearchResultPool.Get().(*hasherSearchResult)
	if sr2 == nil {
		sr2 = &hasherSearchResult{}
	}
	sr, _ := hasherSearchResultPool.Get().(*hasherSearchResult)
	if sr == nil {
		sr = &hasherSearchResult{}
	}

	for position+hasher.HashTypeLength() < pos_end {
		var max_length uint = pos_end - position
		var max_distance uint = brotli_min_size_t(position, max_backward_limit)
		sr.len = 0
		sr.len_code_delta = 0
		sr.distance = 0
		sr.score = kMinScore
		hasher.FindLongestMatch(&params.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position, max_length, max_distance, gap, params.dist.max_distance, sr)
		if sr.score > kMinScore {
			/* Found a match. Let's look for something even better ahead. */
			var delayed_backward_references_in_row int = 0
			max_length--
			for ; ; max_length-- {
				var cost_diff_lazy uint = 175
				if params.quality < minQualityForExtensiveReferenceSearch {
					sr2.len = brotli_min_size_t(sr.len-1, max_length)
				} else {
					sr2.len = 0
				}
				sr2.len_code_delta = 0
				sr2.distance = 0
				sr2.score = kMinScore
				max_distance = brotli_min_size_t(position+1, max_backward_limit)
				hasher.FindLongestMatch(&params.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position+1, max_length, max_distance, gap, params.dist.max_distance, sr2)
				if sr2.score >= sr.score+cost_diff_lazy {
					/* Ok, let's just write one byte for now and start a match from the
					   next byte. */
					position++

					insert_length++
					*sr = *sr2
					delayed_backward_references_in_row++
					if delayed_backward_references_in_row < 4 && position+hasher.HashTypeLength() < pos_end {
						continue
					}
				}

				break
			}

			apply_random_heuristics = position + 2*sr.len + random_heuristics_window_size
			max_distance = brotli_min_size_t(position, max_backward_limit)
			{
				/* The first 16 codes are special short-codes,
				   and the minimum offset is 1. */
				var distance_code uint = computeDistanceCode(sr.distance, max_distance+gap, dist_cache)
				if (sr.distance <= (max_distance + gap)) && distance_code > 0 {
					dist_cache[3] = dist_cache[2]
					dist_cache[2] = dist_cache[1]
					dist_cache[1] = dist_cache[0]
					dist_cache[0] = int(sr.distance)
					hasher.PrepareDistanceCache(dist_cache)
				}

				*commands = append(*commands, makeCommand(&params.dist, insert_length, sr.len, sr.len_code_delta, distance_code))
			}

			*num_literals += insert_length
			insert_length = 0
			/* Put the hash keys into the table, if there are enough bytes left.
			   Depending on the hasher implementation, it can push all positions
			   in the given range or only a subset of them.
			   Avoid hash poisoning with RLE data. */
			{
				var range_start uint = position + 2
				var range_end uint = brotli_min_size_t(position+sr.len, store_end)
				if sr.distance < sr.len>>2 {
					range_start = brotli_min_size_t(range_end, brotli_max_size_t(range_start, position+sr.len-(sr.distance<<2)))
				}

				hasher.StoreRange(ringbuffer, ringbuffer_mask, range_start, range_end)
			}

			position += sr.len
		} else {
			insert_length++
			position++

			/* If we have not seen matches for a long time, we can skip some
			   match lookups. Unsuccessful match lookups are very very expensive
			   and this kind of a heuristic speeds up compression quite
			   a lot. */
			if position > apply_random_heuristics {
				/* Going through uncompressible data, jump. */
				if position > apply_random_heuristics+4*random_heuristics_window_size {
					var kMargin uint = brotli_max_size_t(hasher.StoreLookahead()-1, 4)
					/* It is quite a long time since we saw a copy, so we assume
					   that this data is not compressible, and store hashes less
					   often. Hashes of non compressible data are less likely to
					   turn out to be useful in the future, too, so we store less of
					   them to not to flood out the hash table of good compressible
					   data. */

					var pos_jump uint = brotli_min_size_t(position+16, pos_end-kMargin)
					for ; position < pos_jump; position += 4 {
						hasher.Store(ringbuffer, ringbuffer_mask, position)
						insert_length += 4
					}
				} else {
					var kMargin uint = brotli_max_size_t(hasher.StoreLookahead()-1, 2)
					var pos_jump uint = brotli_min_size_t(position+8, pos_end-kMargin)
					for ; position < pos_jump; position += 2 {
						hasher.Store(ringbuffer, ringbuffer_mask, position)
						insert_length += 2
					}
				}
			}
		}
	}

	insert_length += pos_end - position
	*last_insert_len = insert_length

	hasherSearchResultPool.Put(sr)
	hasherSearchResultPool.Put(sr2)
}
796
vendor/github.com/andybalholm/brotli/backward_references_hq.go
generated
vendored
@ -1,796 +0,0 @@
package brotli
|
||||
|
||||
import "math"
|
||||
|
||||
type zopfliNode struct {
|
||||
length uint32
|
||||
distance uint32
|
||||
dcode_insert_length uint32
|
||||
u struct {
|
||||
cost float32
|
||||
next uint32
|
||||
shortcut uint32
|
||||
}
|
||||
}
|
||||
|
||||
const maxEffectiveDistanceAlphabetSize = 544
|
||||
|
||||
const kInfinity float32 = 1.7e38 /* ~= 2 ^ 127 */
|
||||
|
||||
var kDistanceCacheIndex = []uint32{0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1}
|
||||
|
||||
var kDistanceCacheOffset = []int{0, 0, 0, 0, -1, 1, -2, 2, -3, 3, -1, 1, -2, 2, -3, 3}
|
||||
|
||||
func initZopfliNodes(array []zopfliNode, length uint) {
|
||||
var stub zopfliNode
|
||||
var i uint
|
||||
stub.length = 1
|
||||
stub.distance = 0
|
||||
stub.dcode_insert_length = 0
|
||||
stub.u.cost = kInfinity
|
||||
for i = 0; i < length; i++ {
|
||||
array[i] = stub
|
||||
}
|
||||
}
|
||||
|
||||
func zopfliNodeCopyLength(self *zopfliNode) uint32 {
|
||||
return self.length & 0x1FFFFFF
|
||||
}
|
||||
|
||||
func zopfliNodeLengthCode(self *zopfliNode) uint32 {
|
||||
var modifier uint32 = self.length >> 25
|
||||
return zopfliNodeCopyLength(self) + 9 - modifier
|
||||
}
|
||||
|
||||
func zopfliNodeCopyDistance(self *zopfliNode) uint32 {
|
||||
return self.distance
|
||||
}
|
||||
|
||||
func zopfliNodeDistanceCode(self *zopfliNode) uint32 {
|
||||
var short_code uint32 = self.dcode_insert_length >> 27
|
||||
if short_code == 0 {
|
||||
return zopfliNodeCopyDistance(self) + numDistanceShortCodes - 1
|
||||
} else {
|
||||
return short_code - 1
|
||||
}
|
||||
}
|
||||
|
||||
func zopfliNodeCommandLength(self *zopfliNode) uint32 {
|
||||
return zopfliNodeCopyLength(self) + (self.dcode_insert_length & 0x7FFFFFF)
|
||||
}
|
||||
|
||||
/* Histogram based cost model for zopflification. */
|
||||
type zopfliCostModel struct {
|
||||
cost_cmd_ [numCommandSymbols]float32
|
||||
cost_dist_ []float32
|
||||
distance_histogram_size uint32
|
||||
literal_costs_ []float32
|
||||
min_cost_cmd_ float32
|
||||
num_bytes_ uint
|
||||
}
|
||||
|
||||
func initZopfliCostModel(self *zopfliCostModel, dist *distanceParams, num_bytes uint) {
|
||||
var distance_histogram_size uint32 = dist.alphabet_size
|
||||
if distance_histogram_size > maxEffectiveDistanceAlphabetSize {
|
||||
distance_histogram_size = maxEffectiveDistanceAlphabetSize
|
||||
}
|
||||
|
||||
self.num_bytes_ = num_bytes
|
||||
self.literal_costs_ = make([]float32, (num_bytes + 2))
|
||||
self.cost_dist_ = make([]float32, (dist.alphabet_size))
|
||||
self.distance_histogram_size = distance_histogram_size
|
||||
}
|
||||
|
||||
func cleanupZopfliCostModel(self *zopfliCostModel) {
|
||||
self.literal_costs_ = nil
|
||||
self.cost_dist_ = nil
|
||||
}
|
||||
|
||||
func setCost(histogram []uint32, histogram_size uint, literal_histogram bool, cost []float32) {
|
||||
var sum uint = 0
|
||||
var missing_symbol_sum uint
|
||||
var log2sum float32
|
||||
var missing_symbol_cost float32
|
||||
var i uint
|
||||
for i = 0; i < histogram_size; i++ {
|
||||
sum += uint(histogram[i])
|
||||
}
|
||||
|
||||
log2sum = float32(fastLog2(sum))
|
||||
missing_symbol_sum = sum
|
||||
if !literal_histogram {
|
||||
for i = 0; i < histogram_size; i++ {
|
||||
if histogram[i] == 0 {
|
||||
missing_symbol_sum++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
missing_symbol_cost = float32(fastLog2(missing_symbol_sum)) + 2
|
||||
for i = 0; i < histogram_size; i++ {
|
||||
if histogram[i] == 0 {
|
||||
cost[i] = missing_symbol_cost
|
||||
continue
|
||||
}
|
||||
|
||||
/* Shannon bits for this symbol. */
|
||||
cost[i] = log2sum - float32(fastLog2(uint(histogram[i])))
|
||||
|
||||
/* Cannot be coded with less than 1 bit */
|
||||
if cost[i] < 1 {
|
||||
cost[i] = 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func zopfliCostModelSetFromCommands(self *zopfliCostModel, position uint, ringbuffer []byte, ringbuffer_mask uint, commands []command, last_insert_len uint) {
|
||||
var histogram_literal [numLiteralSymbols]uint32
|
||||
var histogram_cmd [numCommandSymbols]uint32
|
||||
var histogram_dist [maxEffectiveDistanceAlphabetSize]uint32
|
||||
var cost_literal [numLiteralSymbols]float32
|
||||
var pos uint = position - last_insert_len
|
||||
var min_cost_cmd float32 = kInfinity
|
||||
var cost_cmd []float32 = self.cost_cmd_[:]
|
||||
var literal_costs []float32
|
||||
|
||||
histogram_literal = [numLiteralSymbols]uint32{}
|
||||
histogram_cmd = [numCommandSymbols]uint32{}
|
||||
histogram_dist = [maxEffectiveDistanceAlphabetSize]uint32{}
|
||||
|
||||
for i := range commands {
|
||||
var inslength uint = uint(commands[i].insert_len_)
|
||||
var copylength uint = uint(commandCopyLen(&commands[i]))
|
||||
var distcode uint = uint(commands[i].dist_prefix_) & 0x3FF
|
||||
var cmdcode uint = uint(commands[i].cmd_prefix_)
|
||||
var j uint
|
||||
|
||||
histogram_cmd[cmdcode]++
|
||||
if cmdcode >= 128 {
|
||||
histogram_dist[distcode]++
|
||||
}
|
||||
|
||||
for j = 0; j < inslength; j++ {
|
||||
histogram_literal[ringbuffer[(pos+j)&ringbuffer_mask]]++
|
||||
}
|
||||
|
||||
pos += inslength + copylength
|
||||
}
|
||||
|
||||
setCost(histogram_literal[:], numLiteralSymbols, true, cost_literal[:])
|
||||
setCost(histogram_cmd[:], numCommandSymbols, false, cost_cmd)
|
||||
setCost(histogram_dist[:], uint(self.distance_histogram_size), false, self.cost_dist_)
|
||||
|
||||
for i := 0; i < numCommandSymbols; i++ {
|
||||
min_cost_cmd = brotli_min_float(min_cost_cmd, cost_cmd[i])
|
||||
}
|
||||
|
||||
self.min_cost_cmd_ = min_cost_cmd
|
||||
{
|
||||
literal_costs = self.literal_costs_
|
||||
var literal_carry float32 = 0.0
|
||||
num_bytes := int(self.num_bytes_)
|
||||
literal_costs[0] = 0.0
|
||||
for i := 0; i < num_bytes; i++ {
|
||||
literal_carry += cost_literal[ringbuffer[(position+uint(i))&ringbuffer_mask]]
|
||||
literal_costs[i+1] = literal_costs[i] + literal_carry
|
||||
literal_carry -= literal_costs[i+1] - literal_costs[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func zopfliCostModelSetFromLiteralCosts(self *zopfliCostModel, position uint, ringbuffer []byte, ringbuffer_mask uint) {
|
||||
var literal_costs []float32 = self.literal_costs_
|
||||
var literal_carry float32 = 0.0
|
||||
var cost_dist []float32 = self.cost_dist_
|
||||
var cost_cmd []float32 = self.cost_cmd_[:]
|
||||
var num_bytes uint = self.num_bytes_
|
||||
var i uint
|
||||
estimateBitCostsForLiterals(position, num_bytes, ringbuffer_mask, ringbuffer, literal_costs[1:])
|
||||
literal_costs[0] = 0.0
|
||||
for i = 0; i < num_bytes; i++ {
|
||||
literal_carry += literal_costs[i+1]
|
||||
literal_costs[i+1] = literal_costs[i] + literal_carry
|
||||
literal_carry -= literal_costs[i+1] - literal_costs[i]
|
||||
}
|
||||
|
||||
for i = 0; i < numCommandSymbols; i++ {
|
||||
cost_cmd[i] = float32(fastLog2(uint(11 + uint32(i))))
|
||||
}
|
||||
|
||||
for i = 0; uint32(i) < self.distance_histogram_size; i++ {
|
||||
cost_dist[i] = float32(fastLog2(uint(20 + uint32(i))))
|
||||
}
|
||||
|
||||
self.min_cost_cmd_ = float32(fastLog2(11))
|
||||
}
|
||||
|
||||
func zopfliCostModelGetCommandCost(self *zopfliCostModel, cmdcode uint16) float32 {
|
||||
return self.cost_cmd_[cmdcode]
|
||||
}
|
||||
|
||||
func zopfliCostModelGetDistanceCost(self *zopfliCostModel, distcode uint) float32 {
|
||||
return self.cost_dist_[distcode]
|
||||
}
|
||||
|
||||
func zopfliCostModelGetLiteralCosts(self *zopfliCostModel, from uint, to uint) float32 {
|
||||
return self.literal_costs_[to] - self.literal_costs_[from]
|
||||
}
|
||||
|
||||
func zopfliCostModelGetMinCostCmd(self *zopfliCostModel) float32 {
|
||||
return self.min_cost_cmd_
|
||||
}
|
||||
|
||||
/* REQUIRES: len >= 2, start_pos <= pos */
|
||||
/* REQUIRES: cost < kInfinity, nodes[start_pos].cost < kInfinity */
|
||||
/* Maintains the "ZopfliNode array invariant". */
|
||||
func updateZopfliNode(nodes []zopfliNode, pos uint, start_pos uint, len uint, len_code uint, dist uint, short_code uint, cost float32) {
|
||||
var next *zopfliNode = &nodes[pos+len]
|
||||
next.length = uint32(len | (len+9-len_code)<<25)
|
||||
next.distance = uint32(dist)
|
||||
next.dcode_insert_length = uint32(short_code<<27 | (pos - start_pos))
|
||||
next.u.cost = cost
|
||||
}
|
||||
|
||||
type posData struct {
|
||||
pos uint
|
||||
distance_cache [4]int
|
||||
costdiff float32
|
||||
cost float32
|
||||
}
|
||||
|
||||
/* Maintains the smallest 8 cost difference together with their positions */
|
||||
type startPosQueue struct {
|
||||
q_ [8]posData
|
||||
idx_ uint
|
||||
}
|
||||
|
||||
func initStartPosQueue(self *startPosQueue) {
|
||||
self.idx_ = 0
|
||||
}
|
||||
|
||||
func startPosQueueSize(self *startPosQueue) uint {
|
||||
return brotli_min_size_t(self.idx_, 8)
|
||||
}
|
||||
|
||||
func startPosQueuePush(self *startPosQueue, posdata *posData) {
|
||||
var offset uint = ^(self.idx_) & 7
|
||||
self.idx_++
|
||||
var len uint = startPosQueueSize(self)
|
||||
var i uint
|
||||
var q []posData = self.q_[:]
|
||||
q[offset] = *posdata
|
||||
|
||||
/* Restore the sorted order. In the list of |len| items at most |len - 1|
|
||||
adjacent element comparisons / swaps are required. */
|
||||
for i = 1; i < len; i++ {
|
||||
if q[offset&7].costdiff > q[(offset+1)&7].costdiff {
|
||||
var tmp posData = q[offset&7]
|
||||
q[offset&7] = q[(offset+1)&7]
|
||||
q[(offset+1)&7] = tmp
|
||||
}
|
||||
|
||||
offset++
|
||||
}
|
||||
}
|
||||
|
||||
func startPosQueueAt(self *startPosQueue, k uint) *posData {
|
||||
return &self.q_[(k-self.idx_)&7]
|
||||
}
|
||||
|
||||
/* Returns the minimum possible copy length that can improve the cost of any */
|
||||
/* future position. */
|
||||
func computeMinimumCopyLength(start_cost float32, nodes []zopfliNode, num_bytes uint, pos uint) uint {
|
||||
var min_cost float32 = start_cost
|
||||
var len uint = 2
|
||||
var next_len_bucket uint = 4
|
||||
/* Compute the minimum possible cost of reaching any future position. */
|
||||
|
||||
var next_len_offset uint = 10
|
||||
for pos+len <= num_bytes && nodes[pos+len].u.cost <= min_cost {
|
||||
/* We already reached (pos + len) with no more cost than the minimum
|
||||
possible cost of reaching anything from this pos, so there is no point in
|
||||
looking for lengths <= len. */
|
||||
len++
|
||||
|
||||
if len == next_len_offset {
|
||||
/* We reached the next copy length code bucket, so we add one more
|
||||
extra bit to the minimum cost. */
|
||||
min_cost += 1.0
|
||||
|
||||
next_len_offset += next_len_bucket
|
||||
next_len_bucket *= 2
|
||||
}
|
||||
}
|
||||
|
||||
return uint(len)
|
||||
}
|
||||
|
||||
/* REQUIRES: nodes[pos].cost < kInfinity
|
||||
REQUIRES: nodes[0..pos] satisfies that "ZopfliNode array invariant". */
|
||||
func computeDistanceShortcut(block_start uint, pos uint, max_backward_limit uint, gap uint, nodes []zopfliNode) uint32 {
|
||||
var clen uint = uint(zopfliNodeCopyLength(&nodes[pos]))
|
||||
var ilen uint = uint(nodes[pos].dcode_insert_length & 0x7FFFFFF)
|
||||
var dist uint = uint(zopfliNodeCopyDistance(&nodes[pos]))
|
||||
|
||||
/* Since |block_start + pos| is the end position of the command, the copy part
|
||||
starts from |block_start + pos - clen|. Distances that are greater than
|
||||
this or greater than |max_backward_limit| + |gap| are static dictionary
|
||||
references, and do not update the last distances.
|
||||
Also distance code 0 (last distance) does not update the last distances. */
|
||||
if pos == 0 {
|
||||
return 0
|
||||
} else if dist+clen <= block_start+pos+gap && dist <= max_backward_limit+gap && zopfliNodeDistanceCode(&nodes[pos]) > 0 {
|
||||
return uint32(pos)
|
||||
} else {
|
||||
return nodes[pos-clen-ilen].u.shortcut
|
||||
}
|
||||
}
|
||||
|
||||
/* Fills in dist_cache[0..3] with the last four distances (as defined by
|
||||
Section 4. of the Spec) that would be used at (block_start + pos) if we
|
||||
used the shortest path of commands from block_start, computed from
|
||||
nodes[0..pos]. The last four distances at block_start are in
|
||||
starting_dist_cache[0..3].
|
||||
REQUIRES: nodes[pos].cost < kInfinity
|
||||
REQUIRES: nodes[0..pos] satisfies that "ZopfliNode array invariant". */
|
||||
func computeDistanceCache(pos uint, starting_dist_cache []int, nodes []zopfliNode, dist_cache []int) {
|
||||
var idx int = 0
|
||||
var p uint = uint(nodes[pos].u.shortcut)
|
||||
for idx < 4 && p > 0 {
|
||||
var ilen uint = uint(nodes[p].dcode_insert_length & 0x7FFFFFF)
|
||||
var clen uint = uint(zopfliNodeCopyLength(&nodes[p]))
|
||||
var dist uint = uint(zopfliNodeCopyDistance(&nodes[p]))
|
||||
dist_cache[idx] = int(dist)
|
||||
idx++
|
||||
|
||||
/* Because of prerequisite, p >= clen + ilen >= 2. */
|
||||
p = uint(nodes[p-clen-ilen].u.shortcut)
|
||||
}
|
||||
|
||||
for ; idx < 4; idx++ {
|
||||
dist_cache[idx] = starting_dist_cache[0]
|
||||
starting_dist_cache = starting_dist_cache[1:]
|
||||
}
|
||||
}
|
||||
|
||||
/* Maintains "ZopfliNode array invariant" and pushes node to the queue, if it
|
||||
is eligible. */
|
||||
func evaluateNode(block_start uint, pos uint, max_backward_limit uint, gap uint, starting_dist_cache []int, model *zopfliCostModel, queue *startPosQueue, nodes []zopfliNode) {
|
||||
/* Save cost, because ComputeDistanceCache invalidates it. */
|
||||
var node_cost float32 = nodes[pos].u.cost
|
||||
nodes[pos].u.shortcut = computeDistanceShortcut(block_start, pos, max_backward_limit, gap, nodes)
|
||||
if node_cost <= zopfliCostModelGetLiteralCosts(model, 0, pos) {
|
||||
var posdata posData
|
||||
posdata.pos = pos
|
||||
posdata.cost = node_cost
|
||||
posdata.costdiff = node_cost - zopfliCostModelGetLiteralCosts(model, 0, pos)
|
||||
computeDistanceCache(pos, starting_dist_cache, nodes, posdata.distance_cache[:])
|
||||
startPosQueuePush(queue, &posdata)
|
||||
}
|
||||
}
|
||||
|
||||
/* Returns longest copy length. */
|
||||
func updateNodes(num_bytes uint, block_start uint, pos uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, max_backward_limit uint, starting_dist_cache []int, num_matches uint, matches []backwardMatch, model *zopfliCostModel, queue *startPosQueue, nodes []zopfliNode) uint {
|
||||
var cur_ix uint = block_start + pos
|
||||
var cur_ix_masked uint = cur_ix & ringbuffer_mask
|
||||
var max_distance uint = brotli_min_size_t(cur_ix, max_backward_limit)
|
||||
var max_len uint = num_bytes - pos
|
||||
var max_zopfli_len uint = maxZopfliLen(params)
|
||||
var max_iters uint = maxZopfliCandidates(params)
|
||||
var min_len uint
|
||||
var result uint = 0
|
||||
var k uint
|
||||
var gap uint = 0
|
||||
|
||||
evaluateNode(block_start, pos, max_backward_limit, gap, starting_dist_cache, model, queue, nodes)
|
||||
{
|
||||
var posdata *posData = startPosQueueAt(queue, 0)
|
||||
var min_cost float32 = (posdata.cost + zopfliCostModelGetMinCostCmd(model) + zopfliCostModelGetLiteralCosts(model, posdata.pos, pos))
|
||||
min_len = computeMinimumCopyLength(min_cost, nodes, num_bytes, pos)
|
||||
}
|
||||
|
||||
/* Go over the command starting positions in order of increasing cost
|
||||
difference. */
|
||||
for k = 0; k < max_iters && k < startPosQueueSize(queue); k++ {
|
||||
var posdata *posData = startPosQueueAt(queue, k)
|
||||
var start uint = posdata.pos
|
||||
var inscode uint16 = getInsertLengthCode(pos - start)
|
||||
var start_costdiff float32 = posdata.costdiff
|
||||
var base_cost float32 = start_costdiff + float32(getInsertExtra(inscode)) + zopfliCostModelGetLiteralCosts(model, 0, pos)
|
||||
var best_len uint = min_len - 1
|
||||
var j uint = 0
|
||||
/* Look for last distance matches using the distance cache from this
|
||||
starting position. */
|
||||
for ; j < numDistanceShortCodes && best_len < max_len; j++ {
|
||||
var idx uint = uint(kDistanceCacheIndex[j])
|
||||
var backward uint = uint(posdata.distance_cache[idx] + kDistanceCacheOffset[j])
|
||||
var prev_ix uint = cur_ix - backward
|
||||
var len uint = 0
|
||||
var continuation byte = ringbuffer[cur_ix_masked+best_len]
|
||||
if cur_ix_masked+best_len > ringbuffer_mask {
|
||||
break
|
||||
}
|
||||
|
||||
if backward > max_distance+gap {
|
||||
/* Word dictionary -> ignore. */
|
||||
continue
|
||||
}
|
||||
|
||||
if backward <= max_distance {
|
||||
/* Regular backward reference. */
|
||||
if prev_ix >= cur_ix {
|
||||
continue
|
||||
}
|
||||
|
||||
prev_ix &= ringbuffer_mask
|
||||
if prev_ix+best_len > ringbuffer_mask || continuation != ringbuffer[prev_ix+best_len] {
|
||||
continue
|
||||
}
|
||||
|
||||
len = findMatchLengthWithLimit(ringbuffer[prev_ix:], ringbuffer[cur_ix_masked:], max_len)
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
{
|
||||
var dist_cost float32 = base_cost + zopfliCostModelGetDistanceCost(model, j)
|
||||
var l uint
|
||||
for l = best_len + 1; l <= len; l++ {
|
||||
var copycode uint16 = getCopyLengthCode(l)
|
||||
var cmdcode uint16 = combineLengthCodes(inscode, copycode, j == 0)
|
||||
var tmp float32
|
||||
if cmdcode < 128 {
|
||||
tmp = base_cost
|
||||
} else {
|
||||
tmp = dist_cost
|
||||
}
|
||||
var cost float32 = tmp + float32(getCopyExtra(copycode)) + zopfliCostModelGetCommandCost(model, cmdcode)
|
||||
if cost < nodes[pos+l].u.cost {
|
||||
updateZopfliNode(nodes, pos, start, l, l, backward, j+1, cost)
|
||||
result = brotli_max_size_t(result, l)
|
||||
}
|
||||
|
||||
best_len = l
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* At higher iterations look only for new last distance matches, since
|
||||
looking only for new command start positions with the same distances
|
||||
does not help much. */
|
||||
if k >= 2 {
|
||||
continue
|
||||
}
|
||||
{
|
||||
/* Loop through all possible copy lengths at this position. */
|
||||
var len uint = min_len
|
||||
for j = 0; j < num_matches; j++ {
|
||||
var match backwardMatch = matches[j]
|
||||
var dist uint = uint(match.distance)
|
||||
var is_dictionary_match bool = (dist > max_distance+gap)
|
||||
var dist_code uint = dist + numDistanceShortCodes - 1
|
||||
var dist_symbol uint16
|
||||
var distextra uint32
|
||||
var distnumextra uint32
|
||||
var dist_cost float32
|
||||
var max_match_len uint
|
||||
/* We already tried all possible last distance matches, so we can use
|
||||
normal distance code here. */
|
||||
prefixEncodeCopyDistance(dist_code, uint(params.dist.num_direct_distance_codes), uint(params.dist.distance_postfix_bits), &dist_symbol, &distextra)
|
||||
|
||||
distnumextra = uint32(dist_symbol) >> 10
|
||||
dist_cost = base_cost + float32(distnumextra) + zopfliCostModelGetDistanceCost(model, uint(dist_symbol)&0x3FF)
|
||||
|
||||
/* Try all copy lengths up until the maximum copy length corresponding
|
||||
to this distance. If the distance refers to the static dictionary, or
|
||||
the maximum length is long enough, try only one maximum length. */
|
||||
max_match_len = backwardMatchLength(&match)
|
||||
|
||||
if len < max_match_len && (is_dictionary_match || max_match_len > max_zopfli_len) {
|
||||
len = max_match_len
|
||||
}
|
||||
|
||||
for ; len <= max_match_len; len++ {
|
||||
var len_code uint
|
||||
if is_dictionary_match {
|
||||
len_code = backwardMatchLengthCode(&match)
|
||||
} else {
|
||||
len_code = len
|
||||
}
|
||||
var copycode uint16 = getCopyLengthCode(len_code)
|
||||
var cmdcode uint16 = combineLengthCodes(inscode, copycode, false)
|
||||
var cost float32 = dist_cost + float32(getCopyExtra(copycode)) + zopfliCostModelGetCommandCost(model, cmdcode)
|
||||
if cost < nodes[pos+len].u.cost {
|
||||
updateZopfliNode(nodes, pos, start, uint(len), len_code, dist, 0, cost)
|
||||
if len > result {
|
||||
result = len
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func computeShortestPathFromNodes(num_bytes uint, nodes []zopfliNode) uint {
|
||||
var index uint = num_bytes
|
||||
var num_commands uint = 0
|
||||
for nodes[index].dcode_insert_length&0x7FFFFFF == 0 && nodes[index].length == 1 {
|
||||
index--
|
||||
}
|
||||
nodes[index].u.next = math.MaxUint32
|
||||
for index != 0 {
|
||||
var len uint = uint(zopfliNodeCommandLength(&nodes[index]))
|
||||
index -= uint(len)
|
||||
nodes[index].u.next = uint32(len)
|
||||
num_commands++
|
||||
}
|
||||
|
||||
return num_commands
|
||||
}
|
||||
|
||||
/* REQUIRES: nodes != NULL and len(nodes) >= num_bytes + 1 */
|
||||
func zopfliCreateCommands(num_bytes uint, block_start uint, nodes []zopfliNode, dist_cache []int, last_insert_len *uint, params *encoderParams, commands *[]command, num_literals *uint) {
|
||||
var max_backward_limit uint = maxBackwardLimit(params.lgwin)
|
||||
var pos uint = 0
|
||||
var offset uint32 = nodes[0].u.next
|
||||
var i uint
|
||||
var gap uint = 0
|
||||
for i = 0; offset != math.MaxUint32; i++ {
|
||||
var next *zopfliNode = &nodes[uint32(pos)+offset]
|
||||
var copy_length uint = uint(zopfliNodeCopyLength(next))
|
||||
var insert_length uint = uint(next.dcode_insert_length & 0x7FFFFFF)
|
||||
pos += insert_length
|
||||
offset = next.u.next
|
||||
if i == 0 {
|
||||
insert_length += *last_insert_len
|
||||
*last_insert_len = 0
|
||||
}
|
||||
{
|
||||
var distance uint = uint(zopfliNodeCopyDistance(next))
|
||||
var len_code uint = uint(zopfliNodeLengthCode(next))
|
||||
var max_distance uint = brotli_min_size_t(block_start+pos, max_backward_limit)
|
||||
var is_dictionary bool = (distance > max_distance+gap)
|
||||
var dist_code uint = uint(zopfliNodeDistanceCode(next))
|
||||
*commands = append(*commands, makeCommand(¶ms.dist, insert_length, copy_length, int(len_code)-int(copy_length), dist_code))
|
||||
|
||||
if !is_dictionary && dist_code > 0 {
|
||||
dist_cache[3] = dist_cache[2]
|
||||
dist_cache[2] = dist_cache[1]
|
||||
dist_cache[1] = dist_cache[0]
|
||||
dist_cache[0] = int(distance)
|
||||
}
|
||||
}
|
||||
|
||||
*num_literals += insert_length
|
||||
pos += copy_length
|
||||
}
|
||||
|
||||
*last_insert_len += num_bytes - pos
|
||||
}
|
||||
|
||||
func zopfliIterate(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, gap uint, dist_cache []int, model *zopfliCostModel, num_matches []uint32, matches []backwardMatch, nodes []zopfliNode) uint {
|
||||
var max_backward_limit uint = maxBackwardLimit(params.lgwin)
|
||||
var max_zopfli_len uint = maxZopfliLen(params)
|
||||
var queue startPosQueue
|
||||
var cur_match_pos uint = 0
|
||||
var i uint
|
||||
nodes[0].length = 0
|
||||
nodes[0].u.cost = 0
|
||||
initStartPosQueue(&queue)
|
||||
for i = 0; i+3 < num_bytes; i++ {
|
||||
var skip uint = updateNodes(num_bytes, position, i, ringbuffer, ringbuffer_mask, params, max_backward_limit, dist_cache, uint(num_matches[i]), matches[cur_match_pos:], model, &queue, nodes)
|
||||
if skip < longCopyQuickStep {
|
||||
skip = 0
|
||||
}
|
||||
cur_match_pos += uint(num_matches[i])
|
||||
if num_matches[i] == 1 && backwardMatchLength(&matches[cur_match_pos-1]) > max_zopfli_len {
|
||||
skip = brotli_max_size_t(backwardMatchLength(&matches[cur_match_pos-1]), skip)
|
||||
}
|
||||
|
||||
if skip > 1 {
|
||||
skip--
|
||||
for skip != 0 {
|
||||
i++
|
||||
if i+3 >= num_bytes {
|
||||
break
|
||||
}
|
||||
evaluateNode(position, i, max_backward_limit, gap, dist_cache, model, &queue, nodes)
|
||||
cur_match_pos += uint(num_matches[i])
|
||||
skip--
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return computeShortestPathFromNodes(num_bytes, nodes)
|
||||
}
|
||||
|
||||
/* Computes the shortest path of commands from position to at most
|
||||
position + num_bytes.
|
||||
|
||||
On return, path->size() is the number of commands found and path[i] is the
|
||||
length of the i-th command (copy length plus insert length).
|
||||
Note that the sum of the lengths of all commands can be less than num_bytes.
|
||||
|
||||
On return, the nodes[0..num_bytes] array will have the following
|
||||
"ZopfliNode array invariant":
|
||||
For each i in [1..num_bytes], if nodes[i].cost < kInfinity, then
|
||||
(1) nodes[i].copy_length() >= 2
|
||||
(2) nodes[i].command_length() <= i and
|
||||
(3) nodes[i - nodes[i].command_length()].cost < kInfinity
|
||||
|
||||
REQUIRES: nodes != nil and len(nodes) >= num_bytes + 1 */
|
||||
func zopfliComputeShortestPath(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, dist_cache []int, hasher *h10, nodes []zopfliNode) uint {
|
||||
var max_backward_limit uint = maxBackwardLimit(params.lgwin)
|
||||
var max_zopfli_len uint = maxZopfliLen(params)
|
||||
var model zopfliCostModel
|
||||
var queue startPosQueue
|
||||
var matches [2 * (maxNumMatchesH10 + 64)]backwardMatch
|
||||
var store_end uint
|
||||
if num_bytes >= hasher.StoreLookahead() {
|
||||
store_end = position + num_bytes - hasher.StoreLookahead() + 1
|
||||
} else {
|
||||
store_end = position
|
||||
}
|
||||
var i uint
|
||||
var gap uint = 0
|
||||
var lz_matches_offset uint = 0
|
||||
nodes[0].length = 0
|
||||
nodes[0].u.cost = 0
|
||||
initZopfliCostModel(&model, ¶ms.dist, num_bytes)
|
||||
zopfliCostModelSetFromLiteralCosts(&model, position, ringbuffer, ringbuffer_mask)
|
||||
initStartPosQueue(&queue)
|
||||
for i = 0; i+hasher.HashTypeLength()-1 < num_bytes; i++ {
|
||||
var pos uint = position + i
|
||||
var max_distance uint = brotli_min_size_t(pos, max_backward_limit)
|
||||
var skip uint
|
||||
var num_matches uint
|
||||
num_matches = findAllMatchesH10(hasher, ¶ms.dictionary, ringbuffer, ringbuffer_mask, pos, num_bytes-i, max_distance, gap, params, matches[lz_matches_offset:])
|
||||
if num_matches > 0 && backwardMatchLength(&matches[num_matches-1]) > max_zopfli_len {
|
||||
matches[0] = matches[num_matches-1]
|
||||
num_matches = 1
|
||||
}
|
||||
|
||||
skip = updateNodes(num_bytes, position, i, ringbuffer, ringbuffer_mask, params, max_backward_limit, dist_cache, num_matches, matches[:], &model, &queue, nodes)
|
||||
if skip < longCopyQuickStep {
|
||||
skip = 0
|
||||
}
|
||||
if num_matches == 1 && backwardMatchLength(&matches[0]) > max_zopfli_len {
|
||||
skip = brotli_max_size_t(backwardMatchLength(&matches[0]), skip)
|
||||
}
|
||||
|
||||
if skip > 1 {
|
||||
/* Add the tail of the copy to the hasher. */
|
||||
hasher.StoreRange(ringbuffer, ringbuffer_mask, pos+1, brotli_min_size_t(pos+skip, store_end))
|
||||
|
||||
skip--
|
||||
for skip != 0 {
|
||||
i++
|
||||
if i+hasher.HashTypeLength()-1 >= num_bytes {
|
||||
break
|
||||
}
|
||||
evaluateNode(position, i, max_backward_limit, gap, dist_cache, &model, &queue, nodes)
|
||||
skip--
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cleanupZopfliCostModel(&model)
|
||||
return computeShortestPathFromNodes(num_bytes, nodes)
|
||||
}
|
func createZopfliBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher *h10, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) {
	var nodes []zopfliNode
	nodes = make([]zopfliNode, (num_bytes + 1))
	initZopfliNodes(nodes, num_bytes+1)
	zopfliComputeShortestPath(num_bytes, position, ringbuffer, ringbuffer_mask, params, dist_cache, hasher, nodes)
	zopfliCreateCommands(num_bytes, position, nodes, dist_cache, last_insert_len, params, commands, num_literals)
	nodes = nil
}

func createHqZopfliBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher hasherHandle, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) {
	var max_backward_limit uint = maxBackwardLimit(params.lgwin)
	var num_matches []uint32 = make([]uint32, num_bytes)
	var matches_size uint = 4 * num_bytes
	var store_end uint
	if num_bytes >= hasher.StoreLookahead() {
		store_end = position + num_bytes - hasher.StoreLookahead() + 1
	} else {
		store_end = position
	}
	var cur_match_pos uint = 0
	var i uint
	var orig_num_literals uint
	var orig_last_insert_len uint
	var orig_dist_cache [4]int
	var orig_num_commands int
	var model zopfliCostModel
	var nodes []zopfliNode
	var matches []backwardMatch = make([]backwardMatch, matches_size)
	var gap uint = 0
	var shadow_matches uint = 0
	var new_array []backwardMatch
	for i = 0; i+hasher.HashTypeLength()-1 < num_bytes; i++ {
		var pos uint = position + i
		var max_distance uint = brotli_min_size_t(pos, max_backward_limit)
		var max_length uint = num_bytes - i
		var num_found_matches uint
		var cur_match_end uint
		var j uint

		/* Ensure that we have enough free slots. */
		if matches_size < cur_match_pos+maxNumMatchesH10+shadow_matches {
			var new_size uint = matches_size
			if new_size == 0 {
				new_size = cur_match_pos + maxNumMatchesH10 + shadow_matches
			}

			for new_size < cur_match_pos+maxNumMatchesH10+shadow_matches {
				new_size *= 2
			}

			new_array = make([]backwardMatch, new_size)
			if matches_size != 0 {
				copy(new_array, matches[:matches_size])
			}

			matches = new_array
			matches_size = new_size
		}

		num_found_matches = findAllMatchesH10(hasher.(*h10), &params.dictionary, ringbuffer, ringbuffer_mask, pos, max_length, max_distance, gap, params, matches[cur_match_pos+shadow_matches:])
		cur_match_end = cur_match_pos + num_found_matches
		for j = cur_match_pos; j+1 < cur_match_end; j++ {
			assert(backwardMatchLength(&matches[j]) <= backwardMatchLength(&matches[j+1]))
		}

		num_matches[i] = uint32(num_found_matches)
		if num_found_matches > 0 {
			var match_len uint = backwardMatchLength(&matches[cur_match_end-1])
			if match_len > maxZopfliLenQuality11 {
				var skip uint = match_len - 1
				matches[cur_match_pos] = matches[cur_match_end-1]
				cur_match_pos++
				num_matches[i] = 1

				/* Add the tail of the copy to the hasher. */
				hasher.StoreRange(ringbuffer, ringbuffer_mask, pos+1, brotli_min_size_t(pos+match_len, store_end))
				var pos uint = i
				for i := 0; i < int(skip); i++ {
					num_matches[pos+1:][i] = 0
				}
				i += skip
			} else {
				cur_match_pos = cur_match_end
			}
		}
	}

	orig_num_literals = *num_literals
	orig_last_insert_len = *last_insert_len
	copy(orig_dist_cache[:], dist_cache[:4])
	orig_num_commands = len(*commands)
	nodes = make([]zopfliNode, (num_bytes + 1))
	initZopfliCostModel(&model, &params.dist, num_bytes)
	for i = 0; i < 2; i++ {
		initZopfliNodes(nodes, num_bytes+1)
		if i == 0 {
			zopfliCostModelSetFromLiteralCosts(&model, position, ringbuffer, ringbuffer_mask)
		} else {
			zopfliCostModelSetFromCommands(&model, position, ringbuffer, ringbuffer_mask, (*commands)[orig_num_commands:], orig_last_insert_len)
		}

		*commands = (*commands)[:orig_num_commands]
		*num_literals = orig_num_literals
		*last_insert_len = orig_last_insert_len
		copy(dist_cache, orig_dist_cache[:4])
		zopfliIterate(num_bytes, position, ringbuffer, ringbuffer_mask, params, gap, dist_cache, &model, num_matches, matches, nodes)
		zopfliCreateCommands(num_bytes, position, nodes, dist_cache, last_insert_len, params, commands, num_literals)
	}

	cleanupZopfliCostModel(&model)
	nodes = nil
	matches = nil
	num_matches = nil
}
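The "enough free slots" block in createHqZopfliBackwardReferences above grows the matches slice by doubling, the same amortized strategy Go's built-in append applies automatically; a minimal generic sketch of the pattern (ensureCapacity is a hypothetical helper, not part of this package):

// ensureCapacity returns a buffer of at least `needed` elements, doubling
// the old capacity so repeated growth stays amortized O(1) per element.
func ensureCapacity[T any](buf []T, capacity, needed uint) ([]T, uint) {
	if capacity >= needed {
		return buf, capacity
	}
	newCap := capacity
	if newCap == 0 {
		newCap = needed
	}
	for newCap < needed {
		newCap *= 2
	}
	grown := make([]T, newCap)
	copy(grown, buf[:capacity])
	return grown, newCap
}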
436 vendor/github.com/andybalholm/brotli/bit_cost.go generated vendored
@@ -1,436 +0,0 @@
package brotli

/* Copyright 2013 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* Functions to estimate the bit cost of Huffman trees. */
func shannonEntropy(population []uint32, size uint, total *uint) float64 {
	var sum uint = 0
	var retval float64 = 0
	var population_end []uint32 = population[size:]
	var p uint
	for -cap(population) < -cap(population_end) {
		p = uint(population[0])
		population = population[1:]
		sum += p
		retval -= float64(p) * fastLog2(p)
	}

	if sum != 0 {
		retval += float64(sum) * fastLog2(sum)
	}
	*total = sum
	return retval
}

func bitsEntropy(population []uint32, size uint) float64 {
	var sum uint
	var retval float64 = shannonEntropy(population, size, &sum)
	if retval < float64(sum) {
		/* At least one bit per literal is needed. */
		retval = float64(sum)
	}

	return retval
}
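For reference, bitsEntropy above computes sum of count*log2(total/count) over the populated symbols and floors the result at one bit per coded symbol; a stand-alone sketch using math.Log2 in place of the package's fastLog2 (hypothetical names):

package main

import (
	"fmt"
	"math"
)

// entropyBits returns the Shannon-optimal size in bits of coding the
// histogram's symbol stream, floored at one bit per symbol occurrence.
func entropyBits(counts []uint32) float64 {
	var total, bits float64
	for _, c := range counts {
		total += float64(c)
	}
	for _, c := range counts {
		if c > 0 {
			bits += float64(c) * math.Log2(total/float64(c))
		}
	}
	return math.Max(bits, total)
}

func main() {
	fmt.Println(entropyBits([]uint32{2, 2})) // 4 symbols at 1 bit each: 4
	fmt.Println(entropyBits([]uint32{4}))    // degenerate case floors to 4, not 0
}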
const kOneSymbolHistogramCost float64 = 12
const kTwoSymbolHistogramCost float64 = 20
const kThreeSymbolHistogramCost float64 = 28
const kFourSymbolHistogramCost float64 = 37

func populationCostLiteral(histogram *histogramLiteral) float64 {
	var data_size uint = histogramDataSizeLiteral()
	var count int = 0
	var s [5]uint
	var bits float64 = 0.0
	var i uint
	if histogram.total_count_ == 0 {
		return kOneSymbolHistogramCost
	}

	for i = 0; i < data_size; i++ {
		if histogram.data_[i] > 0 {
			s[count] = i
			count++
			if count > 4 {
				break
			}
		}
	}

	if count == 1 {
		return kOneSymbolHistogramCost
	}

	if count == 2 {
		return kTwoSymbolHistogramCost + float64(histogram.total_count_)
	}

	if count == 3 {
		var histo0 uint32 = histogram.data_[s[0]]
		var histo1 uint32 = histogram.data_[s[1]]
		var histo2 uint32 = histogram.data_[s[2]]
		var histomax uint32 = brotli_max_uint32_t(histo0, brotli_max_uint32_t(histo1, histo2))
		return kThreeSymbolHistogramCost + 2*(float64(histo0)+float64(histo1)+float64(histo2)) - float64(histomax)
	}

	if count == 4 {
		var histo [4]uint32
		var h23 uint32
		var histomax uint32
		for i = 0; i < 4; i++ {
			histo[i] = histogram.data_[s[i]]
		}

		/* Sort */
		for i = 0; i < 4; i++ {
			var j uint
			for j = i + 1; j < 4; j++ {
				if histo[j] > histo[i] {
					var tmp uint32 = histo[j]
					histo[j] = histo[i]
					histo[i] = tmp
				}
			}
		}

		h23 = histo[2] + histo[3]
		histomax = brotli_max_uint32_t(h23, histo[0])
		return kFourSymbolHistogramCost + 3*float64(h23) + 2*(float64(histo[0])+float64(histo[1])) - float64(histomax)
	}
	{
		var max_depth uint = 1
		var depth_histo = [codeLengthCodes]uint32{0}
		/* In this loop we compute the entropy of the histogram and simultaneously
		   build a simplified histogram of the code length codes where we use the
		   zero repeat code 17, but we don't use the non-zero repeat code 16. */

		var log2total float64 = fastLog2(histogram.total_count_)
		for i = 0; i < data_size; {
			if histogram.data_[i] > 0 {
				var log2p float64 = log2total - fastLog2(uint(histogram.data_[i]))
				/* Compute -log2(P(symbol)) = -log2(count(symbol)/total_count) =
				   = log2(total_count) - log2(count(symbol)) */

				var depth uint = uint(log2p + 0.5)
				/* Approximate the bit depth by round(-log2(P(symbol))) */
				bits += float64(histogram.data_[i]) * log2p

				if depth > 15 {
					depth = 15
				}

				if depth > max_depth {
					max_depth = depth
				}

				depth_histo[depth]++
				i++
			} else {
				var reps uint32 = 1
				/* Compute the run length of zeros and add the appropriate number of 0
				   and 17 code length codes to the code length code histogram. */

				var k uint
				for k = i + 1; k < data_size && histogram.data_[k] == 0; k++ {
					reps++
				}

				i += uint(reps)
				if i == data_size {
					/* Don't add any cost for the last zero run, since these are encoded
					   only implicitly. */
					break
				}

				if reps < 3 {
					depth_histo[0] += reps
				} else {
					reps -= 2
					for reps > 0 {
						depth_histo[repeatZeroCodeLength]++

						/* Add the 3 extra bits for the 17 code length code. */
						bits += 3

						reps >>= 3
					}
				}
			}
		}

		/* Add the estimated encoding cost of the code length code histogram. */
		bits += float64(18 + 2*max_depth)

		/* Add the entropy of the code length code histogram. */
		bits += bitsEntropy(depth_histo[:], codeLengthCodes)
	}

	return bits
}
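populationCostCommand and populationCostDistance below repeat this function line for line for their own histogram types; the file is machine-translated C and predates Go generics, so the logic is stamped out once per type rather than shared. As a quick check of the count == 2 shortcut above, a two-symbol histogram is charged a flat ~20-bit tree cost plus one bit per coded symbol (illustrative numbers only):

package main

import "fmt"

func main() {
	const kTwoSymbolHistogramCost = 20.0
	totalCount := 100.0 // 100 symbols drawn from a 2-symbol alphabet
	fmt.Println(kTwoSymbolHistogramCost + totalCount) // 120 bits estimated
}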
func populationCostCommand(histogram *histogramCommand) float64 {
	var data_size uint = histogramDataSizeCommand()
	var count int = 0
	var s [5]uint
	var bits float64 = 0.0
	var i uint
	if histogram.total_count_ == 0 {
		return kOneSymbolHistogramCost
	}

	for i = 0; i < data_size; i++ {
		if histogram.data_[i] > 0 {
			s[count] = i
			count++
			if count > 4 {
				break
			}
		}
	}

	if count == 1 {
		return kOneSymbolHistogramCost
	}

	if count == 2 {
		return kTwoSymbolHistogramCost + float64(histogram.total_count_)
	}

	if count == 3 {
		var histo0 uint32 = histogram.data_[s[0]]
		var histo1 uint32 = histogram.data_[s[1]]
		var histo2 uint32 = histogram.data_[s[2]]
		var histomax uint32 = brotli_max_uint32_t(histo0, brotli_max_uint32_t(histo1, histo2))
		return kThreeSymbolHistogramCost + 2*(float64(histo0)+float64(histo1)+float64(histo2)) - float64(histomax)
	}

	if count == 4 {
		var histo [4]uint32
		var h23 uint32
		var histomax uint32
		for i = 0; i < 4; i++ {
			histo[i] = histogram.data_[s[i]]
		}

		/* Sort */
		for i = 0; i < 4; i++ {
			var j uint
			for j = i + 1; j < 4; j++ {
				if histo[j] > histo[i] {
					var tmp uint32 = histo[j]
					histo[j] = histo[i]
					histo[i] = tmp
				}
			}
		}

		h23 = histo[2] + histo[3]
		histomax = brotli_max_uint32_t(h23, histo[0])
		return kFourSymbolHistogramCost + 3*float64(h23) + 2*(float64(histo[0])+float64(histo[1])) - float64(histomax)
	}
	{
		var max_depth uint = 1
		var depth_histo = [codeLengthCodes]uint32{0}
		/* In this loop we compute the entropy of the histogram and simultaneously
		   build a simplified histogram of the code length codes where we use the
		   zero repeat code 17, but we don't use the non-zero repeat code 16. */

		var log2total float64 = fastLog2(histogram.total_count_)
		for i = 0; i < data_size; {
			if histogram.data_[i] > 0 {
				var log2p float64 = log2total - fastLog2(uint(histogram.data_[i]))
				/* Compute -log2(P(symbol)) = -log2(count(symbol)/total_count) =
				   = log2(total_count) - log2(count(symbol)) */

				var depth uint = uint(log2p + 0.5)
				/* Approximate the bit depth by round(-log2(P(symbol))) */
				bits += float64(histogram.data_[i]) * log2p

				if depth > 15 {
					depth = 15
				}

				if depth > max_depth {
					max_depth = depth
				}

				depth_histo[depth]++
				i++
			} else {
				var reps uint32 = 1
				/* Compute the run length of zeros and add the appropriate number of 0
				   and 17 code length codes to the code length code histogram. */

				var k uint
				for k = i + 1; k < data_size && histogram.data_[k] == 0; k++ {
					reps++
				}

				i += uint(reps)
				if i == data_size {
					/* Don't add any cost for the last zero run, since these are encoded
					   only implicitly. */
					break
				}

				if reps < 3 {
					depth_histo[0] += reps
				} else {
					reps -= 2
					for reps > 0 {
						depth_histo[repeatZeroCodeLength]++

						/* Add the 3 extra bits for the 17 code length code. */
						bits += 3

						reps >>= 3
					}
				}
			}
		}

		/* Add the estimated encoding cost of the code length code histogram. */
		bits += float64(18 + 2*max_depth)

		/* Add the entropy of the code length code histogram. */
		bits += bitsEntropy(depth_histo[:], codeLengthCodes)
	}

	return bits
}

func populationCostDistance(histogram *histogramDistance) float64 {
	var data_size uint = histogramDataSizeDistance()
	var count int = 0
	var s [5]uint
	var bits float64 = 0.0
	var i uint
	if histogram.total_count_ == 0 {
		return kOneSymbolHistogramCost
	}

	for i = 0; i < data_size; i++ {
		if histogram.data_[i] > 0 {
			s[count] = i
			count++
			if count > 4 {
				break
			}
		}
	}

	if count == 1 {
		return kOneSymbolHistogramCost
	}

	if count == 2 {
		return kTwoSymbolHistogramCost + float64(histogram.total_count_)
	}

	if count == 3 {
		var histo0 uint32 = histogram.data_[s[0]]
		var histo1 uint32 = histogram.data_[s[1]]
		var histo2 uint32 = histogram.data_[s[2]]
		var histomax uint32 = brotli_max_uint32_t(histo0, brotli_max_uint32_t(histo1, histo2))
		return kThreeSymbolHistogramCost + 2*(float64(histo0)+float64(histo1)+float64(histo2)) - float64(histomax)
	}

	if count == 4 {
		var histo [4]uint32
		var h23 uint32
		var histomax uint32
		for i = 0; i < 4; i++ {
			histo[i] = histogram.data_[s[i]]
		}

		/* Sort */
		for i = 0; i < 4; i++ {
			var j uint
			for j = i + 1; j < 4; j++ {
				if histo[j] > histo[i] {
					var tmp uint32 = histo[j]
					histo[j] = histo[i]
					histo[i] = tmp
				}
			}
		}

		h23 = histo[2] + histo[3]
		histomax = brotli_max_uint32_t(h23, histo[0])
		return kFourSymbolHistogramCost + 3*float64(h23) + 2*(float64(histo[0])+float64(histo[1])) - float64(histomax)
	}
	{
		var max_depth uint = 1
		var depth_histo = [codeLengthCodes]uint32{0}
		/* In this loop we compute the entropy of the histogram and simultaneously
		   build a simplified histogram of the code length codes where we use the
		   zero repeat code 17, but we don't use the non-zero repeat code 16. */

		var log2total float64 = fastLog2(histogram.total_count_)
		for i = 0; i < data_size; {
			if histogram.data_[i] > 0 {
				var log2p float64 = log2total - fastLog2(uint(histogram.data_[i]))
				/* Compute -log2(P(symbol)) = -log2(count(symbol)/total_count) =
				   = log2(total_count) - log2(count(symbol)) */

				var depth uint = uint(log2p + 0.5)
				/* Approximate the bit depth by round(-log2(P(symbol))) */
				bits += float64(histogram.data_[i]) * log2p

				if depth > 15 {
					depth = 15
				}

				if depth > max_depth {
					max_depth = depth
				}

				depth_histo[depth]++
				i++
			} else {
				var reps uint32 = 1
				/* Compute the run length of zeros and add the appropriate number of 0
				   and 17 code length codes to the code length code histogram. */

				var k uint
				for k = i + 1; k < data_size && histogram.data_[k] == 0; k++ {
					reps++
				}

				i += uint(reps)
				if i == data_size {
					/* Don't add any cost for the last zero run, since these are encoded
					   only implicitly. */
					break
				}

				if reps < 3 {
					depth_histo[0] += reps
				} else {
					reps -= 2
					for reps > 0 {
						depth_histo[repeatZeroCodeLength]++

						/* Add the 3 extra bits for the 17 code length code. */
						bits += 3

						reps >>= 3
					}
				}
			}
		}

		/* Add the estimated encoding cost of the code length code histogram. */
		bits += float64(18 + 2*max_depth)

		/* Add the entropy of the code length code histogram. */
		bits += bitsEntropy(depth_histo[:], codeLengthCodes)
	}

	return bits
}
266 vendor/github.com/andybalholm/brotli/bit_reader.go generated vendored
@@ -1,266 +0,0 @@
package brotli

import "encoding/binary"

/* Copyright 2013 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* Bit reading helpers */

const shortFillBitWindowRead = (8 >> 1)

var kBitMask = [33]uint32{
	0x00000000,
	0x00000001,
	0x00000003,
	0x00000007,
	0x0000000F,
	0x0000001F,
	0x0000003F,
	0x0000007F,
	0x000000FF,
	0x000001FF,
	0x000003FF,
	0x000007FF,
	0x00000FFF,
	0x00001FFF,
	0x00003FFF,
	0x00007FFF,
	0x0000FFFF,
	0x0001FFFF,
	0x0003FFFF,
	0x0007FFFF,
	0x000FFFFF,
	0x001FFFFF,
	0x003FFFFF,
	0x007FFFFF,
	0x00FFFFFF,
	0x01FFFFFF,
	0x03FFFFFF,
	0x07FFFFFF,
	0x0FFFFFFF,
	0x1FFFFFFF,
	0x3FFFFFFF,
	0x7FFFFFFF,
	0xFFFFFFFF,
}

func bitMask(n uint32) uint32 {
	return kBitMask[n]
}
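The kBitMask table above is simply a precomputed (1<<n)-1 for n in [0, 32], kept as a lookup because the C original preferred a table to a variable shift; a stand-alone check (hypothetical main; the 64-bit intermediate keeps n = 32 from overflowing):

package main

import "fmt"

func main() {
	for n := uint32(0); n <= 32; n++ {
		// Prints the same values as kBitMask[n] in the file above.
		fmt.Printf("%2d 0x%08X\n", n, uint32((uint64(1)<<n)-1))
	}
}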
type bitReader struct {
	val_      uint64
	bit_pos_  uint32
	input     []byte
	input_len uint
	byte_pos  uint
}

type bitReaderState struct {
	val_      uint64
	bit_pos_  uint32
	input     []byte
	input_len uint
	byte_pos  uint
}

/* Initializes the BrotliBitReader fields. */

/* Ensures that accumulator is not empty.
   May consume up to sizeof(brotli_reg_t) - 1 bytes of input.
   Returns false if data is required but there is no input available.
   For BROTLI_ALIGNED_READ this function also prepares bit reader for aligned
   reading. */
func bitReaderSaveState(from *bitReader, to *bitReaderState) {
	to.val_ = from.val_
	to.bit_pos_ = from.bit_pos_
	to.input = from.input
	to.input_len = from.input_len
	to.byte_pos = from.byte_pos
}

func bitReaderRestoreState(to *bitReader, from *bitReaderState) {
	to.val_ = from.val_
	to.bit_pos_ = from.bit_pos_
	to.input = from.input
	to.input_len = from.input_len
	to.byte_pos = from.byte_pos
}

func getAvailableBits(br *bitReader) uint32 {
	return 64 - br.bit_pos_
}

/* Returns amount of unread bytes the bit reader still has buffered from the
   BrotliInput, including whole bytes in br->val_. */
func getRemainingBytes(br *bitReader) uint {
	return uint(uint32(br.input_len-br.byte_pos) + (getAvailableBits(br) >> 3))
}

/* Checks if there is at least |num| bytes left in the input ring-buffer
   (excluding the bits remaining in br->val_). */
func checkInputAmount(br *bitReader, num uint) bool {
	return br.input_len-br.byte_pos >= num
}

/* Guarantees that there are at least |n_bits| + 1 bits in accumulator.
   Precondition: accumulator contains at least 1 bit.
   |n_bits| should be in the range [1..24] for regular build. For portable
   non-64-bit little-endian build only 16 bits are safe to request. */
func fillBitWindow(br *bitReader, n_bits uint32) {
	if br.bit_pos_ >= 32 {
		br.val_ >>= 32
		br.bit_pos_ ^= 32 /* here same as -= 32 because of the if condition */
		br.val_ |= (uint64(binary.LittleEndian.Uint32(br.input[br.byte_pos:]))) << 32
		br.byte_pos += 4
	}
}

/* Mostly like BrotliFillBitWindow, but guarantees only 16 bits and reads no
   more than BROTLI_SHORT_FILL_BIT_WINDOW_READ bytes of input. */
func fillBitWindow16(br *bitReader) {
	fillBitWindow(br, 17)
}

/* Tries to pull one byte of input to accumulator.
   Returns false if there is no input available. */
func pullByte(br *bitReader) bool {
	if br.byte_pos == br.input_len {
		return false
	}

	br.val_ >>= 8
	br.val_ |= (uint64(br.input[br.byte_pos])) << 56
	br.bit_pos_ -= 8
	br.byte_pos++
	return true
}

/* Returns currently available bits.
   The number of valid bits could be calculated by BrotliGetAvailableBits. */
func getBitsUnmasked(br *bitReader) uint64 {
	return br.val_ >> br.bit_pos_
}

/* Like BrotliGetBits, but does not mask the result.
   The result contains at least 16 valid bits. */
func get16BitsUnmasked(br *bitReader) uint32 {
	fillBitWindow(br, 16)
	return uint32(getBitsUnmasked(br))
}

/* Returns the specified number of bits from |br| without advancing bit
   position. */
func getBits(br *bitReader, n_bits uint32) uint32 {
	fillBitWindow(br, n_bits)
	return uint32(getBitsUnmasked(br)) & bitMask(n_bits)
}

/* Tries to peek the specified amount of bits. Returns false, if there
   is not enough input. */
func safeGetBits(br *bitReader, n_bits uint32, val *uint32) bool {
	for getAvailableBits(br) < n_bits {
		if !pullByte(br) {
			return false
		}
	}

	*val = uint32(getBitsUnmasked(br)) & bitMask(n_bits)
	return true
}

/* Advances the bit pos by |n_bits|. */
func dropBits(br *bitReader, n_bits uint32) {
	br.bit_pos_ += n_bits
}

func bitReaderUnload(br *bitReader) {
	var unused_bytes uint32 = getAvailableBits(br) >> 3
	var unused_bits uint32 = unused_bytes << 3
	br.byte_pos -= uint(unused_bytes)
	if unused_bits == 64 {
		br.val_ = 0
	} else {
		br.val_ <<= unused_bits
	}

	br.bit_pos_ += unused_bits
}

/* Reads the specified number of bits from |br| and advances the bit pos.
   Precondition: accumulator MUST contain at least |n_bits|. */
func takeBits(br *bitReader, n_bits uint32, val *uint32) {
	*val = uint32(getBitsUnmasked(br)) & bitMask(n_bits)
	dropBits(br, n_bits)
}

/* Reads the specified number of bits from |br| and advances the bit pos.
   Assumes that there is enough input to perform BrotliFillBitWindow. */
func readBits(br *bitReader, n_bits uint32) uint32 {
	var val uint32
	fillBitWindow(br, n_bits)
	takeBits(br, n_bits, &val)
	return val
}
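A minimal usage sketch of the reader above, assuming the same package and its unexported API: fields are read LSB-first, so the low nibble of the first input byte comes out before the high nibble. The input keeps a few bytes of slack because fillBitWindow reads four bytes at a time.

func exampleReadBits() (uint32, uint32) {
	var br bitReader
	initBitReader(&br)
	br.input = []byte{0xB5, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
	br.input_len = uint(len(br.input))
	if !warmupBitReader(&br) {
		return 0, 0
	}
	lo := readBits(&br, 4) // low nibble of 0xB5 -> 5
	hi := readBits(&br, 4) // high nibble of 0xB5 -> 11
	return lo, hi
}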
/* Tries to read the specified amount of bits. Returns false, if there
   is not enough input. |n_bits| MUST be positive. */
func safeReadBits(br *bitReader, n_bits uint32, val *uint32) bool {
	for getAvailableBits(br) < n_bits {
		if !pullByte(br) {
			return false
		}
	}

	takeBits(br, n_bits, val)
	return true
}

/* Advances the bit reader position to the next byte boundary and verifies
   that any skipped bits are set to zero. */
func bitReaderJumpToByteBoundary(br *bitReader) bool {
	var pad_bits_count uint32 = getAvailableBits(br) & 0x7
	var pad_bits uint32 = 0
	if pad_bits_count != 0 {
		takeBits(br, pad_bits_count, &pad_bits)
	}

	return pad_bits == 0
}

/* Copies remaining input bytes stored in the bit reader to the output. Value
   |num| may not be larger than BrotliGetRemainingBytes. The bit reader must be
   warmed up again after this. */
func copyBytes(dest []byte, br *bitReader, num uint) {
	for getAvailableBits(br) >= 8 && num > 0 {
		dest[0] = byte(getBitsUnmasked(br))
		dropBits(br, 8)
		dest = dest[1:]
		num--
	}

	copy(dest, br.input[br.byte_pos:][:num])
	br.byte_pos += num
}

func initBitReader(br *bitReader) {
	br.val_ = 0
	br.bit_pos_ = 64
}

func warmupBitReader(br *bitReader) bool {
	/* Fixing alignment after unaligned BrotliFillWindow would result accumulator
	   overflow. If unalignment is caused by BrotliSafeReadBits, then there is
	   enough space in accumulator to fix alignment. */
	if getAvailableBits(br) == 0 {
		if !pullByte(br) {
			return false
		}
	}

	return true
}
144 vendor/github.com/andybalholm/brotli/block_splitter.go generated vendored
@@ -1,144 +0,0 @@
package brotli

/* Copyright 2013 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* Block split point selection utilities. */

type blockSplit struct {
	num_types          uint
	num_blocks         uint
	types              []byte
	lengths            []uint32
	types_alloc_size   uint
	lengths_alloc_size uint
}

const (
	kMaxLiteralHistograms        uint    = 100
	kMaxCommandHistograms        uint    = 50
	kLiteralBlockSwitchCost      float64 = 28.1
	kCommandBlockSwitchCost      float64 = 13.5
	kDistanceBlockSwitchCost     float64 = 14.6
	kLiteralStrideLength         uint    = 70
	kCommandStrideLength         uint    = 40
	kSymbolsPerLiteralHistogram  uint    = 544
	kSymbolsPerCommandHistogram  uint    = 530
	kSymbolsPerDistanceHistogram uint    = 544
	kMinLengthForBlockSplitting  uint    = 128
	kIterMulForRefining          uint    = 2
	kMinItersForRefining         uint    = 100
)

func countLiterals(cmds []command) uint {
	var total_length uint = 0
	/* Count how many we have. */

	for i := range cmds {
		total_length += uint(cmds[i].insert_len_)
	}

	return total_length
}

func copyLiteralsToByteArray(cmds []command, data []byte, offset uint, mask uint, literals []byte) {
	var pos uint = 0
	var from_pos uint = offset & mask
	for i := range cmds {
		var insert_len uint = uint(cmds[i].insert_len_)
		if from_pos+insert_len > mask {
			var head_size uint = mask + 1 - from_pos
			copy(literals[pos:], data[from_pos:][:head_size])
			from_pos = 0
			pos += head_size
			insert_len -= head_size
		}

		if insert_len > 0 {
			copy(literals[pos:], data[from_pos:][:insert_len])
			pos += insert_len
		}

		from_pos = uint((uint32(from_pos+insert_len) + commandCopyLen(&cmds[i])) & uint32(mask))
	}
}

func myRand(seed *uint32) uint32 {
	/* Initial seed should be 7. In this case, loop length is (1 << 29). */
	*seed *= 16807

	return *seed
}
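myRand above multiplies by 16807, the Park-Miller "minimal standard" multiplier, but relies on implicit uint32 wraparound instead of the usual mod 2^31-1 reduction, so it is a cheap mixing function rather than a full Lehmer generator; as the comment notes, the sequence started from seed 7 cycles after 1<<29 steps. Stand-alone demo:

package main

import "fmt"

// Same update rule as myRand above: multiply by 16807 and let uint32
// overflow supply the modulus.
func myRand(seed *uint32) uint32 {
	*seed *= 16807
	return *seed
}

func main() {
	seed := uint32(7)
	for i := 0; i < 3; i++ {
		fmt.Println(myRand(&seed)) // 117649, 1977326743, ...
	}
}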
func bitCost(count uint) float64 {
	if count == 0 {
		return -2.0
	} else {
		return fastLog2(count)
	}
}

const histogramsPerBatch = 64

const clustersPerBatch = 16

func initBlockSplit(self *blockSplit) {
	self.num_types = 0
	self.num_blocks = 0
	self.types = self.types[:0]
	self.lengths = self.lengths[:0]
	self.types_alloc_size = 0
	self.lengths_alloc_size = 0
}

func splitBlock(cmds []command, data []byte, pos uint, mask uint, params *encoderParams, literal_split *blockSplit, insert_and_copy_split *blockSplit, dist_split *blockSplit) {
	{
		var literals_count uint = countLiterals(cmds)
		var literals []byte = make([]byte, literals_count)

		/* Create a continuous array of literals. */
		copyLiteralsToByteArray(cmds, data, pos, mask, literals)

		/* Create the block split on the array of literals.
		   Literal histograms have alphabet size 256. */
		splitByteVectorLiteral(literals, literals_count, kSymbolsPerLiteralHistogram, kMaxLiteralHistograms, kLiteralStrideLength, kLiteralBlockSwitchCost, params, literal_split)

		literals = nil
	}
	{
		var insert_and_copy_codes []uint16 = make([]uint16, len(cmds))
		/* Compute prefix codes for commands. */

		for i := range cmds {
			insert_and_copy_codes[i] = cmds[i].cmd_prefix_
		}

		/* Create the block split on the array of command prefixes. */
		splitByteVectorCommand(insert_and_copy_codes, kSymbolsPerCommandHistogram, kMaxCommandHistograms, kCommandStrideLength, kCommandBlockSwitchCost, params, insert_and_copy_split)

		/* TODO: reuse for distances? */

		insert_and_copy_codes = nil
	}
	{
		var distance_prefixes []uint16 = make([]uint16, len(cmds))
		var j uint = 0
		/* Create a continuous array of distance prefixes. */

		for i := range cmds {
			var cmd *command = &cmds[i]
			if commandCopyLen(cmd) != 0 && cmd.cmd_prefix_ >= 128 {
				distance_prefixes[j] = cmd.dist_prefix_ & 0x3FF
				j++
			}
		}

		/* Create the block split on the array of distance prefixes. */
		splitByteVectorDistance(distance_prefixes, j, kSymbolsPerDistanceHistogram, kMaxCommandHistograms, kCommandStrideLength, kDistanceBlockSwitchCost, params, dist_split)

		distance_prefixes = nil
	}
}
434 vendor/github.com/andybalholm/brotli/block_splitter_command.go generated vendored
@@ -1,434 +0,0 @@
package brotli

import "math"

/* Copyright 2013 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

func initialEntropyCodesCommand(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramCommand) {
	var seed uint32 = 7
	var block_length uint = length / num_histograms
	var i uint
	clearHistogramsCommand(histograms, num_histograms)
	for i = 0; i < num_histograms; i++ {
		var pos uint = length * i / num_histograms
		if i != 0 {
			pos += uint(myRand(&seed) % uint32(block_length))
		}

		if pos+stride >= length {
			pos = length - stride - 1
		}

		histogramAddVectorCommand(&histograms[i], data[pos:], stride)
	}
}

func randomSampleCommand(seed *uint32, data []uint16, length uint, stride uint, sample *histogramCommand) {
	var pos uint = 0
	if stride >= length {
		stride = length
	} else {
		pos = uint(myRand(seed) % uint32(length-stride+1))
	}

	histogramAddVectorCommand(sample, data[pos:], stride)
}

func refineEntropyCodesCommand(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramCommand) {
	var iters uint = kIterMulForRefining*length/stride + kMinItersForRefining
	var seed uint32 = 7
	var iter uint
	iters = ((iters + num_histograms - 1) / num_histograms) * num_histograms
	for iter = 0; iter < iters; iter++ {
		var sample histogramCommand
		histogramClearCommand(&sample)
		randomSampleCommand(&seed, data, length, stride, &sample)
		histogramAddHistogramCommand(&histograms[iter%num_histograms], &sample)
	}
}

/* Assigns a block id from the range [0, num_histograms) to each data element
   in data[0..length) and fills in block_id[0..length) with the assigned values.
   Returns the number of blocks, i.e. one plus the number of block switches. */
func findBlocksCommand(data []uint16, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramCommand, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint {
	var data_size uint = histogramDataSizeCommand()
	var bitmaplen uint = (num_histograms + 7) >> 3
	var num_blocks uint = 1
	var i uint
	var j uint
	assert(num_histograms <= 256)
	if num_histograms <= 1 {
		for i = 0; i < length; i++ {
			block_id[i] = 0
		}

		return 1
	}

	for i := 0; i < int(data_size*num_histograms); i++ {
		insert_cost[i] = 0
	}
	for i = 0; i < num_histograms; i++ {
		insert_cost[i] = fastLog2(uint(uint32(histograms[i].total_count_)))
	}

	for i = data_size; i != 0; {
		i--
		for j = 0; j < num_histograms; j++ {
			insert_cost[i*num_histograms+j] = insert_cost[j] - bitCost(uint(histograms[j].data_[i]))
		}
	}

	for i := 0; i < int(num_histograms); i++ {
		cost[i] = 0
	}
	for i := 0; i < int(length*bitmaplen); i++ {
		switch_signal[i] = 0
	}

	/* After each iteration of this loop, cost[k] will contain the difference
	   between the minimum cost of arriving at the current byte position using
	   entropy code k, and the minimum cost of arriving at the current byte
	   position. This difference is capped at the block switch cost, and if it
	   reaches block switch cost, it means that when we trace back from the last
	   position, we need to switch here. */
	for i = 0; i < length; i++ {
		var byte_ix uint = i
		var ix uint = byte_ix * bitmaplen
		var insert_cost_ix uint = uint(data[byte_ix]) * num_histograms
		var min_cost float64 = 1e99
		var block_switch_cost float64 = block_switch_bitcost
		var k uint
		for k = 0; k < num_histograms; k++ {
			/* We are coding the symbol in data[byte_ix] with entropy code k. */
			cost[k] += insert_cost[insert_cost_ix+k]

			if cost[k] < min_cost {
				min_cost = cost[k]
				block_id[byte_ix] = byte(k)
			}
		}

		/* More blocks for the beginning. */
		if byte_ix < 2000 {
			block_switch_cost *= 0.77 + 0.07*float64(byte_ix)/2000
		}

		for k = 0; k < num_histograms; k++ {
			cost[k] -= min_cost
			if cost[k] >= block_switch_cost {
				var mask byte = byte(1 << (k & 7))
				cost[k] = block_switch_cost
				assert(k>>3 < bitmaplen)
				switch_signal[ix+(k>>3)] |= mask
				/* Trace back from the last position and switch at the marked places. */
			}
		}
	}
	{
		var byte_ix uint = length - 1
		var ix uint = byte_ix * bitmaplen
		var cur_id byte = block_id[byte_ix]
		for byte_ix > 0 {
			var mask byte = byte(1 << (cur_id & 7))
			assert(uint(cur_id)>>3 < bitmaplen)
			byte_ix--
			ix -= bitmaplen
			if switch_signal[ix+uint(cur_id>>3)]&mask != 0 {
				if cur_id != block_id[byte_ix] {
					cur_id = block_id[byte_ix]
					num_blocks++
				}
			}

			block_id[byte_ix] = cur_id
		}
	}

	return num_blocks
}
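findBlocksCommand above is a Viterbi-style pass: cost[k] tracks how far entropy code k trails the current leader, capped at the block-switch cost, and every position where the cap is hit is recorded in switch_signal as a candidate switch point for the backward trace. A stripped-down sketch of the same recurrence with two codes and made-up per-symbol costs (hypothetical, not the package's API):

package main

import "fmt"

func main() {
	// Per-symbol cost in bits under code 0 and code 1, plus a flat cost
	// for switching codes mid-stream (all values invented).
	symbolCost := [][2]float64{{1, 4}, {1, 4}, {4, 1}, {4, 1}}
	const switchCost = 2.0

	var cost [2]float64
	best := make([]int, len(symbolCost))
	for i, sc := range symbolCost {
		cost[0] += sc[0]
		cost[1] += sc[1]
		minCost, k := cost[0], 0
		if cost[1] < minCost {
			minCost, k = cost[1], 1
		}
		best[i] = k
		for j := range cost {
			// Keep only each code's handicap relative to the leader,
			// capped at the switch cost; hitting the cap marks a spot
			// where switching codes pays for itself.
			if cost[j] -= minCost; cost[j] > switchCost {
				cost[j] = switchCost
			}
		}
	}
	fmt.Println(best) // [0 0 1 1]: switch where the statistics change
}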
var remapBlockIdsCommand_kInvalidId uint16 = 256

func remapBlockIdsCommand(block_ids []byte, length uint, new_id []uint16, num_histograms uint) uint {
	var next_id uint16 = 0
	var i uint
	for i = 0; i < num_histograms; i++ {
		new_id[i] = remapBlockIdsCommand_kInvalidId
	}

	for i = 0; i < length; i++ {
		assert(uint(block_ids[i]) < num_histograms)
		if new_id[block_ids[i]] == remapBlockIdsCommand_kInvalidId {
			new_id[block_ids[i]] = next_id
			next_id++
		}
	}

	for i = 0; i < length; i++ {
		block_ids[i] = byte(new_id[block_ids[i]])
		assert(uint(block_ids[i]) < num_histograms)
	}

	assert(uint(next_id) <= num_histograms)
	return uint(next_id)
}

func buildBlockHistogramsCommand(data []uint16, length uint, block_ids []byte, num_histograms uint, histograms []histogramCommand) {
	var i uint
	clearHistogramsCommand(histograms, num_histograms)
	for i = 0; i < length; i++ {
		histogramAddCommand(&histograms[block_ids[i]], uint(data[i]))
	}
}

var clusterBlocksCommand_kInvalidIndex uint32 = math.MaxUint32

func clusterBlocksCommand(data []uint16, length uint, num_blocks uint, block_ids []byte, split *blockSplit) {
	var histogram_symbols []uint32 = make([]uint32, num_blocks)
	var block_lengths []uint32 = make([]uint32, num_blocks)
	var expected_num_clusters uint = clustersPerBatch * (num_blocks + histogramsPerBatch - 1) / histogramsPerBatch
	var all_histograms_size uint = 0
	var all_histograms_capacity uint = expected_num_clusters
	var all_histograms []histogramCommand = make([]histogramCommand, all_histograms_capacity)
	var cluster_size_size uint = 0
	var cluster_size_capacity uint = expected_num_clusters
	var cluster_size []uint32 = make([]uint32, cluster_size_capacity)
	var num_clusters uint = 0
	var histograms []histogramCommand = make([]histogramCommand, brotli_min_size_t(num_blocks, histogramsPerBatch))
	var max_num_pairs uint = histogramsPerBatch * histogramsPerBatch / 2
	var pairs_capacity uint = max_num_pairs + 1
	var pairs []histogramPair = make([]histogramPair, pairs_capacity)
	var pos uint = 0
	var clusters []uint32
	var num_final_clusters uint
	var new_index []uint32
	var i uint
	var sizes = [histogramsPerBatch]uint32{0}
	var new_clusters = [histogramsPerBatch]uint32{0}
	var symbols = [histogramsPerBatch]uint32{0}
	var remap = [histogramsPerBatch]uint32{0}

	for i := 0; i < int(num_blocks); i++ {
		block_lengths[i] = 0
	}
	{
		var block_idx uint = 0
		for i = 0; i < length; i++ {
			assert(block_idx < num_blocks)
			block_lengths[block_idx]++
			if i+1 == length || block_ids[i] != block_ids[i+1] {
				block_idx++
			}
		}

		assert(block_idx == num_blocks)
	}

	for i = 0; i < num_blocks; i += histogramsPerBatch {
		var num_to_combine uint = brotli_min_size_t(num_blocks-i, histogramsPerBatch)
		var num_new_clusters uint
		var j uint
		for j = 0; j < num_to_combine; j++ {
			var k uint
			histogramClearCommand(&histograms[j])
			for k = 0; uint32(k) < block_lengths[i+j]; k++ {
				histogramAddCommand(&histograms[j], uint(data[pos]))
				pos++
			}

			histograms[j].bit_cost_ = populationCostCommand(&histograms[j])
			new_clusters[j] = uint32(j)
			symbols[j] = uint32(j)
			sizes[j] = 1
		}

		num_new_clusters = histogramCombineCommand(histograms, sizes[:], symbols[:], new_clusters[:], []histogramPair(pairs), num_to_combine, num_to_combine, histogramsPerBatch, max_num_pairs)
		if all_histograms_capacity < (all_histograms_size + num_new_clusters) {
			var _new_size uint
			if all_histograms_capacity == 0 {
				_new_size = all_histograms_size + num_new_clusters
			} else {
				_new_size = all_histograms_capacity
			}
			var new_array []histogramCommand
			for _new_size < (all_histograms_size + num_new_clusters) {
				_new_size *= 2
			}
			new_array = make([]histogramCommand, _new_size)
			if all_histograms_capacity != 0 {
				copy(new_array, all_histograms[:all_histograms_capacity])
			}

			all_histograms = new_array
			all_histograms_capacity = _new_size
		}

		brotli_ensure_capacity_uint32_t(&cluster_size, &cluster_size_capacity, cluster_size_size+num_new_clusters)
		for j = 0; j < num_new_clusters; j++ {
			all_histograms[all_histograms_size] = histograms[new_clusters[j]]
			all_histograms_size++
			cluster_size[cluster_size_size] = sizes[new_clusters[j]]
			cluster_size_size++
			remap[new_clusters[j]] = uint32(j)
		}

		for j = 0; j < num_to_combine; j++ {
			histogram_symbols[i+j] = uint32(num_clusters) + remap[symbols[j]]
		}

		num_clusters += num_new_clusters
		assert(num_clusters == cluster_size_size)
		assert(num_clusters == all_histograms_size)
	}

	histograms = nil

	max_num_pairs = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters)
	if pairs_capacity < max_num_pairs+1 {
		pairs = nil
		pairs = make([]histogramPair, (max_num_pairs + 1))
	}

	clusters = make([]uint32, num_clusters)
	for i = 0; i < num_clusters; i++ {
		clusters[i] = uint32(i)
	}

	num_final_clusters = histogramCombineCommand(all_histograms, cluster_size, histogram_symbols, clusters, pairs, num_clusters, num_blocks, maxNumberOfBlockTypes, max_num_pairs)
	pairs = nil
	cluster_size = nil

	new_index = make([]uint32, num_clusters)
	for i = 0; i < num_clusters; i++ {
		new_index[i] = clusterBlocksCommand_kInvalidIndex
	}
	pos = 0
	{
		var next_index uint32 = 0
		for i = 0; i < num_blocks; i++ {
			var histo histogramCommand
			var j uint
			var best_out uint32
			var best_bits float64
			histogramClearCommand(&histo)
			for j = 0; uint32(j) < block_lengths[i]; j++ {
				histogramAddCommand(&histo, uint(data[pos]))
				pos++
			}

			if i == 0 {
				best_out = histogram_symbols[0]
			} else {
				best_out = histogram_symbols[i-1]
			}
			best_bits = histogramBitCostDistanceCommand(&histo, &all_histograms[best_out])
			for j = 0; j < num_final_clusters; j++ {
				var cur_bits float64 = histogramBitCostDistanceCommand(&histo, &all_histograms[clusters[j]])
				if cur_bits < best_bits {
					best_bits = cur_bits
					best_out = clusters[j]
				}
			}

			histogram_symbols[i] = best_out
			if new_index[best_out] == clusterBlocksCommand_kInvalidIndex {
				new_index[best_out] = next_index
				next_index++
			}
		}
	}

	clusters = nil
	all_histograms = nil
	brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, num_blocks)
	brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, num_blocks)
	{
		var cur_length uint32 = 0
		var block_idx uint = 0
		var max_type byte = 0
		for i = 0; i < num_blocks; i++ {
			cur_length += block_lengths[i]
			if i+1 == num_blocks || histogram_symbols[i] != histogram_symbols[i+1] {
				var id byte = byte(new_index[histogram_symbols[i]])
				split.types[block_idx] = id
				split.lengths[block_idx] = cur_length
				max_type = brotli_max_uint8_t(max_type, id)
				cur_length = 0
				block_idx++
			}
		}

		split.num_blocks = block_idx
		split.num_types = uint(max_type) + 1
	}

	new_index = nil
	block_lengths = nil
	histogram_symbols = nil
}

func splitByteVectorCommand(data []uint16, literals_per_histogram uint, max_histograms uint, sampling_stride_length uint, block_switch_cost float64, params *encoderParams, split *blockSplit) {
	length := uint(len(data))
	var data_size uint = histogramDataSizeCommand()
	var num_histograms uint = length/literals_per_histogram + 1
	var histograms []histogramCommand
	if num_histograms > max_histograms {
		num_histograms = max_histograms
	}

	if length == 0 {
		split.num_types = 1
		return
	} else if length < kMinLengthForBlockSplitting {
		brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, split.num_blocks+1)
		brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, split.num_blocks+1)
		split.num_types = 1
		split.types[split.num_blocks] = 0
		split.lengths[split.num_blocks] = uint32(length)
		split.num_blocks++
		return
	}

	histograms = make([]histogramCommand, num_histograms)

	/* Find good entropy codes. */
	initialEntropyCodesCommand(data, length, sampling_stride_length, num_histograms, histograms)

	refineEntropyCodesCommand(data, length, sampling_stride_length, num_histograms, histograms)
	{
		var block_ids []byte = make([]byte, length)
		var num_blocks uint = 0
		var bitmaplen uint = (num_histograms + 7) >> 3
		var insert_cost []float64 = make([]float64, (data_size * num_histograms))
		var cost []float64 = make([]float64, num_histograms)
		var switch_signal []byte = make([]byte, (length * bitmaplen))
		var new_id []uint16 = make([]uint16, num_histograms)
		var iters uint
		if params.quality < hqZopflificationQuality {
			iters = 3
		} else {
			iters = 10
		}
		/* Find a good path through literals with the good entropy codes. */

		var i uint
		for i = 0; i < iters; i++ {
			num_blocks = findBlocksCommand(data, length, block_switch_cost, num_histograms, histograms, insert_cost, cost, switch_signal, block_ids)
			num_histograms = remapBlockIdsCommand(block_ids, length, new_id, num_histograms)
			buildBlockHistogramsCommand(data, length, block_ids, num_histograms, histograms)
		}

		insert_cost = nil
		cost = nil
		switch_signal = nil
		new_id = nil
		histograms = nil
		clusterBlocksCommand(data, length, num_blocks, block_ids, split)
		block_ids = nil
	}
}
433 vendor/github.com/andybalholm/brotli/block_splitter_distance.go generated vendored
@@ -1,433 +0,0 @@
|
||||
package brotli
|
||||
|
||||
import "math"
|
||||
|
||||
/* Copyright 2013 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||
*/
|
||||
|
||||
func initialEntropyCodesDistance(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramDistance) {
|
||||
var seed uint32 = 7
|
||||
var block_length uint = length / num_histograms
|
||||
var i uint
|
||||
clearHistogramsDistance(histograms, num_histograms)
|
||||
for i = 0; i < num_histograms; i++ {
|
||||
var pos uint = length * i / num_histograms
|
||||
if i != 0 {
|
||||
pos += uint(myRand(&seed) % uint32(block_length))
|
||||
}
|
||||
|
||||
if pos+stride >= length {
|
||||
pos = length - stride - 1
|
||||
}
|
||||
|
||||
histogramAddVectorDistance(&histograms[i], data[pos:], stride)
|
||||
}
|
||||
}
|
||||
|
||||
func randomSampleDistance(seed *uint32, data []uint16, length uint, stride uint, sample *histogramDistance) {
|
||||
var pos uint = 0
|
||||
if stride >= length {
|
||||
stride = length
|
||||
} else {
|
||||
pos = uint(myRand(seed) % uint32(length-stride+1))
|
||||
}
|
||||
|
||||
histogramAddVectorDistance(sample, data[pos:], stride)
|
||||
}
|
||||
|
||||
func refineEntropyCodesDistance(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramDistance) {
|
||||
var iters uint = kIterMulForRefining*length/stride + kMinItersForRefining
|
||||
var seed uint32 = 7
|
||||
var iter uint
|
||||
iters = ((iters + num_histograms - 1) / num_histograms) * num_histograms
|
||||
for iter = 0; iter < iters; iter++ {
|
||||
var sample histogramDistance
|
||||
histogramClearDistance(&sample)
|
||||
randomSampleDistance(&seed, data, length, stride, &sample)
|
||||
histogramAddHistogramDistance(&histograms[iter%num_histograms], &sample)
|
||||
}
|
||||
}
|
||||
|
||||
/* Assigns a block id from the range [0, num_histograms) to each data element
|
||||
in data[0..length) and fills in block_id[0..length) with the assigned values.
|
||||
Returns the number of blocks, i.e. one plus the number of block switches. */
|
||||
func findBlocksDistance(data []uint16, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramDistance, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint {
|
||||
var data_size uint = histogramDataSizeDistance()
|
||||
var bitmaplen uint = (num_histograms + 7) >> 3
|
||||
var num_blocks uint = 1
|
||||
var i uint
|
||||
var j uint
|
||||
assert(num_histograms <= 256)
|
||||
if num_histograms <= 1 {
|
||||
for i = 0; i < length; i++ {
|
||||
block_id[i] = 0
|
||||
}
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
for i := 0; i < int(data_size*num_histograms); i++ {
|
||||
insert_cost[i] = 0
|
||||
}
|
||||
for i = 0; i < num_histograms; i++ {
|
||||
insert_cost[i] = fastLog2(uint(uint32(histograms[i].total_count_)))
|
||||
}
|
||||
|
||||
for i = data_size; i != 0; {
|
||||
i--
|
||||
for j = 0; j < num_histograms; j++ {
|
||||
insert_cost[i*num_histograms+j] = insert_cost[j] - bitCost(uint(histograms[j].data_[i]))
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < int(num_histograms); i++ {
|
||||
cost[i] = 0
|
||||
}
|
||||
for i := 0; i < int(length*bitmaplen); i++ {
|
||||
switch_signal[i] = 0
|
||||
}
|
||||
|
||||
/* After each iteration of this loop, cost[k] will contain the difference
|
||||
between the minimum cost of arriving at the current byte position using
|
||||
entropy code k, and the minimum cost of arriving at the current byte
|
||||
position. This difference is capped at the block switch cost, and if it
|
||||
reaches block switch cost, it means that when we trace back from the last
|
||||
position, we need to switch here. */
|
||||
for i = 0; i < length; i++ {
|
||||
var byte_ix uint = i
|
||||
var ix uint = byte_ix * bitmaplen
|
||||
var insert_cost_ix uint = uint(data[byte_ix]) * num_histograms
|
||||
var min_cost float64 = 1e99
|
||||
var block_switch_cost float64 = block_switch_bitcost
|
||||
var k uint
|
||||
for k = 0; k < num_histograms; k++ {
|
||||
/* We are coding the symbol in data[byte_ix] with entropy code k. */
|
||||
cost[k] += insert_cost[insert_cost_ix+k]
|
||||
|
||||
if cost[k] < min_cost {
|
||||
min_cost = cost[k]
|
||||
block_id[byte_ix] = byte(k)
|
||||
}
|
||||
}
|
||||
|
||||
/* More blocks for the beginning. */
|
||||
if byte_ix < 2000 {
|
||||
block_switch_cost *= 0.77 + 0.07*float64(byte_ix)/2000
|
||||
}
|
||||
|
||||
for k = 0; k < num_histograms; k++ {
|
||||
cost[k] -= min_cost
|
||||
if cost[k] >= block_switch_cost {
|
||||
var mask byte = byte(1 << (k & 7))
|
||||
cost[k] = block_switch_cost
|
||||
assert(k>>3 < bitmaplen)
|
||||
switch_signal[ix+(k>>3)] |= mask
|
||||
/* Trace back from the last position and switch at the marked places. */
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
var byte_ix uint = length - 1
|
||||
var ix uint = byte_ix * bitmaplen
|
||||
var cur_id byte = block_id[byte_ix]
|
||||
for byte_ix > 0 {
|
||||
var mask byte = byte(1 << (cur_id & 7))
|
||||
assert(uint(cur_id)>>3 < bitmaplen)
|
||||
byte_ix--
|
||||
ix -= bitmaplen
|
||||
if switch_signal[ix+uint(cur_id>>3)]&mask != 0 {
|
||||
if cur_id != block_id[byte_ix] {
|
||||
cur_id = block_id[byte_ix]
|
||||
num_blocks++
|
||||
}
|
||||
}
|
||||
|
||||
block_id[byte_ix] = cur_id
|
||||
}
|
||||
}
|
||||
|
||||
return num_blocks
|
||||
}
|
||||
|
||||
var remapBlockIdsDistance_kInvalidId uint16 = 256
|
||||
|
||||
func remapBlockIdsDistance(block_ids []byte, length uint, new_id []uint16, num_histograms uint) uint {
|
||||
var next_id uint16 = 0
|
||||
var i uint
|
||||
for i = 0; i < num_histograms; i++ {
|
||||
new_id[i] = remapBlockIdsDistance_kInvalidId
|
||||
}
|
||||
|
||||
for i = 0; i < length; i++ {
|
||||
assert(uint(block_ids[i]) < num_histograms)
|
||||
if new_id[block_ids[i]] == remapBlockIdsDistance_kInvalidId {
|
||||
new_id[block_ids[i]] = next_id
|
||||
next_id++
|
||||
}
|
||||
}
|
||||
|
||||
for i = 0; i < length; i++ {
|
||||
block_ids[i] = byte(new_id[block_ids[i]])
|
||||
assert(uint(block_ids[i]) < num_histograms)
|
||||
}
|
||||
|
||||
assert(uint(next_id) <= num_histograms)
|
||||
return uint(next_id)
|
||||
}
|
||||
|
||||
func buildBlockHistogramsDistance(data []uint16, length uint, block_ids []byte, num_histograms uint, histograms []histogramDistance) {
|
||||
var i uint
|
||||
clearHistogramsDistance(histograms, num_histograms)
|
||||
for i = 0; i < length; i++ {
|
||||
histogramAddDistance(&histograms[block_ids[i]], uint(data[i]))
|
||||
}
|
||||
}
|
||||
|
||||
var clusterBlocksDistance_kInvalidIndex uint32 = math.MaxUint32
|
||||
|
||||
func clusterBlocksDistance(data []uint16, length uint, num_blocks uint, block_ids []byte, split *blockSplit) {
	var histogram_symbols []uint32 = make([]uint32, num_blocks)
	var block_lengths []uint32 = make([]uint32, num_blocks)
	var expected_num_clusters uint = clustersPerBatch * (num_blocks + histogramsPerBatch - 1) / histogramsPerBatch
	var all_histograms_size uint = 0
	var all_histograms_capacity uint = expected_num_clusters
	var all_histograms []histogramDistance = make([]histogramDistance, all_histograms_capacity)
	var cluster_size_size uint = 0
	var cluster_size_capacity uint = expected_num_clusters
	var cluster_size []uint32 = make([]uint32, cluster_size_capacity)
	var num_clusters uint = 0
	var histograms []histogramDistance = make([]histogramDistance, brotli_min_size_t(num_blocks, histogramsPerBatch))
	var max_num_pairs uint = histogramsPerBatch * histogramsPerBatch / 2
	var pairs_capacity uint = max_num_pairs + 1
	var pairs []histogramPair = make([]histogramPair, pairs_capacity)
	var pos uint = 0
	var clusters []uint32
	var num_final_clusters uint
	var new_index []uint32
	var i uint
	var sizes = [histogramsPerBatch]uint32{0}
	var new_clusters = [histogramsPerBatch]uint32{0}
	var symbols = [histogramsPerBatch]uint32{0}
	var remap = [histogramsPerBatch]uint32{0}

	for i := 0; i < int(num_blocks); i++ {
		block_lengths[i] = 0
	}
	{
		var block_idx uint = 0
		for i = 0; i < length; i++ {
			assert(block_idx < num_blocks)
			block_lengths[block_idx]++
			if i+1 == length || block_ids[i] != block_ids[i+1] {
				block_idx++
			}
		}

		assert(block_idx == num_blocks)
	}

	for i = 0; i < num_blocks; i += histogramsPerBatch {
		var num_to_combine uint = brotli_min_size_t(num_blocks-i, histogramsPerBatch)
		var num_new_clusters uint
		var j uint
		for j = 0; j < num_to_combine; j++ {
			var k uint
			histogramClearDistance(&histograms[j])
			for k = 0; uint32(k) < block_lengths[i+j]; k++ {
				histogramAddDistance(&histograms[j], uint(data[pos]))
				pos++
			}

			histograms[j].bit_cost_ = populationCostDistance(&histograms[j])
			new_clusters[j] = uint32(j)
			symbols[j] = uint32(j)
			sizes[j] = 1
		}

		num_new_clusters = histogramCombineDistance(histograms, sizes[:], symbols[:], new_clusters[:], []histogramPair(pairs), num_to_combine, num_to_combine, histogramsPerBatch, max_num_pairs)
		if all_histograms_capacity < (all_histograms_size + num_new_clusters) {
			var _new_size uint
			if all_histograms_capacity == 0 {
				_new_size = all_histograms_size + num_new_clusters
			} else {
				_new_size = all_histograms_capacity
			}
			var new_array []histogramDistance
			for _new_size < (all_histograms_size + num_new_clusters) {
				_new_size *= 2
			}
			new_array = make([]histogramDistance, _new_size)
			if all_histograms_capacity != 0 {
				copy(new_array, all_histograms[:all_histograms_capacity])
			}

			all_histograms = new_array
			all_histograms_capacity = _new_size
		}

		brotli_ensure_capacity_uint32_t(&cluster_size, &cluster_size_capacity, cluster_size_size+num_new_clusters)
		for j = 0; j < num_new_clusters; j++ {
			all_histograms[all_histograms_size] = histograms[new_clusters[j]]
			all_histograms_size++
			cluster_size[cluster_size_size] = sizes[new_clusters[j]]
			cluster_size_size++
			remap[new_clusters[j]] = uint32(j)
		}

		for j = 0; j < num_to_combine; j++ {
			histogram_symbols[i+j] = uint32(num_clusters) + remap[symbols[j]]
		}

		num_clusters += num_new_clusters
		assert(num_clusters == cluster_size_size)
		assert(num_clusters == all_histograms_size)
	}

	histograms = nil

	max_num_pairs = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters)
	if pairs_capacity < max_num_pairs+1 {
		pairs = nil
		pairs = make([]histogramPair, (max_num_pairs + 1))
	}

	clusters = make([]uint32, num_clusters)
	for i = 0; i < num_clusters; i++ {
		clusters[i] = uint32(i)
	}

	num_final_clusters = histogramCombineDistance(all_histograms, cluster_size, histogram_symbols, clusters, pairs, num_clusters, num_blocks, maxNumberOfBlockTypes, max_num_pairs)
	pairs = nil
	cluster_size = nil

	new_index = make([]uint32, num_clusters)
	for i = 0; i < num_clusters; i++ {
		new_index[i] = clusterBlocksDistance_kInvalidIndex
	}
	pos = 0
	{
		var next_index uint32 = 0
		for i = 0; i < num_blocks; i++ {
			var histo histogramDistance
			var j uint
			var best_out uint32
			var best_bits float64
			histogramClearDistance(&histo)
			for j = 0; uint32(j) < block_lengths[i]; j++ {
				histogramAddDistance(&histo, uint(data[pos]))
				pos++
			}

			if i == 0 {
				best_out = histogram_symbols[0]
			} else {
				best_out = histogram_symbols[i-1]
			}
			best_bits = histogramBitCostDistanceDistance(&histo, &all_histograms[best_out])
			for j = 0; j < num_final_clusters; j++ {
				var cur_bits float64 = histogramBitCostDistanceDistance(&histo, &all_histograms[clusters[j]])
				if cur_bits < best_bits {
					best_bits = cur_bits
					best_out = clusters[j]
				}
			}

			histogram_symbols[i] = best_out
			if new_index[best_out] == clusterBlocksDistance_kInvalidIndex {
				new_index[best_out] = next_index
				next_index++
			}
		}
	}

	clusters = nil
	all_histograms = nil
	brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, num_blocks)
	brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, num_blocks)
	{
		var cur_length uint32 = 0
		var block_idx uint = 0
		var max_type byte = 0
		for i = 0; i < num_blocks; i++ {
			cur_length += block_lengths[i]
			if i+1 == num_blocks || histogram_symbols[i] != histogram_symbols[i+1] {
				var id byte = byte(new_index[histogram_symbols[i]])
				split.types[block_idx] = id
				split.lengths[block_idx] = cur_length
				max_type = brotli_max_uint8_t(max_type, id)
				cur_length = 0
				block_idx++
			}
		}

		split.num_blocks = block_idx
		split.num_types = uint(max_type) + 1
	}

	new_index = nil
	block_lengths = nil
	histogram_symbols = nil
}
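
// Note (added for clarity; not part of the original vendored file):
// clusterBlocksDistance clusters per-block distance histograms in batches of
// histogramsPerBatch, then runs one global combine over the batch winners.
// The initial capacity guess is
//
//	expected_num_clusters = clustersPerBatch * ceil(num_blocks / histogramsPerBatch)
//
// so, with hypothetical clustersPerBatch = 16 and histogramsPerBatch = 64,
// 1000 blocks would reserve 16 * 16 = 256 cluster slots. The real constant
// values are defined elsewhere in this package.
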
func splitByteVectorDistance(data []uint16, length uint, literals_per_histogram uint, max_histograms uint, sampling_stride_length uint, block_switch_cost float64, params *encoderParams, split *blockSplit) {
	var data_size uint = histogramDataSizeDistance()
	var num_histograms uint = length/literals_per_histogram + 1
	var histograms []histogramDistance
	if num_histograms > max_histograms {
		num_histograms = max_histograms
	}

	if length == 0 {
		split.num_types = 1
		return
	} else if length < kMinLengthForBlockSplitting {
		brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, split.num_blocks+1)
		brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, split.num_blocks+1)
		split.num_types = 1
		split.types[split.num_blocks] = 0
		split.lengths[split.num_blocks] = uint32(length)
		split.num_blocks++
		return
	}

	histograms = make([]histogramDistance, num_histograms)

	/* Find good entropy codes. */
	initialEntropyCodesDistance(data, length, sampling_stride_length, num_histograms, histograms)

	refineEntropyCodesDistance(data, length, sampling_stride_length, num_histograms, histograms)
	{
		var block_ids []byte = make([]byte, length)
		var num_blocks uint = 0
		var bitmaplen uint = (num_histograms + 7) >> 3
		var insert_cost []float64 = make([]float64, (data_size * num_histograms))
		var cost []float64 = make([]float64, num_histograms)
		var switch_signal []byte = make([]byte, (length * bitmaplen))
		var new_id []uint16 = make([]uint16, num_histograms)
		var iters uint
		if params.quality < hqZopflificationQuality {
			iters = 3
		} else {
			iters = 10
		}
		/* Find a good path through literals with the good entropy codes. */

		var i uint
		for i = 0; i < iters; i++ {
			num_blocks = findBlocksDistance(data, length, block_switch_cost, num_histograms, histograms, insert_cost, cost, switch_signal, block_ids)
			num_histograms = remapBlockIdsDistance(block_ids, length, new_id, num_histograms)
			buildBlockHistogramsDistance(data, length, block_ids, num_histograms, histograms)
		}

		insert_cost = nil
		cost = nil
		switch_signal = nil
		new_id = nil
		histograms = nil
		clusterBlocksDistance(data, length, num_blocks, block_ids, split)
		block_ids = nil
	}
}
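
// Hypothetical usage sketch (added; not in the original source). The encoder
// drives splitByteVectorDistance roughly like this; the tuning values shown
// are illustrative placeholders, not the package's actual constants:
//
//	var split blockSplit
//	splitByteVectorDistance(
//		distancePrefixes, // []uint16 of distance prefix symbols
//		numDistances,     // how many entries are valid
//		544,              // literals_per_histogram (illustrative)
//		100,              // max_histograms (illustrative)
//		70,               // sampling_stride_length (illustrative)
//		28.1,             // block_switch_cost in bits (illustrative)
//		params, &split)
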
433
vendor/github.com/andybalholm/brotli/block_splitter_literal.go
generated
vendored
@ -1,433 +0,0 @@
package brotli

import "math"

/* Copyright 2013 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

func initialEntropyCodesLiteral(data []byte, length uint, stride uint, num_histograms uint, histograms []histogramLiteral) {
	var seed uint32 = 7
	var block_length uint = length / num_histograms
	var i uint
	clearHistogramsLiteral(histograms, num_histograms)
	for i = 0; i < num_histograms; i++ {
		var pos uint = length * i / num_histograms
		if i != 0 {
			pos += uint(myRand(&seed) % uint32(block_length))
		}

		if pos+stride >= length {
			pos = length - stride - 1
		}

		histogramAddVectorLiteral(&histograms[i], data[pos:], stride)
	}
}
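
// Illustrative sketch (added; not part of the original file): the seeding loop
// above samples one stride-long window per histogram at evenly spaced base
// positions, jittered by myRand. Without the jitter, the bases would be:
func samplePositionsSketch(length, numHistograms uint) []uint {
	positions := make([]uint, numHistograms)
	for i := uint(0); i < numHistograms; i++ {
		positions[i] = length * i / numHistograms // e.g. length=1000, n=4 -> 0, 250, 500, 750
	}
	return positions
}
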
func randomSampleLiteral(seed *uint32, data []byte, length uint, stride uint, sample *histogramLiteral) {
	var pos uint = 0
	if stride >= length {
		stride = length
	} else {
		pos = uint(myRand(seed) % uint32(length-stride+1))
	}

	histogramAddVectorLiteral(sample, data[pos:], stride)
}

func refineEntropyCodesLiteral(data []byte, length uint, stride uint, num_histograms uint, histograms []histogramLiteral) {
	var iters uint = kIterMulForRefining*length/stride + kMinItersForRefining
	var seed uint32 = 7
	var iter uint
	iters = ((iters + num_histograms - 1) / num_histograms) * num_histograms
	for iter = 0; iter < iters; iter++ {
		var sample histogramLiteral
		histogramClearLiteral(&sample)
		randomSampleLiteral(&seed, data, length, stride, &sample)
		histogramAddHistogramLiteral(&histograms[iter%num_histograms], &sample)
	}
}
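
// Note (added): the refinement pass rounds its iteration count up to a
// multiple of num_histograms so every histogram absorbs the same number of
// random samples. A minimal sketch of that rounding, with no other
// assumptions:
func roundUpToMultipleSketch(iters, n uint) uint {
	return ((iters + n - 1) / n) * n // e.g. roundUpToMultipleSketch(10, 4) == 12
}
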
/* Assigns a block id from the range [0, num_histograms) to each data element
   in data[0..length) and fills in block_id[0..length) with the assigned values.
   Returns the number of blocks, i.e. one plus the number of block switches. */
func findBlocksLiteral(data []byte, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramLiteral, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint {
	var data_size uint = histogramDataSizeLiteral()
	var bitmaplen uint = (num_histograms + 7) >> 3
	var num_blocks uint = 1
	var i uint
	var j uint
	assert(num_histograms <= 256)
	if num_histograms <= 1 {
		for i = 0; i < length; i++ {
			block_id[i] = 0
		}

		return 1
	}

	for i := 0; i < int(data_size*num_histograms); i++ {
		insert_cost[i] = 0
	}
	for i = 0; i < num_histograms; i++ {
		insert_cost[i] = fastLog2(uint(uint32(histograms[i].total_count_)))
	}

	for i = data_size; i != 0; {
		i--
		for j = 0; j < num_histograms; j++ {
			insert_cost[i*num_histograms+j] = insert_cost[j] - bitCost(uint(histograms[j].data_[i]))
		}
	}

	for i := 0; i < int(num_histograms); i++ {
		cost[i] = 0
	}
	for i := 0; i < int(length*bitmaplen); i++ {
		switch_signal[i] = 0
	}

	/* After each iteration of this loop, cost[k] will contain the difference
	   between the minimum cost of arriving at the current byte position using
	   entropy code k, and the minimum cost of arriving at the current byte
	   position. This difference is capped at the block switch cost, and if it
	   reaches block switch cost, it means that when we trace back from the last
	   position, we need to switch here. */
	for i = 0; i < length; i++ {
		var byte_ix uint = i
		var ix uint = byte_ix * bitmaplen
		var insert_cost_ix uint = uint(data[byte_ix]) * num_histograms
		var min_cost float64 = 1e99
		var block_switch_cost float64 = block_switch_bitcost
		var k uint
		for k = 0; k < num_histograms; k++ {
			/* We are coding the symbol in data[byte_ix] with entropy code k. */
			cost[k] += insert_cost[insert_cost_ix+k]

			if cost[k] < min_cost {
				min_cost = cost[k]
				block_id[byte_ix] = byte(k)
			}
		}

		/* More blocks for the beginning. */
		if byte_ix < 2000 {
			block_switch_cost *= 0.77 + 0.07*float64(byte_ix)/2000
		}

		for k = 0; k < num_histograms; k++ {
			cost[k] -= min_cost
			if cost[k] >= block_switch_cost {
				var mask byte = byte(1 << (k & 7))
				cost[k] = block_switch_cost
				assert(k>>3 < bitmaplen)
				switch_signal[ix+(k>>3)] |= mask
				/* Trace back from the last position and switch at the marked places. */
			}
		}
	}
	{
		var byte_ix uint = length - 1
		var ix uint = byte_ix * bitmaplen
		var cur_id byte = block_id[byte_ix]
		for byte_ix > 0 {
			var mask byte = byte(1 << (cur_id & 7))
			assert(uint(cur_id)>>3 < bitmaplen)
			byte_ix--
			ix -= bitmaplen
			if switch_signal[ix+uint(cur_id>>3)]&mask != 0 {
				if cur_id != block_id[byte_ix] {
					cur_id = block_id[byte_ix]
					num_blocks++
				}
			}

			block_id[byte_ix] = cur_id
		}
	}

	return num_blocks
}
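
// Worked example (added): take two entropy codes and block_switch_bitcost = 3.0.
// If after some position code 0 is the running best (cost[0] == 0) and code 1
// has fallen 5.0 bits behind, cost[1] is capped at 3.0 and code 1's bit in
// switch_signal is set there: once a code lags by a full block-switch cost,
// any optimal path that uses it later must have switched blocks at that point,
// which is what the backward trace above exploits. A minimal sketch of the
// cap rule:
func capCostSketch(cost, minCost, switchCost float64) (capped float64, markSwitch bool) {
	cost -= minCost
	if cost >= switchCost {
		return switchCost, true // caller would set the switch_signal bit here
	}
	return cost, false
}
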
var remapBlockIdsLiteral_kInvalidId uint16 = 256

func remapBlockIdsLiteral(block_ids []byte, length uint, new_id []uint16, num_histograms uint) uint {
	var next_id uint16 = 0
	var i uint
	for i = 0; i < num_histograms; i++ {
		new_id[i] = remapBlockIdsLiteral_kInvalidId
	}

	for i = 0; i < length; i++ {
		assert(uint(block_ids[i]) < num_histograms)
		if new_id[block_ids[i]] == remapBlockIdsLiteral_kInvalidId {
			new_id[block_ids[i]] = next_id
			next_id++
		}
	}

	for i = 0; i < length; i++ {
		block_ids[i] = byte(new_id[block_ids[i]])
		assert(uint(block_ids[i]) < num_histograms)
	}

	assert(uint(next_id) <= num_histograms)
	return uint(next_id)
}
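
// Illustrative trace (added): remapBlockIdsLiteral compacts ids in order of
// first appearance, so block_ids [3 3 0 5 0] with num_histograms = 6 becomes
// [0 0 1 2 1] and the function returns 3, the number of histograms still in
// use once the dead ones are squeezed out.
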
func buildBlockHistogramsLiteral(data []byte, length uint, block_ids []byte, num_histograms uint, histograms []histogramLiteral) {
	var i uint
	clearHistogramsLiteral(histograms, num_histograms)
	for i = 0; i < length; i++ {
		histogramAddLiteral(&histograms[block_ids[i]], uint(data[i]))
	}
}

var clusterBlocksLiteral_kInvalidIndex uint32 = math.MaxUint32

func clusterBlocksLiteral(data []byte, length uint, num_blocks uint, block_ids []byte, split *blockSplit) {
	var histogram_symbols []uint32 = make([]uint32, num_blocks)
	var block_lengths []uint32 = make([]uint32, num_blocks)
	var expected_num_clusters uint = clustersPerBatch * (num_blocks + histogramsPerBatch - 1) / histogramsPerBatch
	var all_histograms_size uint = 0
	var all_histograms_capacity uint = expected_num_clusters
	var all_histograms []histogramLiteral = make([]histogramLiteral, all_histograms_capacity)
	var cluster_size_size uint = 0
	var cluster_size_capacity uint = expected_num_clusters
	var cluster_size []uint32 = make([]uint32, cluster_size_capacity)
	var num_clusters uint = 0
	var histograms []histogramLiteral = make([]histogramLiteral, brotli_min_size_t(num_blocks, histogramsPerBatch))
	var max_num_pairs uint = histogramsPerBatch * histogramsPerBatch / 2
	var pairs_capacity uint = max_num_pairs + 1
	var pairs []histogramPair = make([]histogramPair, pairs_capacity)
	var pos uint = 0
	var clusters []uint32
	var num_final_clusters uint
	var new_index []uint32
	var i uint
	var sizes = [histogramsPerBatch]uint32{0}
	var new_clusters = [histogramsPerBatch]uint32{0}
	var symbols = [histogramsPerBatch]uint32{0}
	var remap = [histogramsPerBatch]uint32{0}

	for i := 0; i < int(num_blocks); i++ {
		block_lengths[i] = 0
	}
	{
		var block_idx uint = 0
		for i = 0; i < length; i++ {
			assert(block_idx < num_blocks)
			block_lengths[block_idx]++
			if i+1 == length || block_ids[i] != block_ids[i+1] {
				block_idx++
			}
		}

		assert(block_idx == num_blocks)
	}

	for i = 0; i < num_blocks; i += histogramsPerBatch {
		var num_to_combine uint = brotli_min_size_t(num_blocks-i, histogramsPerBatch)
		var num_new_clusters uint
		var j uint
		for j = 0; j < num_to_combine; j++ {
			var k uint
			histogramClearLiteral(&histograms[j])
			for k = 0; uint32(k) < block_lengths[i+j]; k++ {
				histogramAddLiteral(&histograms[j], uint(data[pos]))
				pos++
			}

			histograms[j].bit_cost_ = populationCostLiteral(&histograms[j])
			new_clusters[j] = uint32(j)
			symbols[j] = uint32(j)
			sizes[j] = 1
		}

		num_new_clusters = histogramCombineLiteral(histograms, sizes[:], symbols[:], new_clusters[:], []histogramPair(pairs), num_to_combine, num_to_combine, histogramsPerBatch, max_num_pairs)
		if all_histograms_capacity < (all_histograms_size + num_new_clusters) {
			var _new_size uint
			if all_histograms_capacity == 0 {
				_new_size = all_histograms_size + num_new_clusters
			} else {
				_new_size = all_histograms_capacity
			}
			var new_array []histogramLiteral
			for _new_size < (all_histograms_size + num_new_clusters) {
				_new_size *= 2
			}
			new_array = make([]histogramLiteral, _new_size)
			if all_histograms_capacity != 0 {
				copy(new_array, all_histograms[:all_histograms_capacity])
			}

			all_histograms = new_array
			all_histograms_capacity = _new_size
		}

		brotli_ensure_capacity_uint32_t(&cluster_size, &cluster_size_capacity, cluster_size_size+num_new_clusters)
		for j = 0; j < num_new_clusters; j++ {
			all_histograms[all_histograms_size] = histograms[new_clusters[j]]
			all_histograms_size++
			cluster_size[cluster_size_size] = sizes[new_clusters[j]]
			cluster_size_size++
			remap[new_clusters[j]] = uint32(j)
		}

		for j = 0; j < num_to_combine; j++ {
			histogram_symbols[i+j] = uint32(num_clusters) + remap[symbols[j]]
		}

		num_clusters += num_new_clusters
		assert(num_clusters == cluster_size_size)
		assert(num_clusters == all_histograms_size)
	}

	histograms = nil

	max_num_pairs = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters)
	if pairs_capacity < max_num_pairs+1 {
		pairs = nil
		pairs = make([]histogramPair, (max_num_pairs + 1))
	}

	clusters = make([]uint32, num_clusters)
	for i = 0; i < num_clusters; i++ {
		clusters[i] = uint32(i)
	}

	num_final_clusters = histogramCombineLiteral(all_histograms, cluster_size, histogram_symbols, clusters, pairs, num_clusters, num_blocks, maxNumberOfBlockTypes, max_num_pairs)
	pairs = nil
	cluster_size = nil

	new_index = make([]uint32, num_clusters)
	for i = 0; i < num_clusters; i++ {
		new_index[i] = clusterBlocksLiteral_kInvalidIndex
	}
	pos = 0
	{
		var next_index uint32 = 0
		for i = 0; i < num_blocks; i++ {
			var histo histogramLiteral
			var j uint
			var best_out uint32
			var best_bits float64
			histogramClearLiteral(&histo)
			for j = 0; uint32(j) < block_lengths[i]; j++ {
				histogramAddLiteral(&histo, uint(data[pos]))
				pos++
			}

			if i == 0 {
				best_out = histogram_symbols[0]
			} else {
				best_out = histogram_symbols[i-1]
			}
			best_bits = histogramBitCostDistanceLiteral(&histo, &all_histograms[best_out])
			for j = 0; j < num_final_clusters; j++ {
				var cur_bits float64 = histogramBitCostDistanceLiteral(&histo, &all_histograms[clusters[j]])
				if cur_bits < best_bits {
					best_bits = cur_bits
					best_out = clusters[j]
				}
			}

			histogram_symbols[i] = best_out
			if new_index[best_out] == clusterBlocksLiteral_kInvalidIndex {
				new_index[best_out] = next_index
				next_index++
			}
		}
	}

	clusters = nil
	all_histograms = nil
	brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, num_blocks)
	brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, num_blocks)
	{
		var cur_length uint32 = 0
		var block_idx uint = 0
		var max_type byte = 0
		for i = 0; i < num_blocks; i++ {
			cur_length += block_lengths[i]
			if i+1 == num_blocks || histogram_symbols[i] != histogram_symbols[i+1] {
				var id byte = byte(new_index[histogram_symbols[i]])
				split.types[block_idx] = id
				split.lengths[block_idx] = cur_length
				max_type = brotli_max_uint8_t(max_type, id)
				cur_length = 0
				block_idx++
			}
		}

		split.num_blocks = block_idx
		split.num_types = uint(max_type) + 1
	}

	new_index = nil
	block_lengths = nil
	histogram_symbols = nil
}
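
// Note (added): clusterBlocksLiteral mirrors clusterBlocksDistance in
// block_splitter_distance.go. The kInvalidIndex sentinel marks final clusters
// that have not yet won any block; new_index then hands out compact ids
// 0, 1, 2, ... in the order clusters first become some block's best match,
// which keeps split.types dense even though histogramCombineLiteral leaves
// gaps in the raw cluster id space.
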
func splitByteVectorLiteral(data []byte, length uint, literals_per_histogram uint, max_histograms uint, sampling_stride_length uint, block_switch_cost float64, params *encoderParams, split *blockSplit) {
	var data_size uint = histogramDataSizeLiteral()
	var num_histograms uint = length/literals_per_histogram + 1
	var histograms []histogramLiteral
	if num_histograms > max_histograms {
		num_histograms = max_histograms
	}

	if length == 0 {
		split.num_types = 1
		return
	} else if length < kMinLengthForBlockSplitting {
		brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, split.num_blocks+1)
		brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, split.num_blocks+1)
		split.num_types = 1
		split.types[split.num_blocks] = 0
		split.lengths[split.num_blocks] = uint32(length)
		split.num_blocks++
		return
	}

	histograms = make([]histogramLiteral, num_histograms)

	/* Find good entropy codes. */
	initialEntropyCodesLiteral(data, length, sampling_stride_length, num_histograms, histograms)

	refineEntropyCodesLiteral(data, length, sampling_stride_length, num_histograms, histograms)
	{
		var block_ids []byte = make([]byte, length)
		var num_blocks uint = 0
		var bitmaplen uint = (num_histograms + 7) >> 3
		var insert_cost []float64 = make([]float64, (data_size * num_histograms))
		var cost []float64 = make([]float64, num_histograms)
		var switch_signal []byte = make([]byte, (length * bitmaplen))
		var new_id []uint16 = make([]uint16, num_histograms)
		var iters uint
		if params.quality < hqZopflificationQuality {
			iters = 3
		} else {
			iters = 10
		}
		/* Find a good path through literals with the good entropy codes. */

		var i uint
		for i = 0; i < iters; i++ {
			num_blocks = findBlocksLiteral(data, length, block_switch_cost, num_histograms, histograms, insert_cost, cost, switch_signal, block_ids)
			num_histograms = remapBlockIdsLiteral(block_ids, length, new_id, num_histograms)
			buildBlockHistogramsLiteral(data, length, block_ids, num_histograms, histograms)
		}

		insert_cost = nil
		cost = nil
		switch_signal = nil
		new_id = nil
		histograms = nil
		clusterBlocksLiteral(data, length, num_blocks, block_ids, split)
		block_ids = nil
	}
}
1300
vendor/github.com/andybalholm/brotli/brotli_bit_stream.go
generated
vendored
File diff suppressed because it is too large
30
vendor/github.com/andybalholm/brotli/cluster.go
generated
vendored
@ -1,30 +0,0 @@
package brotli

/* Copyright 2013 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* Functions for clustering similar histograms together. */

type histogramPair struct {
	idx1       uint32
	idx2       uint32
	cost_combo float64
	cost_diff  float64
}

func histogramPairIsLess(p1 *histogramPair, p2 *histogramPair) bool {
	if p1.cost_diff != p2.cost_diff {
		return p1.cost_diff > p2.cost_diff
	}

	return (p1.idx2 - p1.idx1) > (p2.idx2 - p2.idx1)
}

/* Returns entropy reduction of the context map when we combine two clusters. */
func clusterCostDiff(size_a uint, size_b uint) float64 {
	var size_c uint = size_a + size_b
	return float64(size_a)*fastLog2(size_a) + float64(size_b)*fastLog2(size_b) - float64(size_c)*fastLog2(size_c)
}
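
// Worked example (added): for two singleton clusters, size_a = size_b = 1:
// 1*log2(1) + 1*log2(1) - 2*log2(2) = -2, i.e. the context map gives up about
// two bits of coding freedom. The call site in cluster_command.go halves this
// with a 0.5 factor before weighing it against the combined histogram cost.
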
164
vendor/github.com/andybalholm/brotli/cluster_command.go
generated
vendored
@ -1,164 +0,0 @@
package brotli

/* Copyright 2013 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* Computes the bit cost reduction by combining out[idx1] and out[idx2] and if
   it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue. */
func compareAndPushToQueueCommand(out []histogramCommand, cluster_size []uint32, idx1 uint32, idx2 uint32, max_num_pairs uint, pairs []histogramPair, num_pairs *uint) {
	var is_good_pair bool = false
	var p histogramPair
	p.idx2 = 0
	p.idx1 = p.idx2
	p.cost_combo = 0
	p.cost_diff = p.cost_combo
	if idx1 == idx2 {
		return
	}

	if idx2 < idx1 {
		var t uint32 = idx2
		idx2 = idx1
		idx1 = t
	}

	p.idx1 = idx1
	p.idx2 = idx2
	p.cost_diff = 0.5 * clusterCostDiff(uint(cluster_size[idx1]), uint(cluster_size[idx2]))
	p.cost_diff -= out[idx1].bit_cost_
	p.cost_diff -= out[idx2].bit_cost_

	if out[idx1].total_count_ == 0 {
		p.cost_combo = out[idx2].bit_cost_
		is_good_pair = true
	} else if out[idx2].total_count_ == 0 {
		p.cost_combo = out[idx1].bit_cost_
		is_good_pair = true
	} else {
		var threshold float64
		if *num_pairs == 0 {
			threshold = 1e99
		} else {
			threshold = brotli_max_double(0.0, pairs[0].cost_diff)
		}
		var combo histogramCommand = out[idx1]
		var cost_combo float64
		histogramAddHistogramCommand(&combo, &out[idx2])
		cost_combo = populationCostCommand(&combo)
		if cost_combo < threshold-p.cost_diff {
			p.cost_combo = cost_combo
			is_good_pair = true
		}
	}

	if is_good_pair {
		p.cost_diff += p.cost_combo
		if *num_pairs > 0 && histogramPairIsLess(&pairs[0], &p) {
			/* Replace the top of the queue if needed. */
			if *num_pairs < max_num_pairs {
				pairs[*num_pairs] = pairs[0]
				(*num_pairs)++
			}

			pairs[0] = p
		} else if *num_pairs < max_num_pairs {
			pairs[*num_pairs] = p
			(*num_pairs)++
		}
	}
}
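
// Note (added): pairs[] acts as a bounded priority queue whose best candidate
// (largest reduction, per histogramPairIsLess) sits at index 0. When a better
// pair arrives and there is room, the former front is demoted to the tail;
// when the queue is full, the front is simply overwritten. A minimal sketch of
// that push rule, assuming less(a, b) reports that a ranks below b:
func pushPairSketch(pairs []histogramPair, numPairs *uint, maxPairs uint, p histogramPair, less func(a, b *histogramPair) bool) {
	if *numPairs > 0 && less(&pairs[0], &p) {
		if *numPairs < maxPairs {
			pairs[*numPairs] = pairs[0] // demote old front into the tail
			(*numPairs)++
		}
		pairs[0] = p
	} else if *numPairs < maxPairs {
		pairs[*numPairs] = p
		(*numPairs)++
	}
}
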
func histogramCombineCommand(out []histogramCommand, cluster_size []uint32, symbols []uint32, clusters []uint32, pairs []histogramPair, num_clusters uint, symbols_size uint, max_clusters uint, max_num_pairs uint) uint {
	var cost_diff_threshold float64 = 0.0
	var min_cluster_size uint = 1
	var num_pairs uint = 0
	{
		/* We maintain a vector of histogram pairs, with the property that the pair
		   with the maximum bit cost reduction is the first. */
		var idx1 uint
		for idx1 = 0; idx1 < num_clusters; idx1++ {
			var idx2 uint
			for idx2 = idx1 + 1; idx2 < num_clusters; idx2++ {
				compareAndPushToQueueCommand(out, cluster_size, clusters[idx1], clusters[idx2], max_num_pairs, pairs[0:], &num_pairs)
			}
		}
	}

	for num_clusters > min_cluster_size {
		var best_idx1 uint32
		var best_idx2 uint32
		var i uint
		if pairs[0].cost_diff >= cost_diff_threshold {
			cost_diff_threshold = 1e99
			min_cluster_size = max_clusters
			continue
		}

		/* Take the best pair from the top of heap. */
		best_idx1 = pairs[0].idx1

		best_idx2 = pairs[0].idx2
		histogramAddHistogramCommand(&out[best_idx1], &out[best_idx2])
		out[best_idx1].bit_cost_ = pairs[0].cost_combo
		cluster_size[best_idx1] += cluster_size[best_idx2]
		for i = 0; i < symbols_size; i++ {
			if symbols[i] == best_idx2 {
				symbols[i] = best_idx1
			}
		}

		for i = 0; i < num_clusters; i++ {
			if clusters[i] == best_idx2 {
				copy(clusters[i:], clusters[i+1:][:num_clusters-i-1])
				break
			}
		}

		num_clusters--
		{
			/* Remove pairs intersecting the just combined best pair. */
			var copy_to_idx uint = 0
			for i = 0; i < num_pairs; i++ {
				var p *histogramPair = &pairs[i]
				if p.idx1 == best_idx1 || p.idx2 == best_idx1 || p.idx1 == best_idx2 || p.idx2 == best_idx2 {
					/* Remove invalid pair from the queue. */
					continue
				}

				if histogramPairIsLess(&pairs[0], p) {
					/* Replace the top of the queue if needed. */
					var front histogramPair = pairs[0]
					pairs[0] = *p
					pairs[copy_to_idx] = front
				} else {
					pairs[copy_to_idx] = *p
				}

				copy_to_idx++
			}

			num_pairs = copy_to_idx
		}

		/* Push new pairs formed with the combined histogram to the heap. */
		for i = 0; i < num_clusters; i++ {
			compareAndPushToQueueCommand(out, cluster_size, best_idx1, clusters[i], max_num_pairs, pairs[0:], &num_pairs)
		}
	}

	return num_clusters
}
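
// Note (added): histogramCombineCommand is a greedy agglomerative pass. Each
// round merges the queue's best pair, redirects symbols/clusters to the
// surviving index, drops queue entries that touched either merged cluster,
// and re-seeds the queue with pairs formed by the combined histogram. Once
// the best remaining pair stops being a net win (cost_diff >= 0), the
// threshold is lifted to 1e99 and merging continues, now accepting lossy
// merges, only while num_clusters still exceeds max_clusters.
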
/* What is the bit cost of moving histogram from cur_symbol to candidate. */
func histogramBitCostDistanceCommand(histogram *histogramCommand, candidate *histogramCommand) float64 {
	if histogram.total_count_ == 0 {
		return 0.0
	} else {
		var tmp histogramCommand = *histogram
		histogramAddHistogramCommand(&tmp, candidate)
		return populationCostCommand(&tmp) - candidate.bit_cost_
	}
}
Some files were not shown because too many files have changed in this diff