mirror of https://github.com/cloudflare/cloudflared.git (synced 2025-07-27 22:39:57 +00:00)

Add db-connect, a SQL over HTTPS server
28 vendor/github.com/kshvakov/clickhouse/.gitignore generated vendored Normal file
@@ -0,0 +1,28 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.out
*.exe
*.test
*.prof

coverage.txt
.idea/**
25 vendor/github.com/kshvakov/clickhouse/.travis.yml generated vendored Normal file
@@ -0,0 +1,25 @@
sudo: required
language: go
go:
  - 1.10.x
  - 1.11.x
  - master
go_import_path: github.com/kshvakov/clickhouse
services:
  - docker
install:
  - go get github.com/mattn/goveralls
  - go get golang.org/x/tools/cmd/cover
  - go get github.com/stretchr/testify/assert
  - go get github.com/cloudflare/golz4
  - go get github.com/bkaradzic/go-lz4
  - go get github.com/pierrec/lz4

before_install:
  - docker --version
  - docker-compose --version
  - docker-compose up -d
script:
  - ./go.test.sh
after_success:
  - bash <(curl -s https://codecov.io/bash)
10 vendor/github.com/kshvakov/clickhouse/CONTRIBUTING.md generated vendored Normal file
@@ -0,0 +1,10 @@
# Contributing notes

## Local setup

The easiest way to run tests is to use Docker Compose:

```
docker-compose up
make
```
21 vendor/github.com/kshvakov/clickhouse/LICENSE generated vendored Normal file
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2017 Kirill Shvakov

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
8 vendor/github.com/kshvakov/clickhouse/Makefile generated vendored Normal file
@@ -0,0 +1,8 @@
test:
	go install -race -v
	go test -i -v
	go test -race -timeout 30s -v .

coverage:
	go test -coverprofile=coverage.out -v .
	go tool cover -html=coverage.out
185 vendor/github.com/kshvakov/clickhouse/README.md generated vendored Normal file
@@ -0,0 +1,185 @@
# ClickHouse [](https://travis-ci.org/kshvakov/clickhouse) [](https://goreportcard.com/report/github.com/kshvakov/clickhouse) [](https://codecov.io/gh/kshvakov/clickhouse)

Golang SQL database driver for [Yandex ClickHouse](https://clickhouse.yandex/)

## Key features

* Uses the native ClickHouse TCP client-server protocol
* Compatibility with `database/sql`
* Round-robin load balancing
* Bulk write support: `begin->prepare->(in loop exec)->commit`
* LZ4 compression support (pure Go lz4 by default; switch to cgo lz4 by enabling the `clz4` build tag)

## DSN

* username/password - auth credentials
* database - select the current default database
* read_timeout/write_timeout - timeouts in seconds
* no_delay - disable/enable the Nagle algorithm for the TCP socket (default is 'true', i.e. disabled)
* alt_hosts - comma-separated list of additional single-address hosts for load balancing
* connection_open_strategy - random/in_order (default random).
  * random - choose a random server from the set
  * in_order - the first live server is chosen in the specified order
* block_size - maximum rows in a block (default is 1000000). If there are more rows, the data is split into several blocks to send to the server
* pool_size - maximum amount of preallocated byte chunks used in queries (default is 100). Decrease this if you experience memory problems, at the expense of more GC pressure, and vice versa.
* debug - enable debug output (boolean value)

SSL/TLS parameters:

* secure - establish a secure connection (default is false)
* skip_verify - skip certificate verification (default is false)
* tls_config - name of a TLS config with client certificates, registered using `clickhouse.RegisterTLSConfig()`; implies secure to be true, unless explicitly specified

example:
```
tcp://host1:9000?username=user&password=qwerty&database=clicks&read_timeout=10&write_timeout=20&alt_hosts=host2:9000,host3:9000
```

## Supported data types

* UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64
* Float32, Float64
* String
* FixedString(N)
* Date
* DateTime
* IPv4
* IPv6
* Enum
* UUID
* Nullable(T)
* [Array(T) (one-dimensional)](https://clickhouse.yandex/reference_en.html#Array(T)) [godoc](https://godoc.org/github.com/kshvakov/clickhouse#Array)

## TODO

* Support other compression methods (zstd, ...)

## Install
```
go get -u github.com/kshvakov/clickhouse
```

## Example
```go
package main

import (
	"database/sql"
	"fmt"
	"log"
	"time"

	"github.com/kshvakov/clickhouse"
)

func main() {
	connect, err := sql.Open("clickhouse", "tcp://127.0.0.1:9000?debug=true")
	if err != nil {
		log.Fatal(err)
	}
	if err := connect.Ping(); err != nil {
		if exception, ok := err.(*clickhouse.Exception); ok {
			fmt.Printf("[%d] %s \n%s\n", exception.Code, exception.Message, exception.StackTrace)
		} else {
			fmt.Println(err)
		}
		return
	}

	_, err = connect.Exec(`
		CREATE TABLE IF NOT EXISTS example (
			country_code FixedString(2),
			os_id        UInt8,
			browser_id   UInt8,
			categories   Array(Int16),
			action_day   Date,
			action_time  DateTime
		) engine=Memory
	`)

	if err != nil {
		log.Fatal(err)
	}
	var (
		tx, _   = connect.Begin()
		stmt, _ = tx.Prepare("INSERT INTO example (country_code, os_id, browser_id, categories, action_day, action_time) VALUES (?, ?, ?, ?, ?, ?)")
	)
	defer stmt.Close()

	for i := 0; i < 100; i++ {
		if _, err := stmt.Exec(
			"RU",
			10+i,
			100+i,
			clickhouse.Array([]int16{1, 2, 3}),
			time.Now(),
			time.Now(),
		); err != nil {
			log.Fatal(err)
		}
	}

	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}

	rows, err := connect.Query("SELECT country_code, os_id, browser_id, categories, action_day, action_time FROM example")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var (
			country               string
			os, browser           uint8
			categories            []int16
			actionDay, actionTime time.Time
		)
		if err := rows.Scan(&country, &os, &browser, &categories, &actionDay, &actionTime); err != nil {
			log.Fatal(err)
		}
		log.Printf("country: %s, os: %d, browser: %d, categories: %v, action_day: %s, action_time: %s", country, os, browser, categories, actionDay, actionTime)
	}

	if _, err := connect.Exec("DROP TABLE example"); err != nil {
		log.Fatal(err)
	}
}
```

Use [sqlx](https://github.com/jmoiron/sqlx)

```go
package main

import (
	"log"
	"time"

	"github.com/jmoiron/sqlx"
	_ "github.com/kshvakov/clickhouse"
)

func main() {
	connect, err := sqlx.Open("clickhouse", "tcp://127.0.0.1:9000?debug=true")
	if err != nil {
		log.Fatal(err)
	}
	var items []struct {
		CountryCode string    `db:"country_code"`
		OsID        uint8     `db:"os_id"`
		BrowserID   uint8     `db:"browser_id"`
		Categories  []int16   `db:"categories"`
		ActionTime  time.Time `db:"action_time"`
	}

	if err := connect.Select(&items, "SELECT country_code, os_id, browser_id, categories, action_time FROM example"); err != nil {
		log.Fatal(err)
	}

	for _, item := range items {
		log.Printf("country: %s, os: %d, browser: %d, categories: %v, action_time: %s", item.CountryCode, item.OsID, item.BrowserID, item.Categories, item.ActionTime)
	}
}
```
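The `tls_config` DSN option in the README above refers to `clickhouse.RegisterTLSConfig()` but the README does not show it in use. A minimal sketch of wiring it up, assuming the driver exposes `RegisterTLSConfig(name string, config *tls.Config) error` (the function name comes from the README; the exact signature, the host `host1:9440`, and the config name `clicks` are illustrative assumptions):

```go
package main

import (
	"crypto/tls"
	"database/sql"
	"log"

	"github.com/kshvakov/clickhouse"
)

func main() {
	// Assumed signature: RegisterTLSConfig(name string, config *tls.Config) error.
	if err := clickhouse.RegisterTLSConfig("clicks", &tls.Config{}); err != nil {
		log.Fatal(err)
	}
	// Per the DSN notes above, tls_config implies secure=true unless overridden.
	connect, err := sql.Open("clickhouse", "tcp://host1:9440?username=user&tls_config=clicks")
	if err != nil {
		log.Fatal(err)
	}
	defer connect.Close()
	if err := connect.Ping(); err != nil {
		log.Fatal(err)
	}
}
```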
21 vendor/github.com/kshvakov/clickhouse/array.go generated vendored Normal file
@@ -0,0 +1,21 @@
package clickhouse

import (
	"time"
)

func Array(v interface{}) interface{} {
	return v
}

func ArrayFixedString(len int, v interface{}) interface{} {
	return v
}

func ArrayDate(v []time.Time) interface{} {
	return v
}

func ArrayDateTime(v []time.Time) interface{} {
	return v
}
247 vendor/github.com/kshvakov/clickhouse/bootstrap.go generated vendored Normal file
@@ -0,0 +1,247 @@
package clickhouse

import (
	"bufio"
	"database/sql"
	"database/sql/driver"
	"fmt"
	"io"
	"log"
	"net/url"
	"os"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/kshvakov/clickhouse/lib/leakypool"

	"github.com/kshvakov/clickhouse/lib/binary"
	"github.com/kshvakov/clickhouse/lib/data"
	"github.com/kshvakov/clickhouse/lib/protocol"
)

const (
	// DefaultDatabase when connecting to ClickHouse
	DefaultDatabase = "default"
	// DefaultUsername when connecting to ClickHouse
	DefaultUsername = "default"
	// DefaultConnTimeout when connecting to ClickHouse
	DefaultConnTimeout = 5 * time.Second
	// DefaultReadTimeout when reading query results
	DefaultReadTimeout = time.Minute
	// DefaultWriteTimeout when sending queries
	DefaultWriteTimeout = time.Minute
)

var (
	unixtime    int64
	logOutput   io.Writer = os.Stdout
	hostname, _           = os.Hostname()
	poolInit    sync.Once
)

func init() {
	sql.Register("clickhouse", &bootstrap{})
	go func() {
		for tick := time.Tick(time.Second); ; {
			select {
			case <-tick:
				atomic.AddInt64(&unixtime, int64(time.Second))
			}
		}
	}()
}

func now() time.Time {
	return time.Unix(atomic.LoadInt64(&unixtime), 0)
}

type bootstrap struct{}

func (d *bootstrap) Open(dsn string) (driver.Conn, error) {
	return Open(dsn)
}

// SetLogOutput allows to change output of the default logger
func SetLogOutput(output io.Writer) {
	logOutput = output
}

// Open the connection
func Open(dsn string) (driver.Conn, error) {
	return open(dsn)
}

func open(dsn string) (*clickhouse, error) {
	url, err := url.Parse(dsn)
	if err != nil {
		return nil, err
	}
	var (
		hosts            = []string{url.Host}
		query            = url.Query()
		secure           = false
		skipVerify       = false
		tlsConfigName    = query.Get("tls_config")
		noDelay          = true
		compress         = false
		database         = query.Get("database")
		username         = query.Get("username")
		password         = query.Get("password")
		blockSize        = 1000000
		connTimeout      = DefaultConnTimeout
		readTimeout      = DefaultReadTimeout
		writeTimeout     = DefaultWriteTimeout
		connOpenStrategy = connOpenRandom
		poolSize         = 100
	)
	if len(database) == 0 {
		database = DefaultDatabase
	}
	if len(username) == 0 {
		username = DefaultUsername
	}
	if v, err := strconv.ParseBool(query.Get("no_delay")); err == nil {
		noDelay = v
	}
	tlsConfig := getTLSConfigClone(tlsConfigName)
	if tlsConfigName != "" && tlsConfig == nil {
		return nil, fmt.Errorf("invalid tls_config - no config registered under name %s", tlsConfigName)
	}
	secure = tlsConfig != nil
	if v, err := strconv.ParseBool(query.Get("secure")); err == nil {
		secure = v
	}
	if v, err := strconv.ParseBool(query.Get("skip_verify")); err == nil {
		skipVerify = v
	}
	if duration, err := strconv.ParseFloat(query.Get("timeout"), 64); err == nil {
		connTimeout = time.Duration(duration * float64(time.Second))
	}
	if duration, err := strconv.ParseFloat(query.Get("read_timeout"), 64); err == nil {
		readTimeout = time.Duration(duration * float64(time.Second))
	}
	if duration, err := strconv.ParseFloat(query.Get("write_timeout"), 64); err == nil {
		writeTimeout = time.Duration(duration * float64(time.Second))
	}
	if size, err := strconv.ParseInt(query.Get("block_size"), 10, 64); err == nil {
		blockSize = int(size)
	}
	if size, err := strconv.ParseInt(query.Get("pool_size"), 10, 64); err == nil {
		poolSize = int(size)
	}
	poolInit.Do(func() {
		leakypool.InitBytePool(poolSize)
	})
	if altHosts := strings.Split(query.Get("alt_hosts"), ","); len(altHosts) != 0 {
		for _, host := range altHosts {
			if len(host) != 0 {
				hosts = append(hosts, host)
			}
		}
	}
	switch query.Get("connection_open_strategy") {
	case "random":
		connOpenStrategy = connOpenRandom
	case "in_order":
		connOpenStrategy = connOpenInOrder
	}

	settings, err := makeQuerySettings(query)
	if err != nil {
		return nil, err
	}

	if v, err := strconv.ParseBool(query.Get("compress")); err == nil {
		compress = v
	}

	var (
		ch = clickhouse{
			logf:      func(string, ...interface{}) {},
			settings:  settings,
			compress:  compress,
			blockSize: blockSize,
			ServerInfo: data.ServerInfo{
				Timezone: time.Local,
			},
		}
		logger = log.New(logOutput, "[clickhouse]", 0)
	)
	if debug, err := strconv.ParseBool(url.Query().Get("debug")); err == nil && debug {
		ch.logf = logger.Printf
	}
	ch.logf("host(s)=%s, database=%s, username=%s",
		strings.Join(hosts, ", "),
		database,
		username,
	)
	options := connOptions{
		secure:       secure,
		tlsConfig:    tlsConfig,
		skipVerify:   skipVerify,
		hosts:        hosts,
		connTimeout:  connTimeout,
		readTimeout:  readTimeout,
		writeTimeout: writeTimeout,
		noDelay:      noDelay,
		openStrategy: connOpenStrategy,
		logf:         ch.logf,
	}
	if ch.conn, err = dial(options); err != nil {
		return nil, err
	}
	logger.SetPrefix(fmt.Sprintf("[clickhouse][connect=%d]", ch.conn.ident))
	ch.buffer = bufio.NewWriter(ch.conn)

	ch.decoder = binary.NewDecoder(ch.conn)
	ch.encoder = binary.NewEncoder(ch.buffer)

	if err := ch.hello(database, username, password); err != nil {
		return nil, err
	}
	return &ch, nil
}

func (ch *clickhouse) hello(database, username, password string) error {
	ch.logf("[hello] -> %s", ch.ClientInfo)
	{
		ch.encoder.Uvarint(protocol.ClientHello)
		if err := ch.ClientInfo.Write(ch.encoder); err != nil {
			return err
		}
		{
			ch.encoder.String(database)
			ch.encoder.String(username)
			ch.encoder.String(password)
		}
		if err := ch.encoder.Flush(); err != nil {
			return err
		}
	}
	{
		packet, err := ch.decoder.Uvarint()
		if err != nil {
			return err
		}
		switch packet {
		case protocol.ServerException:
			return ch.exception()
		case protocol.ServerHello:
			if err := ch.ServerInfo.Read(ch.decoder); err != nil {
				return err
			}
		case protocol.ServerEndOfStream:
			ch.logf("[bootstrap] <- end of stream")
			return nil
		default:
			ch.conn.Close()
			return fmt.Errorf("[hello] unexpected packet [%d] from server", packet)
		}
	}
	ch.logf("[hello] <- %s", ch.ServerInfo)
	return nil
}
320 vendor/github.com/kshvakov/clickhouse/clickhouse.go generated vendored Normal file
@@ -0,0 +1,320 @@
package clickhouse

import (
	"bufio"
	"context"
	"database/sql"
	"database/sql/driver"
	"errors"
	"fmt"
	"net"
	"reflect"
	"regexp"
	"sync"
	"time"

	"github.com/kshvakov/clickhouse/lib/binary"
	"github.com/kshvakov/clickhouse/lib/column"
	"github.com/kshvakov/clickhouse/lib/data"
	"github.com/kshvakov/clickhouse/lib/protocol"
	"github.com/kshvakov/clickhouse/lib/types"
)

type (
	Date     = types.Date
	DateTime = types.DateTime
	UUID     = types.UUID
)

var (
	ErrInsertInNotBatchMode = errors.New("insert statement supported only in the batch mode (use begin/commit)")
	ErrLimitDataRequestInTx = errors.New("data request has already been prepared in transaction")
)

var (
	splitInsertRe = regexp.MustCompile(`(?i)\sVALUES\s*\(`)
)

type logger func(format string, v ...interface{})

type clickhouse struct {
	sync.Mutex
	data.ServerInfo
	data.ClientInfo
	logf          logger
	conn          *connect
	block         *data.Block
	buffer        *bufio.Writer
	decoder       *binary.Decoder
	encoder       *binary.Encoder
	settings      *querySettings
	compress      bool
	blockSize     int
	inTransaction bool
}

func (ch *clickhouse) Prepare(query string) (driver.Stmt, error) {
	return ch.prepareContext(context.Background(), query)
}

func (ch *clickhouse) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
	return ch.prepareContext(ctx, query)
}

func (ch *clickhouse) prepareContext(ctx context.Context, query string) (driver.Stmt, error) {
	ch.logf("[prepare] %s", query)
	switch {
	case ch.conn.closed:
		return nil, driver.ErrBadConn
	case ch.block != nil:
		return nil, ErrLimitDataRequestInTx
	case isInsert(query):
		if !ch.inTransaction {
			return nil, ErrInsertInNotBatchMode
		}
		return ch.insert(query)
	}
	return &stmt{
		ch:       ch,
		query:    query,
		numInput: numInput(query),
	}, nil
}

func (ch *clickhouse) insert(query string) (_ driver.Stmt, err error) {
	if err := ch.sendQuery(splitInsertRe.Split(query, -1)[0] + " VALUES "); err != nil {
		return nil, err
	}
	if ch.block, err = ch.readMeta(); err != nil {
		return nil, err
	}
	return &stmt{
		ch:       ch,
		isInsert: true,
	}, nil
}

func (ch *clickhouse) Begin() (driver.Tx, error) {
	return ch.beginTx(context.Background(), txOptions{})
}

func (ch *clickhouse) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
	return ch.beginTx(ctx, txOptions{
		Isolation: int(opts.Isolation),
		ReadOnly:  opts.ReadOnly,
	})
}

type txOptions struct {
	Isolation int
	ReadOnly  bool
}

func (ch *clickhouse) beginTx(ctx context.Context, opts txOptions) (*clickhouse, error) {
	ch.logf("[begin] tx=%t, data=%t", ch.inTransaction, ch.block != nil)
	switch {
	case ch.inTransaction:
		return nil, sql.ErrTxDone
	case ch.conn.closed:
		return nil, driver.ErrBadConn
	}
	if finish := ch.watchCancel(ctx); finish != nil {
		defer finish()
	}
	ch.block = nil
	ch.inTransaction = true
	return ch, nil
}

func (ch *clickhouse) Commit() error {
	ch.logf("[commit] tx=%t, data=%t", ch.inTransaction, ch.block != nil)
	defer func() {
		if ch.block != nil {
			ch.block.Reset()
			ch.block = nil
		}
		ch.inTransaction = false
	}()
	switch {
	case !ch.inTransaction:
		return sql.ErrTxDone
	case ch.conn.closed:
		return driver.ErrBadConn
	}
	if ch.block != nil {
		if err := ch.writeBlock(ch.block); err != nil {
			return err
		}
		// Send empty block as marker of end of data.
		if err := ch.writeBlock(&data.Block{}); err != nil {
			return err
		}
		if err := ch.encoder.Flush(); err != nil {
			return err
		}
		return ch.process()
	}
	return nil
}

func (ch *clickhouse) Rollback() error {
	ch.logf("[rollback] tx=%t, data=%t", ch.inTransaction, ch.block != nil)
	if !ch.inTransaction {
		return sql.ErrTxDone
	}
	if ch.block != nil {
		ch.block.Reset()
	}
	ch.block = nil
	ch.buffer = nil
	ch.inTransaction = false
	return ch.conn.Close()
}

func (ch *clickhouse) CheckNamedValue(nv *driver.NamedValue) error {
	switch nv.Value.(type) {
	case column.IP, column.UUID:
		return nil
	case nil, []byte, int8, int16, int32, int64, uint8, uint16, uint32, uint64, float32, float64, string, time.Time:
		return nil
	}
	switch v := nv.Value.(type) {
	case
		[]int, []int8, []int16, []int32, []int64,
		[]uint, []uint8, []uint16, []uint32, []uint64,
		[]float32, []float64,
		[]string:
		return nil
	case net.IP:
		return nil
	case driver.Valuer:
		value, err := v.Value()
		if err != nil {
			return err
		}
		nv.Value = value
	default:
		switch value := reflect.ValueOf(nv.Value); value.Kind() {
		case reflect.Slice:
			return nil
		case reflect.Bool:
			nv.Value = uint8(0)
			if value.Bool() {
				nv.Value = uint8(1)
			}
		case reflect.Int8:
			nv.Value = int8(value.Int())
		case reflect.Int16:
			nv.Value = int16(value.Int())
		case reflect.Int32:
			nv.Value = int32(value.Int())
		case reflect.Int64:
			nv.Value = value.Int()
		case reflect.Uint8:
			nv.Value = uint8(value.Uint())
		case reflect.Uint16:
			nv.Value = uint16(value.Uint())
		case reflect.Uint32:
			nv.Value = uint32(value.Uint())
		case reflect.Uint64:
			nv.Value = uint64(value.Uint())
		case reflect.Float32:
			nv.Value = float32(value.Float())
		case reflect.Float64:
			nv.Value = float64(value.Float())
		case reflect.String:
			nv.Value = value.String()
		}
	}
	return nil
}

func (ch *clickhouse) Close() error {
	ch.block = nil
	return ch.conn.Close()
}

func (ch *clickhouse) process() error {
	packet, err := ch.decoder.Uvarint()
	if err != nil {
		return err
	}
	for {
		switch packet {
		case protocol.ServerPong:
			ch.logf("[process] <- pong")
			return nil
		case protocol.ServerException:
			ch.logf("[process] <- exception")
			return ch.exception()
		case protocol.ServerProgress:
			progress, err := ch.progress()
			if err != nil {
				return err
			}
			ch.logf("[process] <- progress: rows=%d, bytes=%d, total rows=%d",
				progress.rows,
				progress.bytes,
				progress.totalRows,
			)
		case protocol.ServerProfileInfo:
			profileInfo, err := ch.profileInfo()
			if err != nil {
				return err
			}
			ch.logf("[process] <- profiling: rows=%d, bytes=%d, blocks=%d", profileInfo.rows, profileInfo.bytes, profileInfo.blocks)
		case protocol.ServerData:
			block, err := ch.readBlock()
			if err != nil {
				return err
			}
			ch.logf("[process] <- data: packet=%d, columns=%d, rows=%d", packet, block.NumColumns, block.NumRows)
		case protocol.ServerEndOfStream:
			ch.logf("[process] <- end of stream")
			return nil
		default:
			ch.conn.Close()
			return fmt.Errorf("[process] unexpected packet [%d] from server", packet)
		}
		if packet, err = ch.decoder.Uvarint(); err != nil {
			return err
		}
	}
}

func (ch *clickhouse) cancel() error {
	ch.logf("[cancel request]")
	// even if we fail to write the cancel, we still need to close
	err := ch.encoder.Uvarint(protocol.ClientCancel)
	if err == nil {
		err = ch.encoder.Flush()
	}
	// return the close error if there was one, otherwise return the write error
	if cerr := ch.conn.Close(); cerr != nil {
		return cerr
	}
	return err
}

func (ch *clickhouse) watchCancel(ctx context.Context) func() {
	if done := ctx.Done(); done != nil {
		finished := make(chan struct{})
		go func() {
			select {
			case <-done:
				ch.cancel()
				finished <- struct{}{}
				ch.logf("[cancel] <- done")
			case <-finished:
				ch.logf("[cancel] <- finished")
			}
		}()
		return func() {
			select {
			case <-finished:
			case finished <- struct{}{}:
			}
		}
	}
	return func() {}
}
47 vendor/github.com/kshvakov/clickhouse/clickhouse_exception.go generated vendored Normal file
@@ -0,0 +1,47 @@
package clickhouse

import (
	"fmt"
	"strings"
)

type Exception struct {
	Code       int32
	Name       string
	Message    string
	StackTrace string
	nested     error
}

func (e *Exception) Error() string {
	return fmt.Sprintf("code: %d, message: %s", e.Code, e.Message)
}

func (ch *clickhouse) exception() error {
	defer ch.conn.Close()
	var (
		e         Exception
		err       error
		hasNested bool
	)
	if e.Code, err = ch.decoder.Int32(); err != nil {
		return err
	}
	if e.Name, err = ch.decoder.String(); err != nil {
		return err
	}
	if e.Message, err = ch.decoder.String(); err != nil {
		return err
	}
	e.Message = strings.TrimSpace(strings.TrimPrefix(e.Message, e.Name+":"))
	if e.StackTrace, err = ch.decoder.String(); err != nil {
		return err
	}
	if hasNested, err = ch.decoder.Bool(); err != nil {
		return err
	}
	if hasNested {
		e.nested = ch.exception()
	}
	return &e
}
28 vendor/github.com/kshvakov/clickhouse/clickhouse_ping.go generated vendored Normal file
@@ -0,0 +1,28 @@
package clickhouse

import (
	"context"
	"database/sql/driver"

	"github.com/kshvakov/clickhouse/lib/protocol"
)

func (ch *clickhouse) Ping(ctx context.Context) error {
	return ch.ping(ctx)
}

func (ch *clickhouse) ping(ctx context.Context) error {
	if ch.conn.closed {
		return driver.ErrBadConn
	}
	ch.logf("-> ping")
	finish := ch.watchCancel(ctx)
	defer finish()
	if err := ch.encoder.Uvarint(protocol.ClientPing); err != nil {
		return err
	}
	if err := ch.encoder.Flush(); err != nil {
		return err
	}
	return ch.process()
}
37 vendor/github.com/kshvakov/clickhouse/clickhouse_profile_info.go generated vendored Normal file
@@ -0,0 +1,37 @@
package clickhouse

type profileInfo struct {
	rows                      uint64
	bytes                     uint64
	blocks                    uint64
	appliedLimit              bool
	rowsBeforeLimit           uint64
	calculatedRowsBeforeLimit bool
}

func (ch *clickhouse) profileInfo() (*profileInfo, error) {
	var (
		p   profileInfo
		err error
	)
	if p.rows, err = ch.decoder.Uvarint(); err != nil {
		return nil, err
	}
	if p.blocks, err = ch.decoder.Uvarint(); err != nil {
		return nil, err
	}
	if p.bytes, err = ch.decoder.Uvarint(); err != nil {
		return nil, err
	}

	if p.appliedLimit, err = ch.decoder.Bool(); err != nil {
		return nil, err
	}
	if p.rowsBeforeLimit, err = ch.decoder.Uvarint(); err != nil {
		return nil, err
	}
	if p.calculatedRowsBeforeLimit, err = ch.decoder.Bool(); err != nil {
		return nil, err
	}
	return &p, nil
}
26 vendor/github.com/kshvakov/clickhouse/clickhouse_progress.go generated vendored Normal file
@@ -0,0 +1,26 @@
package clickhouse

type progress struct {
	rows      uint64
	bytes     uint64
	totalRows uint64
}

func (ch *clickhouse) progress() (*progress, error) {
	var (
		p   progress
		err error
	)
	if p.rows, err = ch.decoder.Uvarint(); err != nil {
		return nil, err
	}
	if p.bytes, err = ch.decoder.Uvarint(); err != nil {
		return nil, err
	}

	if p.totalRows, err = ch.decoder.Uvarint(); err != nil {
		return nil, err
	}

	return &p, nil
}
19 vendor/github.com/kshvakov/clickhouse/clickhouse_read_block.go generated vendored Normal file
@@ -0,0 +1,19 @@
package clickhouse

import (
	"github.com/kshvakov/clickhouse/lib/data"
)

func (ch *clickhouse) readBlock() (*data.Block, error) {
	if _, err := ch.decoder.String(); err != nil { // temporary table
		return nil, err
	}

	ch.decoder.SelectCompress(ch.compress)
	var block data.Block
	if err := block.Read(&ch.ServerInfo, ch.decoder); err != nil {
		return nil, err
	}
	ch.decoder.SelectCompress(false)
	return &block, nil
}
53 vendor/github.com/kshvakov/clickhouse/clickhouse_read_meta.go generated vendored Normal file
@@ -0,0 +1,53 @@
package clickhouse

import (
	"fmt"

	"github.com/kshvakov/clickhouse/lib/data"
	"github.com/kshvakov/clickhouse/lib/protocol"
)

func (ch *clickhouse) readMeta() (*data.Block, error) {
	for {
		packet, err := ch.decoder.Uvarint()
		if err != nil {
			return nil, err
		}

		switch packet {
		case protocol.ServerException:
			ch.logf("[read meta] <- exception")
			return nil, ch.exception()
		case protocol.ServerProgress:
			progress, err := ch.progress()
			if err != nil {
				return nil, err
			}
			ch.logf("[read meta] <- progress: rows=%d, bytes=%d, total rows=%d",
				progress.rows,
				progress.bytes,
				progress.totalRows,
			)
		case protocol.ServerProfileInfo:
			profileInfo, err := ch.profileInfo()
			if err != nil {
				return nil, err
			}
			ch.logf("[read meta] <- profiling: rows=%d, bytes=%d, blocks=%d", profileInfo.rows, profileInfo.bytes, profileInfo.blocks)
		case protocol.ServerData:
			block, err := ch.readBlock()
			if err != nil {
				return nil, err
			}
			ch.logf("[read meta] <- data: packet=%d, columns=%d, rows=%d", packet, block.NumColumns, block.NumRows)
			return block, nil
		case protocol.ServerEndOfStream:
			_, err := ch.readBlock()
			ch.logf("[process] <- end of stream")
			return nil, err
		default:
			ch.conn.Close()
			return nil, fmt.Errorf("[read meta] unexpected packet [%d] from server", packet)
		}
	}
}
60 vendor/github.com/kshvakov/clickhouse/clickhouse_send_query.go generated vendored Normal file
@@ -0,0 +1,60 @@
package clickhouse

import (
	"github.com/kshvakov/clickhouse/lib/data"
	"github.com/kshvakov/clickhouse/lib/protocol"
)

func (ch *clickhouse) sendQuery(query string) error {
	ch.logf("[send query] %s", query)
	if err := ch.encoder.Uvarint(protocol.ClientQuery); err != nil {
		return err
	}
	if err := ch.encoder.String(""); err != nil {
		return err
	}
	{ // client info
		ch.encoder.Uvarint(1)
		ch.encoder.String("")
		ch.encoder.String("") // initial_query_id
		ch.encoder.String("[::ffff:127.0.0.1]:0")
		ch.encoder.Uvarint(1) // iface type TCP
		ch.encoder.String(hostname)
		ch.encoder.String(hostname)
	}
	if err := ch.ClientInfo.Write(ch.encoder); err != nil {
		return err
	}
	if ch.ServerInfo.Revision >= protocol.DBMS_MIN_REVISION_WITH_QUOTA_KEY_IN_CLIENT_INFO {
		ch.encoder.String("")
	}

	// the settings are written as a list of contiguous name-value pairs, finished with an empty name
	if !ch.settings.IsEmpty() {
		ch.logf("[query settings] %s", ch.settings.settingsStr)
		if err := ch.settings.Serialize(ch.encoder); err != nil {
			return err
		}
	}
	// empty string is a marker of the end of the settings
	if err := ch.encoder.String(""); err != nil {
		return err
	}
	if err := ch.encoder.Uvarint(protocol.StateComplete); err != nil {
		return err
	}
	compress := protocol.CompressDisable
	if ch.compress {
		compress = protocol.CompressEnable
	}
	if err := ch.encoder.Uvarint(compress); err != nil {
		return err
	}
	if err := ch.encoder.String(query); err != nil {
		return err
	}
	if err := ch.writeBlock(&data.Block{}); err != nil {
		return err
	}
	return ch.encoder.Flush()
}
40 vendor/github.com/kshvakov/clickhouse/clickhouse_write_block.go generated vendored Normal file
@@ -0,0 +1,40 @@
package clickhouse

import (
	"github.com/kshvakov/clickhouse/lib/data"
	"github.com/kshvakov/clickhouse/lib/protocol"
)

func (ch *clickhouse) writeBlock(block *data.Block) error {
	ch.Lock()
	defer ch.Unlock()
	if err := ch.encoder.Uvarint(protocol.ClientData); err != nil {
		return err
	}

	if err := ch.encoder.String(""); err != nil { // temporary table
		return err
	}

	// implement CityHash v 1.0.2 and add LZ4 compression
	/*
		From Alexey Milovidov:
		As far as I remember, only blocks of Native-format data are compressed; everything else
		(packet numbers and the like) is sent uncompressed.

		Compressed data is a sequence of compressed frames.
		Each frame looks like this:
		checksum (16 bytes),
		compression algorithm identifier (1 byte),
		size of the compressed data (4 bytes, little endian; it does not include the checksum but does include the other 9 header bytes),
		size of the uncompressed data (4 bytes, little endian), then the compressed data itself.
		Algorithm identifiers: 0x82 - lz4, 0x90 - zstd.
		The checksum is CityHash128 from CityHash version 1.0.2, computed over the compressed data including the 9 header bytes.

		See CompressedReadBufferBase, CompressedWriteBuffer,
		utils/compressor, TCPHandler.
	*/
	ch.encoder.SelectCompress(ch.compress)
	err := block.Write(&ch.ServerInfo, ch.encoder)
	ch.encoder.SelectCompress(false)
	return err
}
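The comment above describes the compressed frame layout that the `lib/binary` compress writer further down in this diff implements. A minimal standalone sketch of filling that 25-byte header, with offsets and constants mirrored from `compress_writer.go` and `compress_settings.go` (the CityHash128 checksum bytes are left zeroed here because they require the vendored `cityhash102` package):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// buildFrameHeader lays out one frame header: [0:16) checksum, [16] method
// byte, [17:21) compressed size (includes the 9 trailing header bytes,
// excludes the checksum), [21:25) uncompressed size. The compressed payload
// follows the header on the wire.
func buildFrameHeader(compressedLen, uncompressedLen int) []byte {
	const (
		checksumSize       = 16
		compressHeaderSize = 1 + 4 + 4 // method + compressed size + uncompressed size
		lz4Method          = 0x82
	)
	header := make([]byte, checksumSize+compressHeaderSize)
	header[16] = lz4Method
	binary.LittleEndian.PutUint32(header[17:], uint32(compressedLen+compressHeaderSize))
	binary.LittleEndian.PutUint32(header[21:], uint32(uncompressedLen))
	// Real writer: CityHash128 (v1.0.2) over header[16:] plus the payload goes into header[0:16].
	return header
}

func main() {
	fmt.Printf("% x\n", buildFrameHeader(100, 400))
}
```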
180 vendor/github.com/kshvakov/clickhouse/connect.go generated vendored Normal file
@@ -0,0 +1,180 @@
package clickhouse

import (
	"bufio"
	"crypto/tls"
	"database/sql/driver"
	"net"
	"sync/atomic"
	"time"
)

var tick int32

type openStrategy int8

func (s openStrategy) String() string {
	switch s {
	case connOpenInOrder:
		return "in_order"
	}
	return "random"
}

const (
	connOpenRandom openStrategy = iota + 1
	connOpenInOrder
)

type connOptions struct {
	secure, skipVerify                     bool
	tlsConfig                              *tls.Config
	hosts                                  []string
	connTimeout, readTimeout, writeTimeout time.Duration
	noDelay                                bool
	openStrategy                           openStrategy
	logf                                   func(string, ...interface{})
}

func dial(options connOptions) (*connect, error) {
	var (
		err error
		abs = func(v int) int {
			if v < 0 {
				return -1 * v
			}
			return v
		}
		conn  net.Conn
		ident = abs(int(atomic.AddInt32(&tick, 1)))
	)
	tlsConfig := options.tlsConfig
	if options.secure {
		if tlsConfig == nil {
			tlsConfig = &tls.Config{}
		}
		tlsConfig.InsecureSkipVerify = options.skipVerify
	}
	for i := range options.hosts {
		var num int
		switch options.openStrategy {
		case connOpenInOrder:
			num = i
		case connOpenRandom:
			num = (ident + i) % len(options.hosts)
		}
		switch {
		case options.secure:
			conn, err = tls.DialWithDialer(
				&net.Dialer{
					Timeout: options.connTimeout,
				},
				"tcp",
				options.hosts[num],
				tlsConfig,
			)
		default:
			conn, err = net.DialTimeout("tcp", options.hosts[num], options.connTimeout)
		}
		if err == nil {
			options.logf(
				"[dial] secure=%t, skip_verify=%t, strategy=%s, ident=%d, server=%d -> %s",
				options.secure,
				options.skipVerify,
				options.openStrategy,
				ident,
				num,
				conn.RemoteAddr(),
			)
			if tcp, ok := conn.(*net.TCPConn); ok {
				err = tcp.SetNoDelay(options.noDelay) // Disable or enable the Nagle Algorithm for this tcp socket
				if err != nil {
					return nil, err
				}
			}
			return &connect{
				Conn:         conn,
				logf:         options.logf,
				ident:        ident,
				buffer:       bufio.NewReader(conn),
				readTimeout:  options.readTimeout,
				writeTimeout: options.writeTimeout,
			}, nil
		} else {
			options.logf(
				"[dial err] secure=%t, skip_verify=%t, strategy=%s, ident=%d, addr=%s\n%#v",
				options.secure,
				options.skipVerify,
				options.openStrategy,
				ident,
				options.hosts[num],
				err,
			)
		}
	}
	return nil, err
}

type connect struct {
	net.Conn
	logf                  func(string, ...interface{})
	ident                 int
	buffer                *bufio.Reader
	closed                bool
	readTimeout           time.Duration
	writeTimeout          time.Duration
	lastReadDeadlineTime  time.Time
	lastWriteDeadlineTime time.Time
}

func (conn *connect) Read(b []byte) (int, error) {
	var (
		n      int
		err    error
		total  int
		dstLen = len(b)
	)
	if currentTime := now(); conn.readTimeout != 0 && currentTime.Sub(conn.lastReadDeadlineTime) > (conn.readTimeout>>2) {
		conn.SetReadDeadline(time.Now().Add(conn.readTimeout))
		conn.lastReadDeadlineTime = currentTime
	}
	for total < dstLen {
		if n, err = conn.buffer.Read(b[total:]); err != nil {
			conn.logf("[connect] read error: %v", err)
			conn.Close()
			return n, driver.ErrBadConn
		}
		total += n
	}
	return total, nil
}

func (conn *connect) Write(b []byte) (int, error) {
	var (
		n      int
		err    error
		total  int
		srcLen = len(b)
	)
	if currentTime := now(); conn.writeTimeout != 0 && currentTime.Sub(conn.lastWriteDeadlineTime) > (conn.writeTimeout>>2) {
		conn.SetWriteDeadline(time.Now().Add(conn.writeTimeout))
		conn.lastWriteDeadlineTime = currentTime
	}
	for total < srcLen {
		if n, err = conn.Conn.Write(b[total:]); err != nil {
			conn.logf("[connect] write error: %v", err)
			conn.Close()
			return n, driver.ErrBadConn
		}
		total += n
	}
	return n, nil
}

func (conn *connect) Close() error {
	if !conn.closed {
		conn.closed = true
		return conn.Conn.Close()
	}
	return nil
}
9 vendor/github.com/kshvakov/clickhouse/docker-compose.yml generated vendored Normal file
@@ -0,0 +1,9 @@
---
version: '3'
services:
  clickhouse:
    image: yandex/clickhouse-server
    ports:
      - 127.0.0.1:8123:8123
      - 127.0.0.1:9000:9000
      - 127.0.0.1:9009:9009
10 vendor/github.com/kshvakov/clickhouse/go.mod generated vendored Normal file
@@ -0,0 +1,10 @@
module github.com/kshvakov/clickhouse

go 1.12

require (
	github.com/bkaradzic/go-lz4 v1.0.0
	github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58
	github.com/pierrec/lz4 v2.0.5+incompatible
	github.com/stretchr/testify v1.3.0
)
13 vendor/github.com/kshvakov/clickhouse/go.sum generated vendored Normal file
@@ -0,0 +1,13 @@
github.com/bkaradzic/go-lz4 v1.0.0 h1:RXc4wYsyz985CkXXeX04y4VnZFGG8Rd43pRaHsOXAKk=
github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4=
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg=
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
12 vendor/github.com/kshvakov/clickhouse/go.test.sh generated vendored Executable file
@@ -0,0 +1,12 @@
#!/usr/bin/env bash

set -e
echo "" > coverage.txt

for d in $(go list ./... | grep -v vendor | grep -v examples); do
	go test -race -coverprofile=profile.out -covermode=atomic $d
	if [ -f profile.out ]; then
		cat profile.out >> coverage.txt
		rm profile.out
	fi
done
122 vendor/github.com/kshvakov/clickhouse/helpers.go generated vendored Normal file
@@ -0,0 +1,122 @@
package clickhouse

import (
	"bytes"
	"database/sql/driver"
	"fmt"
	"reflect"
	"regexp"
	"strings"
	"time"
)

func numInput(query string) int {

	var (
		count          int
		args           = make(map[string]struct{})
		reader         = bytes.NewReader([]byte(query))
		quote, keyword bool
		inBetween      bool
		like           = newMatcher("like")
		limit          = newMatcher("limit")
		between        = newMatcher("between")
		and            = newMatcher("and")
	)
	for {
		if char, _, err := reader.ReadRune(); err == nil {
			switch char {
			case '\'', '`':
				quote = !quote
			}
			if quote {
				continue
			}
			switch {
			case char == '?' && keyword:
				count++
			case char == '@':
				if param := paramParser(reader); len(param) != 0 {
					if _, found := args[param]; !found {
						args[param] = struct{}{}
						count++
					}
				}
			case
				char == '=',
				char == '<',
				char == '>',
				char == '(',
				char == ',',
				char == '[':
				keyword = true
			default:
				if limit.matchRune(char) || like.matchRune(char) {
					keyword = true
				} else if between.matchRune(char) {
					keyword = true
					inBetween = true
				} else if inBetween && and.matchRune(char) {
					keyword = true
					inBetween = false
				} else {
					keyword = keyword && (char == ' ' || char == '\t' || char == '\n')
				}
			}
		} else {
			break
		}
	}
	return count
}

func paramParser(reader *bytes.Reader) string {
	var name bytes.Buffer
	for {
		if char, _, err := reader.ReadRune(); err == nil {
			if char == '_' || char >= '0' && char <= '9' || 'a' <= char && char <= 'z' || 'A' <= char && char <= 'Z' {
				name.WriteRune(char)
			} else {
				reader.UnreadRune()
				break
			}
		} else {
			break
		}
	}
	return name.String()
}

var selectRe = regexp.MustCompile(`\s+SELECT\s+`)

func isInsert(query string) bool {
	if f := strings.Fields(query); len(f) > 2 {
		return strings.EqualFold("INSERT", f[0]) && strings.EqualFold("INTO", f[1]) && !selectRe.MatchString(strings.ToUpper(query))
	}
	return false
}

func quote(v driver.Value) string {
	switch v := reflect.ValueOf(v); v.Kind() {
	case reflect.Slice:
		values := make([]string, 0, v.Len())
		for i := 0; i < v.Len(); i++ {
			values = append(values, quote(v.Index(i).Interface()))
		}
		return strings.Join(values, ", ")
	}
	switch v := v.(type) {
	case string:
		return "'" + strings.NewReplacer(`\`, `\\`, `'`, `\'`).Replace(v) + "'"
	case time.Time:
		return formatTime(v)
	}
	return fmt.Sprint(v)
}

func formatTime(value time.Time) string {
	if (value.Hour() + value.Minute() + value.Second() + value.Nanosecond()) == 0 {
		return fmt.Sprintf("toDate(%d)", int(int16(value.Unix()/24/3600)))
	}
	return fmt.Sprintf("toDateTime(%d)", int(uint32(value.Unix())))
}
107 vendor/github.com/kshvakov/clickhouse/lib/binary/compress_reader.go generated vendored Normal file
@@ -0,0 +1,107 @@
// +build !clz4

package binary

import (
	"encoding/binary"
	"fmt"
	"io"

	"github.com/kshvakov/clickhouse/lib/lz4"
)

type compressReader struct {
	reader io.Reader
	// data uncompressed
	data []byte
	// data position
	pos int
	// data compressed
	zdata []byte
	// lz4 headers
	header []byte
}

// NewCompressReader wrap the io.Reader
func NewCompressReader(r io.Reader) *compressReader {
	p := &compressReader{
		reader: r,
		header: make([]byte, HeaderSize),
	}
	p.data = make([]byte, BlockMaxSize, BlockMaxSize)

	zlen := lz4.CompressBound(BlockMaxSize) + HeaderSize
	p.zdata = make([]byte, zlen, zlen)

	p.pos = len(p.data)
	return p
}

func (cr *compressReader) Read(buf []byte) (n int, err error) {
	var bytesRead = 0
	n = len(buf)

	if cr.pos < len(cr.data) {
		copyedSize := copy(buf, cr.data[cr.pos:])

		bytesRead += copyedSize
		cr.pos += copyedSize
	}

	for bytesRead < n {
		if err = cr.readCompressedData(); err != nil {
			return bytesRead, err
		}
		copyedSize := copy(buf[bytesRead:], cr.data)

		bytesRead += copyedSize
		cr.pos = copyedSize
	}
	return n, nil
}

func (cr *compressReader) readCompressedData() (err error) {
	cr.pos = 0
	var n int
	n, err = cr.reader.Read(cr.header)
	if err != nil {
		return
	}
	if n != len(cr.header) {
		return fmt.Errorf("Lz4 decompression header EOF")
	}

	compressedSize := int(binary.LittleEndian.Uint32(cr.header[17:])) - 9
	decompressedSize := int(binary.LittleEndian.Uint32(cr.header[21:]))

	if compressedSize > cap(cr.zdata) {
		cr.zdata = make([]byte, compressedSize)
	}
	if decompressedSize > cap(cr.data) {
		cr.data = make([]byte, decompressedSize)
	}

	cr.zdata = cr.zdata[:compressedSize]
	cr.data = cr.data[:decompressedSize]

	// @TODO checksum
	if cr.header[16] == LZ4 {
		n, err = cr.reader.Read(cr.zdata)
		if err != nil {
			return
		}

		if n != len(cr.zdata) {
			return fmt.Errorf("Decompress read size not match")
		}

		_, err = lz4.Decode(cr.data, cr.zdata)
		if err != nil {
			return
		}
	} else {
		return fmt.Errorf("Unknown compression method: 0x%02x ", cr.header[16])
	}

	return nil
}
107 vendor/github.com/kshvakov/clickhouse/lib/binary/compress_reader_clz4.go generated vendored Normal file
@@ -0,0 +1,107 @@
// +build clz4

package binary

import (
	"encoding/binary"
	"fmt"
	"io"

	lz4 "github.com/cloudflare/golz4"
)

type compressReader struct {
	reader io.Reader
	// data uncompressed
	data []byte
	// data position
	pos int
	// data compressed
	zdata []byte
	// lz4 headers
	header []byte
}

// NewCompressReader wrap the io.Reader
func NewCompressReader(r io.Reader) *compressReader {
	p := &compressReader{
		reader: r,
		header: make([]byte, HeaderSize),
	}
	p.data = make([]byte, BlockMaxSize, BlockMaxSize)

	zlen := lz4.CompressBound(p.data) + HeaderSize
	p.zdata = make([]byte, zlen, zlen)

	p.pos = len(p.data)
	return p
}

func (cr *compressReader) Read(buf []byte) (n int, err error) {
	var bytesRead = 0
	n = len(buf)

	if cr.pos < len(cr.data) {
		copyedSize := copy(buf, cr.data[cr.pos:])

		bytesRead += copyedSize
		cr.pos += copyedSize
	}

	for bytesRead < n {
		if err = cr.readCompressedData(); err != nil {
			return bytesRead, err
		}
		copyedSize := copy(buf[bytesRead:], cr.data)

		bytesRead += copyedSize
		cr.pos = copyedSize
	}
	return n, nil
}

func (cr *compressReader) readCompressedData() (err error) {
	cr.pos = 0
	var n int
	n, err = cr.reader.Read(cr.header)
	if err != nil {
		return
	}
	if n != len(cr.header) {
		return fmt.Errorf("Lz4 decompression header EOF")
	}

	compressedSize := int(binary.LittleEndian.Uint32(cr.header[17:])) - 9
	decompressedSize := int(binary.LittleEndian.Uint32(cr.header[21:]))

	if compressedSize > cap(cr.zdata) {
		cr.zdata = make([]byte, compressedSize)
	}
	if decompressedSize > cap(cr.data) {
		cr.data = make([]byte, decompressedSize)
	}

	cr.zdata = cr.zdata[:compressedSize]
	cr.data = cr.data[:decompressedSize]

	// @TODO checksum
	if cr.header[16] == LZ4 {
		n, err = cr.reader.Read(cr.zdata)
		if err != nil {
			return
		}

		if n != len(cr.zdata) {
			return fmt.Errorf("Decompress read size not match")
		}

		err = lz4.Uncompress(cr.zdata, cr.data)
		if err != nil {
			return
		}
	} else {
		return fmt.Errorf("Unknown compression method: 0x%02x ", cr.header[16])
	}

	return nil
}
21 vendor/github.com/kshvakov/clickhouse/lib/binary/compress_settings.go generated vendored Normal file
@@ -0,0 +1,21 @@
package binary

type CompressionMethodByte byte

const (
	NONE CompressionMethodByte = 0x02
	LZ4                        = 0x82
	ZSTD                       = 0x90
)

const (
	// ChecksumSize is 128bits for cityhash102 checksum
	ChecksumSize = 16
	// CompressHeader magic + compressed_size + uncompressed_size
	CompressHeaderSize = 1 + 4 + 4

	// HeaderSize
	HeaderSize = ChecksumSize + CompressHeaderSize
	// BlockMaxSize 1MB
	BlockMaxSize = 1 << 10
)
79
vendor/github.com/kshvakov/clickhouse/lib/binary/compress_writer.go
generated
vendored
Normal file
79
vendor/github.com/kshvakov/clickhouse/lib/binary/compress_writer.go
generated
vendored
Normal file
@@ -0,0 +1,79 @@
|
||||
// +build !clz4
|
||||
|
||||
package binary
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io"
|
||||
|
||||
"github.com/kshvakov/clickhouse/lib/cityhash102"
|
||||
"github.com/kshvakov/clickhouse/lib/lz4"
|
||||
)
|
||||
|
||||
type compressWriter struct {
|
||||
writer io.Writer
|
||||
// data uncompressed
|
||||
data []byte
|
||||
// data position
|
||||
pos int
|
||||
// data compressed
|
||||
zdata []byte
|
||||
}
|
||||
|
||||
// NewCompressWriter wraps the io.Writer
|
||||
func NewCompressWriter(w io.Writer) *compressWriter {
|
||||
p := &compressWriter{writer: w}
|
||||
p.data = make([]byte, BlockMaxSize, BlockMaxSize)
|
||||
|
||||
zlen := lz4.CompressBound(BlockMaxSize) + HeaderSize
|
||||
p.zdata = make([]byte, zlen, zlen)
|
||||
return p
|
||||
}
|
||||
|
||||
func (cw *compressWriter) Write(buf []byte) (int, error) {
|
||||
var n int
|
||||
for len(buf) > 0 {
|
||||
// Accumulate the data to be compressed.
|
||||
m := copy(cw.data[cw.pos:], buf)
|
||||
cw.pos += m
|
||||
buf = buf[m:]
|
||||
|
||||
if cw.pos == len(cw.data) {
|
||||
err := cw.Flush()
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
n += m
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (cw *compressWriter) Flush() (err error) {
|
||||
if cw.pos == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// write the headers
|
||||
compressedSize, err := lz4.Encode(cw.zdata[HeaderSize:], cw.data[:cw.pos])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
compressedSize += CompressHeaderSize
|
||||
// fill the header, compressed_size_32 + uncompressed_size_32
|
||||
cw.zdata[16] = LZ4
|
||||
binary.LittleEndian.PutUint32(cw.zdata[17:], uint32(compressedSize))
|
||||
binary.LittleEndian.PutUint32(cw.zdata[21:], uint32(cw.pos))
|
||||
|
||||
// fill the checksum
|
||||
checkSum := cityhash102.CityHash128(cw.zdata[16:], uint32(compressedSize))
|
||||
binary.LittleEndian.PutUint64(cw.zdata[0:], checkSum.Lower64())
|
||||
binary.LittleEndian.PutUint64(cw.zdata[8:], checkSum.Higher64())
|
||||
|
||||
cw.writer.Write(cw.zdata[:compressedSize+ChecksumSize])
|
||||
if w, ok := cw.writer.(WriteFlusher); ok {
|
||||
err = w.Flush()
|
||||
}
|
||||
cw.pos = 0
|
||||
return
|
||||
}
|
78
vendor/github.com/kshvakov/clickhouse/lib/binary/compress_writer_clz4.go
generated
vendored
Normal file
@@ -0,0 +1,78 @@
|
||||
// +build clz4
|
||||
|
||||
package binary
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io"
|
||||
|
||||
lz4 "github.com/cloudflare/golz4"
|
||||
"github.com/kshvakov/clickhouse/lib/cityhash102"
|
||||
)
|
||||
|
||||
type compressWriter struct {
|
||||
writer io.Writer
|
||||
// data uncompressed
|
||||
data []byte
|
||||
// data position
|
||||
pos int
|
||||
// data compressed
|
||||
zdata []byte
|
||||
}
|
||||
|
||||
// NewCompressWriter wraps the io.Writer
|
||||
func NewCompressWriter(w io.Writer) *compressWriter {
|
||||
p := &compressWriter{writer: w}
|
||||
p.data = make([]byte, BlockMaxSize, BlockMaxSize)
|
||||
|
||||
zlen := lz4.CompressBound(p.data) + HeaderSize
|
||||
p.zdata = make([]byte, zlen, zlen)
|
||||
return p
|
||||
}
|
||||
|
||||
func (cw *compressWriter) Write(buf []byte) (int, error) {
|
||||
var n int
|
||||
for len(buf) > 0 {
|
||||
// Accumulate the data to be compressed.
|
||||
m := copy(cw.data[cw.pos:], buf)
|
||||
cw.pos += m
|
||||
buf = buf[m:]
|
||||
|
||||
if cw.pos == len(cw.data) {
|
||||
err := cw.Flush()
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
n += m
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (cw *compressWriter) Flush() (err error) {
|
||||
if cw.pos == 0 {
|
||||
return
|
||||
}
|
||||
// write the headers
|
||||
compressedSize, err := lz4.Compress(cw.data[:cw.pos], cw.zdata[HeaderSize:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
compressedSize += CompressHeaderSize
|
||||
// fill the header, compressed_size_32 + uncompressed_size_32
|
||||
cw.zdata[16] = LZ4
|
||||
binary.LittleEndian.PutUint32(cw.zdata[17:], uint32(compressedSize))
|
||||
binary.LittleEndian.PutUint32(cw.zdata[21:], uint32(cw.pos))
|
||||
|
||||
// fill the checksum
|
||||
checkSum := cityhash102.CityHash128(cw.zdata[16:], uint32(compressedSize))
|
||||
binary.LittleEndian.PutUint64(cw.zdata[0:], checkSum.Lower64())
|
||||
binary.LittleEndian.PutUint64(cw.zdata[8:], checkSum.Higher64())
|
||||
|
||||
cw.writer.Write(cw.zdata[:compressedSize+ChecksumSize])
|
||||
if w, ok := cw.writer.(WriteFlusher); ok {
|
||||
err = w.Flush()
|
||||
}
|
||||
cw.pos = 0
|
||||
return
|
||||
}
|
165
vendor/github.com/kshvakov/clickhouse/lib/binary/decoder.go
generated
vendored
Normal file
@@ -0,0 +1,165 @@
|
||||
package binary
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"math"
|
||||
)
|
||||
|
||||
func NewDecoder(input io.Reader) *Decoder {
|
||||
return &Decoder{
|
||||
input: input,
|
||||
compressInput: NewCompressReader(input),
|
||||
}
|
||||
}
|
||||
|
||||
type Decoder struct {
|
||||
compress bool
|
||||
input io.Reader
|
||||
compressInput io.Reader
|
||||
scratch [binary.MaxVarintLen64]byte
|
||||
}
|
||||
|
||||
func (decoder *Decoder) SelectCompress(compress bool) {
|
||||
decoder.compress = compress
|
||||
}
|
||||
|
||||
func (decoder *Decoder) Get() io.Reader {
|
||||
if decoder.compress {
|
||||
return decoder.compressInput
|
||||
}
|
||||
return decoder.input
|
||||
}
|
||||
|
||||
func (decoder *Decoder) Bool() (bool, error) {
|
||||
v, err := decoder.ReadByte()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return v == 1, nil
|
||||
}
|
||||
|
||||
func (decoder *Decoder) Uvarint() (uint64, error) {
|
||||
return binary.ReadUvarint(decoder)
|
||||
}
|
||||
|
||||
func (decoder *Decoder) Int8() (int8, error) {
|
||||
v, err := decoder.ReadByte()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int8(v), nil
|
||||
}
|
||||
|
||||
func (decoder *Decoder) Int16() (int16, error) {
|
||||
v, err := decoder.UInt16()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int16(v), nil
|
||||
}
|
||||
|
||||
func (decoder *Decoder) Int32() (int32, error) {
|
||||
v, err := decoder.UInt32()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int32(v), nil
|
||||
}
|
||||
|
||||
func (decoder *Decoder) Int64() (int64, error) {
|
||||
v, err := decoder.UInt64()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int64(v), nil
|
||||
}
|
||||
|
||||
func (decoder *Decoder) UInt8() (uint8, error) {
|
||||
v, err := decoder.ReadByte()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint8(v), nil
|
||||
}
|
||||
|
||||
func (decoder *Decoder) UInt16() (uint16, error) {
|
||||
if _, err := decoder.Get().Read(decoder.scratch[:2]); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint16(decoder.scratch[0]) | uint16(decoder.scratch[1])<<8, nil
|
||||
}
|
||||
|
||||
func (decoder *Decoder) UInt32() (uint32, error) {
|
||||
if _, err := decoder.Get().Read(decoder.scratch[:4]); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint32(decoder.scratch[0]) |
|
||||
uint32(decoder.scratch[1])<<8 |
|
||||
uint32(decoder.scratch[2])<<16 |
|
||||
uint32(decoder.scratch[3])<<24, nil
|
||||
}
|
||||
|
||||
func (decoder *Decoder) UInt64() (uint64, error) {
|
||||
if _, err := decoder.Get().Read(decoder.scratch[:8]); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint64(decoder.scratch[0]) |
|
||||
uint64(decoder.scratch[1])<<8 |
|
||||
uint64(decoder.scratch[2])<<16 |
|
||||
uint64(decoder.scratch[3])<<24 |
|
||||
uint64(decoder.scratch[4])<<32 |
|
||||
uint64(decoder.scratch[5])<<40 |
|
||||
uint64(decoder.scratch[6])<<48 |
|
||||
uint64(decoder.scratch[7])<<56, nil
|
||||
}
|
||||
|
||||
func (decoder *Decoder) Float32() (float32, error) {
|
||||
v, err := decoder.UInt32()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return math.Float32frombits(v), nil
|
||||
}
|
||||
|
||||
func (decoder *Decoder) Float64() (float64, error) {
|
||||
v, err := decoder.UInt64()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return math.Float64frombits(v), nil
|
||||
}
|
||||
|
||||
func (decoder *Decoder) Fixed(ln int) ([]byte, error) {
|
||||
if reader, ok := decoder.Get().(FixedReader); ok {
|
||||
return reader.Fixed(ln)
|
||||
}
|
||||
buf := make([]byte, ln)
|
||||
if _, err := decoder.Get().Read(buf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
func (decoder *Decoder) String() (string, error) {
|
||||
strlen, err := decoder.Uvarint()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
str, err := decoder.Fixed(int(strlen))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(str), nil
|
||||
}
|
||||
|
||||
func (decoder *Decoder) ReadByte() (byte, error) {
|
||||
if _, err := decoder.Get().Read(decoder.scratch[:1]); err != nil {
|
||||
return 0x0, err
|
||||
}
|
||||
return decoder.scratch[0], nil
|
||||
}
|
||||
|
||||
type FixedReader interface {
|
||||
Fixed(ln int) ([]byte, error)
|
||||
}
|
162
vendor/github.com/kshvakov/clickhouse/lib/binary/encoder.go
generated
vendored
Normal file
@@ -0,0 +1,162 @@
|
||||
package binary
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"math"
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return &Encoder{
|
||||
output: w,
|
||||
compressOutput: NewCompressWriter(w),
|
||||
}
|
||||
}
|
||||
|
||||
type Encoder struct {
|
||||
compress bool
|
||||
output io.Writer
|
||||
compressOutput io.Writer
|
||||
scratch [binary.MaxVarintLen64]byte
|
||||
}
|
||||
|
||||
func (enc *Encoder) SelectCompress(compress bool) {
|
||||
if enc.compress && !compress {
|
||||
enc.Flush()
|
||||
}
|
||||
enc.compress = compress
|
||||
}
|
||||
|
||||
func (enc *Encoder) Get() io.Writer {
|
||||
if enc.compress {
|
||||
return enc.compressOutput
|
||||
}
|
||||
return enc.output
|
||||
}
|
||||
|
||||
func (enc *Encoder) Uvarint(v uint64) error {
|
||||
ln := binary.PutUvarint(enc.scratch[:binary.MaxVarintLen64], v)
|
||||
if _, err := enc.Get().Write(enc.scratch[0:ln]); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (enc *Encoder) Bool(v bool) error {
|
||||
if v {
|
||||
return enc.UInt8(1)
|
||||
}
|
||||
return enc.UInt8(0)
|
||||
}
|
||||
|
||||
func (enc *Encoder) Int8(v int8) error {
|
||||
return enc.UInt8(uint8(v))
|
||||
}
|
||||
|
||||
func (enc *Encoder) Int16(v int16) error {
|
||||
return enc.UInt16(uint16(v))
|
||||
}
|
||||
|
||||
func (enc *Encoder) Int32(v int32) error {
|
||||
return enc.UInt32(uint32(v))
|
||||
}
|
||||
|
||||
func (enc *Encoder) Int64(v int64) error {
|
||||
return enc.UInt64(uint64(v))
|
||||
}
|
||||
|
||||
func (enc *Encoder) UInt8(v uint8) error {
|
||||
enc.scratch[0] = v
|
||||
if _, err := enc.Get().Write(enc.scratch[:1]); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (enc *Encoder) UInt16(v uint16) error {
|
||||
enc.scratch[0] = byte(v)
|
||||
enc.scratch[1] = byte(v >> 8)
|
||||
if _, err := enc.Get().Write(enc.scratch[:2]); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (enc *Encoder) UInt32(v uint32) error {
|
||||
enc.scratch[0] = byte(v)
|
||||
enc.scratch[1] = byte(v >> 8)
|
||||
enc.scratch[2] = byte(v >> 16)
|
||||
enc.scratch[3] = byte(v >> 24)
|
||||
if _, err := enc.Get().Write(enc.scratch[:4]); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (enc *Encoder) UInt64(v uint64) error {
|
||||
enc.scratch[0] = byte(v)
|
||||
enc.scratch[1] = byte(v >> 8)
|
||||
enc.scratch[2] = byte(v >> 16)
|
||||
enc.scratch[3] = byte(v >> 24)
|
||||
enc.scratch[4] = byte(v >> 32)
|
||||
enc.scratch[5] = byte(v >> 40)
|
||||
enc.scratch[6] = byte(v >> 48)
|
||||
enc.scratch[7] = byte(v >> 56)
|
||||
if _, err := enc.Get().Write(enc.scratch[:8]); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (enc *Encoder) Float32(v float32) error {
|
||||
return enc.UInt32(math.Float32bits(v))
|
||||
}
|
||||
|
||||
func (enc *Encoder) Float64(v float64) error {
|
||||
return enc.UInt64(math.Float64bits(v))
|
||||
}
|
||||
|
||||
func (enc *Encoder) String(v string) error {
|
||||
str := Str2Bytes(v)
|
||||
if err := enc.Uvarint(uint64(len(str))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := enc.Get().Write(str); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (enc *Encoder) RawString(str []byte) error {
|
||||
if err := enc.Uvarint(uint64(len(str))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := enc.Get().Write(str); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (enc *Encoder) Write(b []byte) (int, error) {
|
||||
return enc.Get().Write(b)
|
||||
}
|
||||
|
||||
func (enc *Encoder) Flush() error {
|
||||
if w, ok := enc.Get().(WriteFlusher); ok {
|
||||
return w.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type WriteFlusher interface {
|
||||
Flush() error
|
||||
}
|
||||
|
||||
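// Str2Bytes reinterprets the string's backing array as a []byte without copying; the returned slice must not be modified.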
func Str2Bytes(str string) []byte {
|
||||
header := (*reflect.SliceHeader)(unsafe.Pointer(&str))
|
||||
header.Len = len(str)
|
||||
header.Cap = header.Len
|
||||
return *(*[]byte)(unsafe.Pointer(header))
|
||||
}
|
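Encoder and its counterpart Decoder are symmetric wrappers around an io.Writer/io.Reader that can switch to the compressing stream via SelectCompress. A rough usage sketch against an in-memory buffer, assuming the package is importable under its upstream path (inside cloudflared it lives under vendor/):

```
package main

import (
	"bytes"
	"fmt"

	"github.com/kshvakov/clickhouse/lib/binary"
)

func main() {
	var buf bytes.Buffer

	enc := binary.NewEncoder(&buf)
	_ = enc.String("hello") // uvarint length followed by the bytes
	_ = enc.UInt32(42)      // little-endian
	_ = enc.Bool(true)      // single 0/1 byte

	dec := binary.NewDecoder(&buf)
	s, _ := dec.String()
	n, _ := dec.UInt32()
	b, _ := dec.Bool()
	fmt.Println(s, n, b) // hello 42 true
}
```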
45
vendor/github.com/kshvakov/clickhouse/lib/cityhash102/city64.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
package cityhash102
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"hash"
|
||||
)
|
||||
|
||||
type City64 struct {
|
||||
s []byte
|
||||
}
|
||||
|
||||
var _ hash.Hash64 = (*City64)(nil)
|
||||
var _ hash.Hash = (*City64)(nil)
|
||||
|
||||
func New64() hash.Hash64 {
|
||||
return &City64{}
|
||||
}
|
||||
|
||||
func (this *City64) Sum(b []byte) []byte {
|
||||
b2 := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(b2, this.Sum64())
|
||||
b = append(b, b2...)
|
||||
return b
|
||||
}
|
||||
|
||||
func (this *City64) Sum64() uint64 {
|
||||
return CityHash64(this.s, uint32(len(this.s)))
|
||||
}
|
||||
|
||||
func (this *City64) Reset() {
|
||||
this.s = this.s[0:0]
|
||||
}
|
||||
|
||||
func (this *City64) BlockSize() int {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (this *City64) Write(s []byte) (n int, err error) {
|
||||
this.s = append(this.s, s...)
|
||||
return len(s), nil
|
||||
}
|
||||
|
||||
func (this *City64) Size() int {
|
||||
return 8
|
||||
}
|
383
vendor/github.com/kshvakov/clickhouse/lib/cityhash102/cityhash.go
generated
vendored
Normal file
@@ -0,0 +1,383 @@
|
||||
/*
|
||||
* Go implementation of Google city hash (MIT license)
|
||||
* https://code.google.com/p/cityhash/
|
||||
*
|
||||
* MIT License http://www.opensource.org/licenses/mit-license.php
|
||||
*
|
||||
* I don't even want to pretend to understand the details of city hash.
|
||||
* I am only reproducing the logic in Go as faithfully as I can.
|
||||
*
|
||||
*/
|
||||
|
||||
package cityhash102
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
const (
|
||||
k0 uint64 = 0xc3a5c85c97cb3127
|
||||
k1 uint64 = 0xb492b66fbe98f273
|
||||
k2 uint64 = 0x9ae16a3b2f90404f
|
||||
k3 uint64 = 0xc949d7c7509e6557
|
||||
|
||||
kMul uint64 = 0x9ddfea08eb382d69
|
||||
)
|
||||
|
||||
func fetch64(p []byte) uint64 {
|
||||
return binary.LittleEndian.Uint64(p)
|
||||
//return uint64InExpectedOrder(unalignedLoad64(p))
|
||||
}
|
||||
|
||||
func fetch32(p []byte) uint32 {
|
||||
return binary.LittleEndian.Uint32(p)
|
||||
//return uint32InExpectedOrder(unalignedLoad32(p))
|
||||
}
|
||||
|
||||
func rotate64(val uint64, shift uint32) uint64 {
|
||||
if shift != 0 {
|
||||
return ((val >> shift) | (val << (64 - shift)))
|
||||
}
|
||||
|
||||
return val
|
||||
}
|
||||
|
||||
func rotate32(val uint32, shift uint32) uint32 {
|
||||
if shift != 0 {
|
||||
return ((val >> shift) | (val << (32 - shift)))
|
||||
}
|
||||
|
||||
return val
|
||||
}
|
||||
|
||||
func swap64(a, b *uint64) {
|
||||
*a, *b = *b, *a
|
||||
}
|
||||
|
||||
func swap32(a, b *uint32) {
|
||||
*a, *b = *b, *a
|
||||
}
|
||||
|
||||
func permute3(a, b, c *uint32) {
|
||||
swap32(a, b)
|
||||
swap32(a, c)
|
||||
}
|
||||
|
||||
func rotate64ByAtLeast1(val uint64, shift uint32) uint64 {
|
||||
return (val >> shift) | (val << (64 - shift))
|
||||
}
|
||||
|
||||
func shiftMix(val uint64) uint64 {
|
||||
return val ^ (val >> 47)
|
||||
}
|
||||
|
||||
type Uint128 [2]uint64
|
||||
|
||||
func (this *Uint128) setLower64(l uint64) {
|
||||
this[0] = l
|
||||
}
|
||||
|
||||
func (this *Uint128) setHigher64(h uint64) {
|
||||
this[1] = h
|
||||
}
|
||||
|
||||
func (this Uint128) Lower64() uint64 {
|
||||
return this[0]
|
||||
}
|
||||
|
||||
func (this Uint128) Higher64() uint64 {
|
||||
return this[1]
|
||||
}
|
||||
|
||||
func (this Uint128) Bytes() []byte {
|
||||
b := make([]byte, 16)
|
||||
binary.LittleEndian.PutUint64(b, this[0])
|
||||
binary.LittleEndian.PutUint64(b[8:], this[1])
|
||||
return b
|
||||
}
|
||||
|
||||
func hash128to64(x Uint128) uint64 {
|
||||
// Murmur-inspired hashing.
|
||||
var a = (x.Lower64() ^ x.Higher64()) * kMul
|
||||
a ^= (a >> 47)
|
||||
var b = (x.Higher64() ^ a) * kMul
|
||||
b ^= (b >> 47)
|
||||
b *= kMul
|
||||
return b
|
||||
}
|
||||
|
||||
func hashLen16(u, v uint64) uint64 {
|
||||
return hash128to64(Uint128{u, v})
|
||||
}
|
||||
|
||||
func hashLen16_3(u, v, mul uint64) uint64 {
|
||||
// Murmur-inspired hashing.
|
||||
var a = (u ^ v) * mul
|
||||
a ^= (a >> 47)
|
||||
var b = (v ^ a) * mul
|
||||
b ^= (b >> 47)
|
||||
b *= mul
|
||||
return b
|
||||
}
|
||||
|
||||
func hashLen0to16(s []byte, length uint32) uint64 {
|
||||
if length > 8 {
|
||||
var a = fetch64(s)
|
||||
var b = fetch64(s[length-8:])
|
||||
|
||||
return hashLen16(a, rotate64ByAtLeast1(b+uint64(length), length)) ^ b
|
||||
}
|
||||
|
||||
if length >= 4 {
|
||||
var a = fetch32(s)
|
||||
return hashLen16(uint64(length)+(uint64(a)<<3), uint64(fetch32(s[length-4:])))
|
||||
}
|
||||
|
||||
if length > 0 {
|
||||
var a uint8 = uint8(s[0])
|
||||
var b uint8 = uint8(s[length>>1])
|
||||
var c uint8 = uint8(s[length-1])
|
||||
|
||||
var y uint32 = uint32(a) + (uint32(b) << 8)
|
||||
var z uint32 = length + (uint32(c) << 2)
|
||||
|
||||
return shiftMix(uint64(y)*k2^uint64(z)*k3) * k2
|
||||
}
|
||||
|
||||
return k2
|
||||
}
|
||||
|
||||
// This probably works well for 16-byte strings as well, but it may be overkill
|
||||
func hashLen17to32(s []byte, length uint32) uint64 {
|
||||
var a = fetch64(s) * k1
|
||||
var b = fetch64(s[8:])
|
||||
var c = fetch64(s[length-8:]) * k2
|
||||
var d = fetch64(s[length-16:]) * k0
|
||||
|
||||
return hashLen16(rotate64(a-b, 43)+rotate64(c, 30)+d,
|
||||
a+rotate64(b^k3, 20)-c+uint64(length))
|
||||
}
|
||||
|
||||
func weakHashLen32WithSeeds(w, x, y, z, a, b uint64) Uint128 {
|
||||
a += w
|
||||
b = rotate64(b+a+z, 21)
|
||||
var c uint64 = a
|
||||
a += x
|
||||
a += y
|
||||
b += rotate64(a, 44)
|
||||
return Uint128{a + z, b + c}
|
||||
}
|
||||
|
||||
func weakHashLen32WithSeeds_3(s []byte, a, b uint64) Uint128 {
|
||||
return weakHashLen32WithSeeds(fetch64(s), fetch64(s[8:]), fetch64(s[16:]), fetch64(s[24:]), a, b)
|
||||
}
|
||||
|
||||
func hashLen33to64(s []byte, length uint32) uint64 {
|
||||
var z uint64 = fetch64(s[24:])
|
||||
var a uint64 = fetch64(s) + (uint64(length)+fetch64(s[length-16:]))*k0
|
||||
var b uint64 = rotate64(a+z, 52)
|
||||
var c uint64 = rotate64(a, 37)
|
||||
|
||||
a += fetch64(s[8:])
|
||||
c += rotate64(a, 7)
|
||||
a += fetch64(s[16:])
|
||||
|
||||
var vf uint64 = a + z
|
||||
var vs = b + rotate64(a, 31) + c
|
||||
|
||||
a = fetch64(s[16:]) + fetch64(s[length-32:])
|
||||
z = fetch64(s[length-8:])
|
||||
b = rotate64(a+z, 52)
|
||||
c = rotate64(a, 37)
|
||||
a += fetch64(s[length-24:])
|
||||
c += rotate64(a, 7)
|
||||
a += fetch64(s[length-16:])
|
||||
|
||||
wf := a + z
|
||||
ws := b + rotate64(a, 31) + c
|
||||
r := shiftMix((vf+ws)*k2 + (wf+vs)*k0)
|
||||
return shiftMix(r*k0+vs) * k2
|
||||
}
|
||||
|
||||
func CityHash64(s []byte, length uint32) uint64 {
|
||||
if length <= 32 {
|
||||
if length <= 16 {
|
||||
return hashLen0to16(s, length)
|
||||
} else {
|
||||
return hashLen17to32(s, length)
|
||||
}
|
||||
} else if length <= 64 {
|
||||
return hashLen33to64(s, length)
|
||||
}
|
||||
|
||||
var x uint64 = fetch64(s)
|
||||
var y uint64 = fetch64(s[length-16:]) ^ k1
|
||||
var z uint64 = fetch64(s[length-56:]) ^ k0
|
||||
|
||||
var v Uint128 = weakHashLen32WithSeeds_3(s[length-64:], uint64(length), y)
|
||||
var w Uint128 = weakHashLen32WithSeeds_3(s[length-32:], uint64(length)*k1, k0)
|
||||
|
||||
z += shiftMix(v.Higher64()) * k1
|
||||
x = rotate64(z+x, 39) * k1
|
||||
y = rotate64(y, 33) * k1
|
||||
|
||||
length = (length - 1) & ^uint32(63)
|
||||
for {
|
||||
x = rotate64(x+y+v.Lower64()+fetch64(s[16:]), 37) * k1
|
||||
y = rotate64(y+v.Higher64()+fetch64(s[48:]), 42) * k1
|
||||
|
||||
x ^= w.Higher64()
|
||||
y ^= v.Lower64()
|
||||
|
||||
z = rotate64(z^w.Lower64(), 33)
|
||||
v = weakHashLen32WithSeeds_3(s, v.Higher64()*k1, x+w.Lower64())
|
||||
w = weakHashLen32WithSeeds_3(s[32:], z+w.Higher64(), y)
|
||||
|
||||
swap64(&z, &x)
|
||||
s = s[64:]
|
||||
length -= 64
|
||||
|
||||
if length == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return hashLen16(hashLen16(v.Lower64(), w.Lower64())+shiftMix(y)*k1+z, hashLen16(v.Higher64(), w.Higher64())+x)
|
||||
}
|
||||
|
||||
func CityHash64WithSeed(s []byte, length uint32, seed uint64) uint64 {
|
||||
return CityHash64WithSeeds(s, length, k2, seed)
|
||||
}
|
||||
|
||||
func CityHash64WithSeeds(s []byte, length uint32, seed0, seed1 uint64) uint64 {
|
||||
return hashLen16(CityHash64(s, length)-seed0, seed1)
|
||||
}
|
||||
|
||||
func cityMurmur(s []byte, length uint32, seed Uint128) Uint128 {
|
||||
var a uint64 = seed.Lower64()
|
||||
var b uint64 = seed.Higher64()
|
||||
var c uint64 = 0
|
||||
var d uint64 = 0
|
||||
var l int32 = int32(length) - 16
|
||||
|
||||
if l <= 0 { // len <= 16
|
||||
a = shiftMix(a*k1) * k1
|
||||
c = b*k1 + hashLen0to16(s, length)
|
||||
|
||||
if length >= 8 {
|
||||
d = shiftMix(a + fetch64(s))
|
||||
} else {
|
||||
d = shiftMix(a + c)
|
||||
}
|
||||
|
||||
} else { // len > 16
|
||||
c = hashLen16(fetch64(s[length-8:])+k1, a)
|
||||
d = hashLen16(b+uint64(length), c+fetch64(s[length-16:]))
|
||||
a += d
|
||||
|
||||
for {
|
||||
a ^= shiftMix(fetch64(s)*k1) * k1
|
||||
a *= k1
|
||||
b ^= a
|
||||
c ^= shiftMix(fetch64(s[8:])*k1) * k1
|
||||
c *= k1
|
||||
d ^= c
|
||||
s = s[16:]
|
||||
l -= 16
|
||||
|
||||
if l <= 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
a = hashLen16(a, c)
|
||||
b = hashLen16(d, b)
|
||||
return Uint128{a ^ b, hashLen16(b, a)}
|
||||
}
|
||||
|
||||
func CityHash128WithSeed(s []byte, length uint32, seed Uint128) Uint128 {
|
||||
if length < 128 {
|
||||
return cityMurmur(s, length, seed)
|
||||
}
|
||||
|
||||
// We expect length >= 128 to be the common case. Keep 56 bytes of state:
|
||||
// v, w, x, y, and z.
|
||||
var v, w Uint128
|
||||
var x uint64 = seed.Lower64()
|
||||
var y uint64 = seed.Higher64()
|
||||
var z uint64 = uint64(length) * k1
|
||||
|
||||
var pos uint32
|
||||
var t = s
|
||||
|
||||
v.setLower64(rotate64(y^k1, 49)*k1 + fetch64(s))
|
||||
v.setHigher64(rotate64(v.Lower64(), 42)*k1 + fetch64(s[8:]))
|
||||
w.setLower64(rotate64(y+z, 35)*k1 + x)
|
||||
w.setHigher64(rotate64(x+fetch64(s[88:]), 53) * k1)
|
||||
|
||||
// This is the same inner loop as CityHash64(), manually unrolled.
|
||||
for {
|
||||
x = rotate64(x+y+v.Lower64()+fetch64(s[16:]), 37) * k1
|
||||
y = rotate64(y+v.Higher64()+fetch64(s[48:]), 42) * k1
|
||||
|
||||
x ^= w.Higher64()
|
||||
y ^= v.Lower64()
|
||||
z = rotate64(z^w.Lower64(), 33)
|
||||
v = weakHashLen32WithSeeds_3(s, v.Higher64()*k1, x+w.Lower64())
|
||||
w = weakHashLen32WithSeeds_3(s[32:], z+w.Higher64(), y)
|
||||
swap64(&z, &x)
|
||||
s = s[64:]
|
||||
pos += 64
|
||||
|
||||
x = rotate64(x+y+v.Lower64()+fetch64(s[16:]), 37) * k1
|
||||
y = rotate64(y+v.Higher64()+fetch64(s[48:]), 42) * k1
|
||||
x ^= w.Higher64()
|
||||
y ^= v.Lower64()
|
||||
z = rotate64(z^w.Lower64(), 33)
|
||||
v = weakHashLen32WithSeeds_3(s, v.Higher64()*k1, x+w.Lower64())
|
||||
w = weakHashLen32WithSeeds_3(s[32:], z+w.Higher64(), y)
|
||||
swap64(&z, &x)
|
||||
s = s[64:]
|
||||
pos += 64
|
||||
length -= 128
|
||||
|
||||
if length < 128 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
y += rotate64(w.Lower64(), 37)*k0 + z
|
||||
x += rotate64(v.Lower64()+z, 49) * k0
|
||||
|
||||
// If 0 < length < 128, hash up to 4 chunks of 32 bytes each from the end of s.
|
||||
var tailDone uint32
|
||||
for tailDone = 0; tailDone < length; {
|
||||
tailDone += 32
|
||||
y = rotate64(y-x, 42)*k0 + v.Higher64()
|
||||
|
||||
//TODO why not use origin_len ?
|
||||
w.setLower64(w.Lower64() + fetch64(t[pos+length-tailDone+16:]))
|
||||
x = rotate64(x, 49)*k0 + w.Lower64()
|
||||
w.setLower64(w.Lower64() + v.Lower64())
|
||||
v = weakHashLen32WithSeeds_3(t[pos+length-tailDone:], v.Lower64(), v.Higher64())
|
||||
}
|
||||
// At this point our 48 bytes of state should contain more than
|
||||
// enough information for a strong 128-bit hash. We use two
|
||||
// different 48-byte-to-8-byte hashes to get a 16-byte final result.
|
||||
x = hashLen16(x, v.Lower64())
|
||||
y = hashLen16(y, w.Lower64())
|
||||
|
||||
return Uint128{hashLen16(x+v.Higher64(), w.Higher64()) + y,
|
||||
hashLen16(x+w.Higher64(), y+v.Higher64())}
|
||||
}
|
||||
|
||||
func CityHash128(s []byte, length uint32) (result Uint128) {
|
||||
if length >= 16 {
|
||||
result = CityHash128WithSeed(s[16:length], length-16, Uint128{fetch64(s) ^ k3, fetch64(s[8:])})
|
||||
} else if length >= 8 {
|
||||
result = CityHash128WithSeed(nil, 0, Uint128{fetch64(s) ^ (uint64(length) * k0), fetch64(s[length-8:]) ^ k1})
|
||||
} else {
|
||||
result = CityHash128WithSeed(s, length, Uint128{k0, k1})
|
||||
}
|
||||
return
|
||||
}
|
5
vendor/github.com/kshvakov/clickhouse/lib/cityhash102/doc.go
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
/** COPY from https://github.com/zentures/cityhash/
|
||||
|
||||
NOTE: The code is modified to be compatible with CityHash128 used in ClickHouse
|
||||
*/
|
||||
package cityhash102
|
124
vendor/github.com/kshvakov/clickhouse/lib/column/array.go
generated
vendored
Normal file
@@ -0,0 +1,124 @@
|
||||
package column
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/kshvakov/clickhouse/lib/binary"
|
||||
)
|
||||
|
||||
type Array struct {
|
||||
base
|
||||
depth int
|
||||
column Column
|
||||
}
|
||||
|
||||
func (array *Array) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
return nil, fmt.Errorf("do not use Read method for Array(T) column")
|
||||
}
|
||||
|
||||
func (array *Array) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
return array.column.Write(encoder, v)
|
||||
}
|
||||
|
||||
func (array *Array) ReadArray(decoder *binary.Decoder, rows int) (_ []interface{}, err error) {
|
||||
var (
|
||||
values = make([]interface{}, rows)
|
||||
offsets = make([]uint64, rows)
|
||||
)
|
||||
for i := 0; i < rows; i++ {
|
||||
offset, err := decoder.UInt64()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
offsets[i] = offset
|
||||
}
|
||||
for n, offset := range offsets {
|
||||
ln := offset
|
||||
if n != 0 {
|
||||
ln = ln - offsets[n-1]
|
||||
}
|
||||
if values[n], err = array.read(decoder, int(ln)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
func (array *Array) read(decoder *binary.Decoder, ln int) (interface{}, error) {
|
||||
slice := reflect.MakeSlice(array.valueOf.Type(), 0, ln)
|
||||
for i := 0; i < ln; i++ {
|
||||
value, err := array.column.Read(decoder)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
slice = reflect.Append(slice, reflect.ValueOf(value))
|
||||
}
|
||||
return slice.Interface(), nil
|
||||
}
|
||||
|
||||
func parseArray(name, chType string, timezone *time.Location) (*Array, error) {
|
||||
if len(chType) < 11 {
|
||||
return nil, fmt.Errorf("invalid Array column type: %s", chType)
|
||||
}
|
||||
var (
|
||||
depth int
|
||||
columnType = chType
|
||||
)
|
||||
|
||||
loop:
|
||||
for _, str := range strings.Split(chType, "Array(") {
|
||||
switch {
|
||||
case len(str) == 0:
|
||||
depth++
|
||||
default:
|
||||
chType = str[:len(str)-depth]
|
||||
break loop
|
||||
}
|
||||
}
|
||||
column, err := Factory(name, chType, timezone)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Array(T): %v", err)
|
||||
}
|
||||
|
||||
var scanType interface{}
|
||||
switch t := column.ScanType().Kind(); t {
|
||||
case reflect.Int8:
|
||||
scanType = []int8{}
|
||||
case reflect.Int16:
|
||||
scanType = []int16{}
|
||||
case reflect.Int32:
|
||||
scanType = []int32{}
|
||||
case reflect.Int64:
|
||||
scanType = []int64{}
|
||||
case reflect.Uint8:
|
||||
scanType = []uint8{}
|
||||
case reflect.Uint16:
|
||||
scanType = []uint16{}
|
||||
case reflect.Uint32:
|
||||
scanType = []uint32{}
|
||||
case reflect.Uint64:
|
||||
scanType = []uint64{}
|
||||
case reflect.Float32:
|
||||
scanType = []float32{}
|
||||
case reflect.Float64:
|
||||
scanType = []float64{}
|
||||
case reflect.String:
|
||||
scanType = []string{}
|
||||
case baseTypes[time.Time{}].Kind():
|
||||
scanType = []time.Time{}
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported Array type '%s'", column.ScanType().Name())
|
||||
}
|
||||
return &Array{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: columnType,
|
||||
valueOf: reflect.ValueOf(scanType),
|
||||
},
|
||||
depth: depth,
|
||||
column: column,
|
||||
}, nil
|
||||
}
|
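ReadArray above decodes Array(T) columns in two passes: it first reads one cumulative uint64 offset per row, then derives each row's element count by subtracting the previous offset before reading that many values. A standalone illustration of that offset arithmetic, with made-up offsets:

```
package main

import "fmt"

func main() {
	// Cumulative offsets as sent for an Array column with three rows (example values).
	offsets := []uint64{2, 5, 5}

	lengths := make([]uint64, len(offsets))
	for n, offset := range offsets {
		ln := offset
		if n != 0 {
			ln -= offsets[n-1]
		}
		lengths[n] = ln
	}
	fmt.Println(lengths) // [2 3 0]: row 0 has 2 elements, row 1 has 3, row 2 is empty
}
```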
168
vendor/github.com/kshvakov/clickhouse/lib/column/column.go
generated
vendored
Normal file
@@ -0,0 +1,168 @@
|
||||
package column
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/kshvakov/clickhouse/lib/binary"
|
||||
)
|
||||
|
||||
type Column interface {
|
||||
Name() string
|
||||
CHType() string
|
||||
ScanType() reflect.Type
|
||||
Read(*binary.Decoder) (interface{}, error)
|
||||
Write(*binary.Encoder, interface{}) error
|
||||
defaultValue() interface{}
|
||||
}
|
||||
|
||||
func Factory(name, chType string, timezone *time.Location) (Column, error) {
|
||||
switch chType {
|
||||
case "Int8":
|
||||
return &Int8{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: baseTypes[int8(0)],
|
||||
},
|
||||
}, nil
|
||||
case "Int16":
|
||||
return &Int16{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: baseTypes[int16(0)],
|
||||
},
|
||||
}, nil
|
||||
case "Int32":
|
||||
return &Int32{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: baseTypes[int32(0)],
|
||||
},
|
||||
}, nil
|
||||
case "Int64":
|
||||
return &Int64{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: baseTypes[int64(0)],
|
||||
},
|
||||
}, nil
|
||||
case "UInt8":
|
||||
return &UInt8{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: baseTypes[uint8(0)],
|
||||
},
|
||||
}, nil
|
||||
case "UInt16":
|
||||
return &UInt16{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: baseTypes[uint16(0)],
|
||||
},
|
||||
}, nil
|
||||
case "UInt32":
|
||||
return &UInt32{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: baseTypes[uint32(0)],
|
||||
},
|
||||
}, nil
|
||||
case "UInt64":
|
||||
return &UInt64{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: baseTypes[uint64(0)],
|
||||
},
|
||||
}, nil
|
||||
case "Float32":
|
||||
return &Float32{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: baseTypes[float32(0)],
|
||||
},
|
||||
}, nil
|
||||
case "Float64":
|
||||
return &Float64{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: baseTypes[float64(0)],
|
||||
},
|
||||
}, nil
|
||||
case "String":
|
||||
return &String{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: baseTypes[string("")],
|
||||
},
|
||||
}, nil
|
||||
case "UUID":
|
||||
return &UUID{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: baseTypes[string("")],
|
||||
},
|
||||
}, nil
|
||||
case "Date":
|
||||
_, offset := time.Unix(0, 0).In(timezone).Zone()
|
||||
return &Date{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: baseTypes[time.Time{}],
|
||||
},
|
||||
Timezone: timezone,
|
||||
offset: int64(offset),
|
||||
}, nil
|
||||
case "DateTime":
|
||||
return &DateTime{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: baseTypes[time.Time{}],
|
||||
},
|
||||
Timezone: timezone,
|
||||
}, nil
|
||||
case "IPv4":
|
||||
return &IPv4{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
},
|
||||
}, nil
|
||||
case "IPv6":
|
||||
return &IPv6{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
switch {
|
||||
case strings.HasPrefix(chType, "Array"):
|
||||
return parseArray(name, chType, timezone)
|
||||
case strings.HasPrefix(chType, "Nullable"):
|
||||
return parseNullable(name, chType, timezone)
|
||||
case strings.HasPrefix(chType, "FixedString"):
|
||||
return parseFixedString(name, chType)
|
||||
case strings.HasPrefix(chType, "Enum8"), strings.HasPrefix(chType, "Enum16"):
|
||||
return parseEnum(name, chType)
|
||||
case strings.HasPrefix(chType, "Decimal"):
|
||||
return parseDecimal(name, chType)
|
||||
}
|
||||
return nil, fmt.Errorf("column: unhandled type %v", chType)
|
||||
}
|
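Factory is the single entry point that maps a ClickHouse type name to a Column implementation, with composite types (Array, Nullable, FixedString, Enum, Decimal) dispatched by prefix. A small sketch of calling it directly, assuming the upstream import path:

```
package main

import (
	"fmt"
	"time"

	"github.com/kshvakov/clickhouse/lib/column"
)

func main() {
	col, err := column.Factory("created_at", "DateTime", time.UTC)
	if err != nil {
		panic(err)
	}
	// Name and CHType echo the inputs; ScanType is the Go type rows scan into (time.Time here).
	fmt.Println(col.Name(), col.CHType(), col.ScanType())
}
```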
56
vendor/github.com/kshvakov/clickhouse/lib/column/common.go
generated
vendored
Normal file
@@ -0,0 +1,56 @@
|
||||
package column
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
type ErrUnexpectedType struct {
|
||||
Column Column
|
||||
T interface{}
|
||||
}
|
||||
|
||||
func (err *ErrUnexpectedType) Error() string {
|
||||
return fmt.Sprintf("%s: unexpected type %T", err.Column, err.T)
|
||||
}
|
||||
|
||||
var baseTypes = map[interface{}]reflect.Value{
|
||||
int8(0): reflect.ValueOf(int8(0)),
|
||||
int16(0): reflect.ValueOf(int16(0)),
|
||||
int32(0): reflect.ValueOf(int32(0)),
|
||||
int64(0): reflect.ValueOf(int64(0)),
|
||||
uint8(0): reflect.ValueOf(uint8(0)),
|
||||
uint16(0): reflect.ValueOf(uint16(0)),
|
||||
uint32(0): reflect.ValueOf(uint32(0)),
|
||||
uint64(0): reflect.ValueOf(uint64(0)),
|
||||
float32(0): reflect.ValueOf(float32(0)),
|
||||
float64(0): reflect.ValueOf(float64(0)),
|
||||
string(""): reflect.ValueOf(string("")),
|
||||
time.Time{}: reflect.ValueOf(time.Time{}),
|
||||
}
|
||||
|
||||
type base struct {
|
||||
name, chType string
|
||||
valueOf reflect.Value
|
||||
}
|
||||
|
||||
func (base *base) Name() string {
|
||||
return base.name
|
||||
}
|
||||
|
||||
func (base *base) CHType() string {
|
||||
return base.chType
|
||||
}
|
||||
|
||||
func (base *base) ScanType() reflect.Type {
|
||||
return base.valueOf.Type()
|
||||
}
|
||||
|
||||
func (base *base) defaultValue() interface{} {
|
||||
return base.valueOf.Interface()
|
||||
}
|
||||
|
||||
func (base *base) String() string {
|
||||
return fmt.Sprintf("%s (%s)", base.name, base.chType)
|
||||
}
|
80
vendor/github.com/kshvakov/clickhouse/lib/column/date.go
generated
vendored
Normal file
@@ -0,0 +1,80 @@
|
||||
package column
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/kshvakov/clickhouse/lib/binary"
|
||||
)
|
||||
|
||||
type Date struct {
|
||||
base
|
||||
Timezone *time.Location
|
||||
offset int64
|
||||
}
|
||||
|
||||
func (dt *Date) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
sec, err := decoder.Int16()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return time.Unix(int64(sec)*24*3600-dt.offset, 0).In(dt.Timezone), nil
|
||||
}
|
||||
|
||||
func (dt *Date) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
var timestamp int64
|
||||
switch value := v.(type) {
|
||||
case time.Time:
|
||||
_, offset := value.Zone()
|
||||
timestamp = value.Unix() + int64(offset)
|
||||
case int16:
|
||||
return encoder.Int16(value)
|
||||
case int32:
|
||||
timestamp = int64(value) + dt.offset
|
||||
case int64:
|
||||
timestamp = value + dt.offset
|
||||
case string:
|
||||
var err error
|
||||
timestamp, err = dt.parse(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// this relies on Nullable never sending nil values through
|
||||
case *time.Time:
|
||||
_, offset := value.Zone()
|
||||
timestamp = (*value).Unix() + int64(offset)
|
||||
case *int16:
|
||||
return encoder.Int16(*value)
|
||||
case *int32:
|
||||
timestamp = int64(*value) + dt.offset
|
||||
case *int64:
|
||||
timestamp = *value + dt.offset
|
||||
case *string:
|
||||
var err error
|
||||
timestamp, err = dt.parse(*value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
default:
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: dt,
|
||||
}
|
||||
}
|
||||
|
||||
return encoder.Int16(int16(timestamp / 24 / 3600))
|
||||
}
|
||||
|
||||
func (dt *Date) parse(value string) (int64, error) {
|
||||
tv, err := time.Parse("2006-01-02", value)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return time.Date(
|
||||
time.Time(tv).Year(),
|
||||
time.Time(tv).Month(),
|
||||
time.Time(tv).Day(),
|
||||
0, 0, 0, 0, time.UTC,
|
||||
).Unix(), nil
|
||||
}
|
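Date.Write normalizes every accepted input to a Unix timestamp (adjusted by the column's timezone offset) and stores the number of whole days since the epoch as an Int16, which is what the final encoder.Int16(int16(timestamp / 24 / 3600)) expresses. A tiny arithmetic sketch of that last step:

```
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(1970, time.January, 2, 0, 0, 0, 0, time.UTC)
	days := int16(t.Unix() / 24 / 3600)
	fmt.Println(days) // 1: Date columns hold days since 1970-01-01
}
```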
83
vendor/github.com/kshvakov/clickhouse/lib/column/datetime.go
generated
vendored
Normal file
@@ -0,0 +1,83 @@
|
||||
package column
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/kshvakov/clickhouse/lib/binary"
|
||||
)
|
||||
|
||||
type DateTime struct {
|
||||
base
|
||||
Timezone *time.Location
|
||||
}
|
||||
|
||||
func (dt *DateTime) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
sec, err := decoder.Int32()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return time.Unix(int64(sec), 0).In(dt.Timezone), nil
|
||||
}
|
||||
|
||||
func (dt *DateTime) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
var timestamp int64
|
||||
switch value := v.(type) {
|
||||
case time.Time:
|
||||
if !value.IsZero() {
|
||||
timestamp = value.Unix()
|
||||
}
|
||||
case int16:
|
||||
timestamp = int64(value)
|
||||
case int32:
|
||||
timestamp = int64(value)
|
||||
case int64:
|
||||
timestamp = value
|
||||
case string:
|
||||
var err error
|
||||
timestamp, err = dt.parse(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case *time.Time:
|
||||
if value != nil && !(*value).IsZero() {
|
||||
timestamp = (*value).Unix()
|
||||
}
|
||||
case *int16:
|
||||
timestamp = int64(*value)
|
||||
case *int32:
|
||||
timestamp = int64(*value)
|
||||
case *int64:
|
||||
timestamp = *value
|
||||
case *string:
|
||||
var err error
|
||||
timestamp, err = dt.parse(*value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
default:
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: dt,
|
||||
}
|
||||
}
|
||||
|
||||
return encoder.Int32(int32(timestamp))
|
||||
}
|
||||
|
||||
func (dt *DateTime) parse(value string) (int64, error) {
|
||||
tv, err := time.Parse("2006-01-02 15:04:05", value)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return time.Date(
|
||||
time.Time(tv).Year(),
|
||||
time.Time(tv).Month(),
|
||||
time.Time(tv).Day(),
|
||||
time.Time(tv).Hour(),
|
||||
time.Time(tv).Minute(),
|
||||
time.Time(tv).Second(),
|
||||
0, time.UTC,
|
||||
).Unix(), nil
|
||||
}
|
241
vendor/github.com/kshvakov/clickhouse/lib/column/decimal.go
generated
vendored
Normal file
@@ -0,0 +1,241 @@
|
||||
package column
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/kshvakov/clickhouse/lib/binary"
|
||||
)
|
||||
|
||||
// Table of powers of 10 for fast casting from floating types to decimal type
|
||||
// representations.
|
||||
var factors10 = []float64{
|
||||
1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11, 1e12, 1e13,
|
||||
1e14, 1e15, 1e16, 1e17, 1e18,
|
||||
}
|
||||
|
||||
// Decimal represents the ClickHouse Decimal(P, S) type. Since there is no
// support for int128 in Go, the implementation does not support 128-bit
// decimals either. Decimal values are represented as scaled integers.
// Floating-point types are also supported for query parameters.
|
||||
type Decimal struct {
|
||||
base
|
||||
nobits int // its domain is {32, 64}
|
||||
precision int
|
||||
scale int
|
||||
}
|
||||
|
||||
func (d *Decimal) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
switch d.nobits {
|
||||
case 32:
|
||||
return decoder.Int32()
|
||||
case 64:
|
||||
return decoder.Int64()
|
||||
default:
|
||||
return nil, errors.New("unachievable execution path")
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Decimal) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
switch d.nobits {
|
||||
case 32:
|
||||
return d.write32(encoder, v)
|
||||
case 64:
|
||||
return d.write64(encoder, v)
|
||||
default:
|
||||
return errors.New("unachievable execution path")
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Decimal) float2int32(floating float64) int32 {
|
||||
fixed := int32(floating * factors10[d.scale])
|
||||
return fixed
|
||||
}
|
||||
|
||||
func (d *Decimal) float2int64(floating float64) int64 {
|
||||
fixed := int64(floating * factors10[d.scale])
|
||||
return fixed
|
||||
}
|
||||
|
||||
func (d *Decimal) write32(encoder *binary.Encoder, v interface{}) error {
|
||||
switch v := v.(type) {
|
||||
case int8:
|
||||
return encoder.Int32(int32(v))
|
||||
case int16:
|
||||
return encoder.Int32(int32(v))
|
||||
case int32:
|
||||
return encoder.Int32(int32(v))
|
||||
case int64:
|
||||
return errors.New("narrowing type conversion from int64 to int32")
|
||||
|
||||
case uint8:
|
||||
return encoder.Int32(int32(v))
|
||||
case uint16:
|
||||
return encoder.Int32(int32(v))
|
||||
case uint32:
|
||||
return errors.New("narrowing type conversion from uint32 to int32")
|
||||
case uint64:
|
||||
return errors.New("narrowing type conversion from uint64 to int32")
|
||||
|
||||
case float32:
|
||||
fixed := d.float2int32(float64(v))
|
||||
return encoder.Int32(fixed)
|
||||
case float64:
|
||||
fixed := d.float2int32(float64(v))
|
||||
return encoder.Int32(fixed)
|
||||
|
||||
// this relies on Nullable never sending nil values through
|
||||
case *int8:
|
||||
return encoder.Int32(int32(*v))
|
||||
case *int16:
|
||||
return encoder.Int32(int32(*v))
|
||||
case *int32:
|
||||
return encoder.Int32(int32(*v))
|
||||
case *int64:
|
||||
return errors.New("narrowing type conversion from int64 to int32")
|
||||
|
||||
case *uint8:
|
||||
return encoder.Int32(int32(*v))
|
||||
case *uint16:
|
||||
return encoder.Int32(int32(*v))
|
||||
case *uint32:
|
||||
return errors.New("narrowing type conversion from uint32 to int32")
|
||||
case *uint64:
|
||||
return errors.New("narrowing type conversion from uint64 to int32")
|
||||
|
||||
case *float32:
|
||||
fixed := d.float2int32(float64(*v))
|
||||
return encoder.Int32(fixed)
|
||||
case *float64:
|
||||
fixed := d.float2int32(float64(*v))
|
||||
return encoder.Int32(fixed)
|
||||
}
|
||||
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: d,
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Decimal) write64(encoder *binary.Encoder, v interface{}) error {
|
||||
switch v := v.(type) {
|
||||
case int:
|
||||
return encoder.Int64(int64(v))
|
||||
case int8:
|
||||
return encoder.Int64(int64(v))
|
||||
case int16:
|
||||
return encoder.Int64(int64(v))
|
||||
case int32:
|
||||
return encoder.Int64(int64(v))
|
||||
case int64:
|
||||
return encoder.Int64(int64(v))
|
||||
|
||||
case uint8:
|
||||
return encoder.Int64(int64(v))
|
||||
case uint16:
|
||||
return encoder.Int64(int64(v))
|
||||
case uint32:
|
||||
return encoder.Int64(int64(v))
|
||||
case uint64:
|
||||
return errors.New("narrowing type conversion from uint64 to int64")
|
||||
|
||||
case float32:
|
||||
fixed := d.float2int64(float64(v))
|
||||
return encoder.Int64(fixed)
|
||||
case float64:
|
||||
fixed := d.float2int64(float64(v))
|
||||
return encoder.Int64(fixed)
|
||||
|
||||
// this relies on Nullable never sending nil values through
|
||||
case *int:
|
||||
return encoder.Int64(int64(*v))
|
||||
case *int8:
|
||||
return encoder.Int64(int64(*v))
|
||||
case *int16:
|
||||
return encoder.Int64(int64(*v))
|
||||
case *int32:
|
||||
return encoder.Int64(int64(*v))
|
||||
case *int64:
|
||||
return encoder.Int64(int64(*v))
|
||||
|
||||
case *uint8:
|
||||
return encoder.Int64(int64(*v))
|
||||
case *uint16:
|
||||
return encoder.Int64(int64(*v))
|
||||
case *uint32:
|
||||
return encoder.Int64(int64(*v))
|
||||
case *uint64:
|
||||
return errors.New("narrowing type conversion from uint64 to int64")
|
||||
|
||||
case *float32:
|
||||
fixed := d.float2int64(float64(*v))
|
||||
return encoder.Int64(fixed)
|
||||
case *float64:
|
||||
fixed := d.float2int64(float64(*v))
|
||||
return encoder.Int64(fixed)
|
||||
}
|
||||
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: d,
|
||||
}
|
||||
}
|
||||
|
||||
func parseDecimal(name, chType string) (Column, error) {
|
||||
switch {
|
||||
case len(chType) < 12:
|
||||
fallthrough
|
||||
case !strings.HasPrefix(chType, "Decimal"):
|
||||
fallthrough
|
||||
case chType[7] != '(':
|
||||
fallthrough
|
||||
case chType[len(chType)-1] != ')':
|
||||
return nil, fmt.Errorf("invalid Decimal format: '%s'", chType)
|
||||
}
|
||||
|
||||
var params = strings.Split(chType[8:len(chType)-1], ",")
|
||||
|
||||
if len(params) != 2 {
|
||||
return nil, fmt.Errorf("invalid Decimal format: '%s'", chType)
|
||||
}
|
||||
|
||||
params[0] = strings.TrimSpace(params[0])
|
||||
params[1] = strings.TrimSpace(params[1])
|
||||
|
||||
var err error
|
||||
var decimal = &Decimal{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
},
|
||||
}
|
||||
|
||||
if decimal.precision, err = strconv.Atoi(params[0]); err != nil {
|
||||
return nil, fmt.Errorf("'%s' is not Decimal type: %s", chType, err)
|
||||
} else if decimal.precision < 1 {
|
||||
return nil, errors.New("wrong precision of Decimal type")
|
||||
}
|
||||
|
||||
if decimal.scale, err = strconv.Atoi(params[1]); err != nil {
|
||||
return nil, fmt.Errorf("'%s' is not Decimal type: %s", chType, err)
|
||||
} else if decimal.scale < 0 || decimal.scale > decimal.precision {
|
||||
return nil, errors.New("wrong scale of Decimal type")
|
||||
}
|
||||
|
||||
switch {
|
||||
case decimal.precision <= 9:
|
||||
decimal.nobits = 32
|
||||
decimal.valueOf = baseTypes[int32(0)]
|
||||
case decimal.precision <= 18:
|
||||
decimal.nobits = 64
|
||||
decimal.valueOf = baseTypes[int64(0)]
|
||||
case decimal.precision <= 38:
|
||||
return nil, errors.New("Decimal128 is not supported")
|
||||
default:
|
||||
return nil, errors.New("precision of Decimal exceeds max bound")
|
||||
}
|
||||
|
||||
return decimal, nil
|
||||
}
|
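Decimal values are written as plain integers scaled by 10^scale via the factors10 table: precision up to 9 fits in 32 bits, up to 18 in 64 bits, and anything wider is rejected by parseDecimal. A worked illustration of the scaling, with values chosen here rather than taken from the source:

```
package main

import "fmt"

func main() {
	// For a Decimal(9, 4) column the scale is 4, so values are multiplied by 10^4
	// before being written as Int32.
	scale := 4
	factor := 1.0
	for i := 0; i < scale; i++ {
		factor *= 10
	}

	v := 3.14
	fixed := int32(v * factor)
	fmt.Println(fixed) // 31400
}
```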
140
vendor/github.com/kshvakov/clickhouse/lib/column/enum.go
generated
vendored
Normal file
@@ -0,0 +1,140 @@
|
||||
package column
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/kshvakov/clickhouse/lib/binary"
|
||||
)
|
||||
|
||||
type Enum struct {
|
||||
iv map[string]interface{}
|
||||
vi map[interface{}]string
|
||||
base
|
||||
baseType interface{}
|
||||
}
|
||||
|
||||
func (enum *Enum) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
var (
|
||||
err error
|
||||
ident interface{}
|
||||
)
|
||||
switch enum.baseType.(type) {
|
||||
case int16:
|
||||
if ident, err = decoder.Int16(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
if ident, err = decoder.Int8(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if ident, found := enum.vi[ident]; found {
|
||||
return ident, nil
|
||||
}
|
||||
return nil, fmt.Errorf("invalid Enum value: %v", ident)
|
||||
}
|
||||
|
||||
func (enum *Enum) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
switch v := v.(type) {
|
||||
case string:
|
||||
ident, found := enum.iv[v]
|
||||
if !found {
|
||||
return fmt.Errorf("invalid Enum ident: %s", v)
|
||||
}
|
||||
switch ident := ident.(type) {
|
||||
case int8:
|
||||
return encoder.Int8(ident)
|
||||
case int16:
|
||||
return encoder.Int16(ident)
|
||||
}
|
||||
case uint8:
|
||||
if _, ok := enum.baseType.(int8); ok {
|
||||
return encoder.Int8(int8(v))
|
||||
}
|
||||
case int8:
|
||||
if _, ok := enum.baseType.(int8); ok {
|
||||
return encoder.Int8(v)
|
||||
}
|
||||
case uint16:
|
||||
if _, ok := enum.baseType.(int16); ok {
|
||||
return encoder.Int16(int16(v))
|
||||
}
|
||||
case int16:
|
||||
if _, ok := enum.baseType.(int16); ok {
|
||||
return encoder.Int16(v)
|
||||
}
|
||||
case int64:
|
||||
switch enum.baseType.(type) {
|
||||
case int8:
|
||||
return encoder.Int8(int8(v))
|
||||
case int16:
|
||||
return encoder.Int16(int16(v))
|
||||
}
|
||||
}
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: enum,
|
||||
}
|
||||
}
|
||||
|
||||
func (enum *Enum) defaultValue() interface{} {
|
||||
return enum.baseType
|
||||
}
|
||||
|
||||
func parseEnum(name, chType string) (*Enum, error) {
|
||||
var (
|
||||
data string
|
||||
isEnum16 bool
|
||||
)
|
||||
if len(chType) < 8 {
|
||||
return nil, fmt.Errorf("invalid Enum format: %s", chType)
|
||||
}
|
||||
switch {
|
||||
case strings.HasPrefix(chType, "Enum8"):
|
||||
data = chType[6:]
|
||||
case strings.HasPrefix(chType, "Enum16"):
|
||||
data = chType[7:]
|
||||
isEnum16 = true
|
||||
default:
|
||||
return nil, fmt.Errorf("'%s' is not Enum type", chType)
|
||||
}
|
||||
enum := Enum{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: baseTypes[string("")],
|
||||
},
|
||||
iv: make(map[string]interface{}),
|
||||
vi: make(map[interface{}]string),
|
||||
}
|
||||
for _, block := range strings.Split(data[:len(data)-1], ",") {
|
||||
parts := strings.Split(block, "=")
|
||||
if len(parts) != 2 {
|
||||
return nil, fmt.Errorf("invalid Enum format: %s", chType)
|
||||
}
|
||||
var (
|
||||
ident = strings.TrimSpace(parts[0])
|
||||
value, err = strconv.ParseInt(strings.TrimSpace(parts[1]), 10, 16)
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid Enum value: %v", chType)
|
||||
}
|
||||
{
|
||||
var (
|
||||
ident = ident[1 : len(ident)-1]
|
||||
value interface{} = int16(value)
|
||||
)
|
||||
if !isEnum16 {
|
||||
value = int8(value.(int16))
|
||||
}
|
||||
if enum.baseType == nil {
|
||||
enum.baseType = value
|
||||
}
|
||||
enum.iv[ident] = value
|
||||
enum.vi[value] = ident
|
||||
}
|
||||
}
|
||||
return &enum, nil
|
||||
}
|
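parseEnum splits the declaration body on commas and "=" and builds two maps, identifier to numeric value (iv) and value to identifier (vi), using int8 for Enum8 and int16 for Enum16. A loose standalone sketch of the same splitting, with a made-up declaration and simplified trimming:

```
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Hypothetical column type; the driver receives strings like this from ClickHouse.
	chType := "Enum8('ok' = 1, 'error' = 2)"

	data := chType[6:] // strip the leading "Enum8("
	iv := map[string]int8{}
	for _, block := range strings.Split(data[:len(data)-1], ",") {
		parts := strings.Split(block, "=")
		ident := strings.Trim(strings.TrimSpace(parts[0]), "'")
		value, _ := strconv.ParseInt(strings.TrimSpace(parts[1]), 10, 16)
		iv[ident] = int8(value)
	}
	fmt.Println(iv) // map[error:2 ok:1]
}
```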
71
vendor/github.com/kshvakov/clickhouse/lib/column/fixed_string.go
generated
vendored
Normal file
@@ -0,0 +1,71 @@
|
||||
package column
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/kshvakov/clickhouse/lib/binary"
|
||||
)
|
||||
|
||||
type FixedString struct {
|
||||
base
|
||||
len int
|
||||
scanType reflect.Type
|
||||
}
|
||||
|
||||
func (str *FixedString) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
v, err := decoder.Fixed(str.len)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(v), nil
|
||||
}
|
||||
|
||||
func (str *FixedString) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
var fixedString []byte
|
||||
switch v := v.(type) {
|
||||
case string:
|
||||
fixedString = binary.Str2Bytes(v)
|
||||
case []byte:
|
||||
fixedString = v
|
||||
case encoding.BinaryMarshaler:
|
||||
bytes, err := v.MarshalBinary()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fixedString = bytes
|
||||
default:
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: str,
|
||||
}
|
||||
}
|
||||
switch {
|
||||
case len(fixedString) > str.len:
|
||||
return fmt.Errorf("too large value '%s' (expected %d, got %d)", fixedString, str.len, len(fixedString))
|
||||
case len(fixedString) < str.len:
|
||||
tmp := make([]byte, str.len)
|
||||
copy(tmp, fixedString)
|
||||
fixedString = tmp
|
||||
}
|
||||
if _, err := encoder.Write(fixedString); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseFixedString(name, chType string) (*FixedString, error) {
|
||||
var strLen int
|
||||
if _, err := fmt.Sscanf(chType, "FixedString(%d)", &strLen); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &FixedString{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
valueOf: baseTypes[string("")],
|
||||
},
|
||||
len: strLen,
|
||||
}, nil
|
||||
}
|
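FixedString(N) values shorter than N are zero-padded to exactly N bytes before being written, while longer values are rejected with an error. A small illustration of the padding, independent of the driver types:

```
package main

import "fmt"

func main() {
	const n = 5 // as in a FixedString(5) column
	value := []byte("ab")

	if len(value) > n {
		panic("too large value") // the column returns an error in this case
	}
	padded := make([]byte, n)
	copy(padded, value)
	fmt.Printf("%q\n", padded) // "ab\x00\x00\x00"
}
```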
35
vendor/github.com/kshvakov/clickhouse/lib/column/float32.go
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
package column
|
||||
|
||||
import (
|
||||
"github.com/kshvakov/clickhouse/lib/binary"
|
||||
)
|
||||
|
||||
type Float32 struct{ base }
|
||||
|
||||
func (Float32) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
v, err := decoder.Float32()
|
||||
if err != nil {
|
||||
return float32(0), err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (float *Float32) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
switch v := v.(type) {
|
||||
case float32:
|
||||
return encoder.Float32(v)
|
||||
case float64:
|
||||
return encoder.Float32(float32(v))
|
||||
|
||||
// this relies on Nullable never sending nil values through
|
||||
case *float32:
|
||||
return encoder.Float32(*v)
|
||||
case *float64:
|
||||
return encoder.Float32(float32(*v))
|
||||
}
|
||||
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: float,
|
||||
}
|
||||
}
|
35
vendor/github.com/kshvakov/clickhouse/lib/column/float64.go
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
package column
|
||||
|
||||
import (
|
||||
"github.com/kshvakov/clickhouse/lib/binary"
|
||||
)
|
||||
|
||||
type Float64 struct{ base }
|
||||
|
||||
func (Float64) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
v, err := decoder.Float64()
|
||||
if err != nil {
|
||||
return float64(0), err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (float *Float64) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
switch v := v.(type) {
|
||||
case float32:
|
||||
return encoder.Float64(float64(v))
|
||||
case float64:
|
||||
return encoder.Float64(v)
|
||||
|
||||
// this relies on Nullable never sending nil values through
|
||||
case *float32:
|
||||
return encoder.Float64(float64(*v))
|
||||
case *float64:
|
||||
return encoder.Float64(*v)
|
||||
}
|
||||
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: float,
|
||||
}
|
||||
}
|
39
vendor/github.com/kshvakov/clickhouse/lib/column/int16.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
package column
|
||||
|
||||
import (
|
||||
"github.com/kshvakov/clickhouse/lib/binary"
|
||||
)
|
||||
|
||||
type Int16 struct{ base }
|
||||
|
||||
func (Int16) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
v, err := decoder.Int16()
|
||||
if err != nil {
|
||||
return int16(0), err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (i *Int16) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
switch v := v.(type) {
|
||||
case int16:
|
||||
return encoder.Int16(v)
|
||||
case int64:
|
||||
return encoder.Int16(int16(v))
|
||||
case int:
|
||||
return encoder.Int16(int16(v))
|
||||
|
||||
// this relies on Nullable never sending nil values through
|
||||
case *int16:
|
||||
return encoder.Int16(*v)
|
||||
case *int64:
|
||||
return encoder.Int16(int16(*v))
|
||||
case *int:
|
||||
return encoder.Int16(int16(*v))
|
||||
}
|
||||
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: i,
|
||||
}
|
||||
}
|
39
vendor/github.com/kshvakov/clickhouse/lib/column/int32.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
package column
|
||||
|
||||
import (
|
||||
"github.com/kshvakov/clickhouse/lib/binary"
|
||||
)
|
||||
|
||||
type Int32 struct{ base }
|
||||
|
||||
func (Int32) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
v, err := decoder.Int32()
|
||||
if err != nil {
|
||||
return int32(0), err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (i *Int32) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
switch v := v.(type) {
|
||||
case int32:
|
||||
return encoder.Int32(v)
|
||||
case int64:
|
||||
return encoder.Int32(int32(v))
|
||||
case int:
|
||||
return encoder.Int32(int32(v))
|
||||
|
||||
// this relies on Nullable never sending nil values through
|
||||
case *int32:
|
||||
return encoder.Int32(*v)
|
||||
case *int64:
|
||||
return encoder.Int32(int32(*v))
|
||||
case *int:
|
||||
return encoder.Int32(int32(*v))
|
||||
}
|
||||
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: i,
|
||||
}
|
||||
}
|
40
vendor/github.com/kshvakov/clickhouse/lib/column/int64.go
generated
vendored
Normal file
@@ -0,0 +1,40 @@
|
||||
package column
|
||||
|
||||
import (
|
||||
"github.com/kshvakov/clickhouse/lib/binary"
|
||||
)
|
||||
|
||||
type Int64 struct{ base }
|
||||
|
||||
func (Int64) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
v, err := decoder.Int64()
|
||||
if err != nil {
|
||||
return int64(0), err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (i *Int64) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
switch v := v.(type) {
|
||||
case int:
|
||||
return encoder.Int64(int64(v))
|
||||
case int64:
|
||||
return encoder.Int64(v)
|
||||
case []byte:
|
||||
if _, err := encoder.Write(v); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
// this relies on Nullable never sending nil values through
|
||||
case *int:
|
||||
return encoder.Int64(int64(*v))
|
||||
case *int64:
|
||||
return encoder.Int64(*v)
|
||||
}
|
||||
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: i,
|
||||
}
|
||||
}
|
49
vendor/github.com/kshvakov/clickhouse/lib/column/int8.go
generated
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
package column
|
||||
|
||||
import (
|
||||
"github.com/kshvakov/clickhouse/lib/binary"
|
||||
)
|
||||
|
||||
type Int8 struct{ base }
|
||||
|
||||
func (Int8) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
v, err := decoder.Int8()
|
||||
if err != nil {
|
||||
return int8(0), err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (i *Int8) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
switch v := v.(type) {
|
||||
case int8:
|
||||
return encoder.Int8(v)
|
||||
case int64:
|
||||
return encoder.Int8(int8(v))
|
||||
case int:
|
||||
return encoder.Int8(int8(v))
|
||||
case bool:
|
||||
if v {
|
||||
return encoder.Int8(int8(1))
|
||||
}
|
||||
return encoder.Int8(int8(0))
|
||||
|
||||
// this relies on Nullable never sending nil values through
|
||||
case *int8:
|
||||
return encoder.Int8(*v)
|
||||
case *int64:
|
||||
return encoder.Int8(int8(*v))
|
||||
case *int:
|
||||
return encoder.Int8(int8(*v))
|
||||
case *bool:
|
||||
if *v {
|
||||
return encoder.Int8(int8(1))
|
||||
}
|
||||
return encoder.Int8(int8(0))
|
||||
}
|
||||
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: i,
|
||||
}
|
||||
}
|
73
vendor/github.com/kshvakov/clickhouse/lib/column/ip.go
generated
vendored
Normal file
@@ -0,0 +1,73 @@
|
||||
/*
|
||||
IP type support for ClickHouse as FixedString(16)
|
||||
*/
|
||||
|
||||
package column
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
"net"
|
||||
)
|
||||
|
||||
var (
|
||||
errInvalidScanType = errors.New("Invalid scan types")
|
||||
errInvalidScanValue = errors.New("Invalid scan value")
|
||||
)
|
||||
|
||||
// IP column type
|
||||
type IP net.IP
|
||||
|
||||
// Value implements the driver.Valuer interface, json field interface
|
||||
// Alignment on the right side
|
||||
func (ip IP) Value() (driver.Value, error) {
|
||||
return ip.MarshalBinary()
|
||||
}
|
||||
|
||||
func (ip IP) MarshalBinary() ([]byte, error) {
|
||||
if len(ip) < 16 {
|
||||
var (
|
||||
buff = make([]byte, 16)
|
||||
j = 0
|
||||
)
|
||||
for i := 16 - len(ip); i < 16; i++ {
|
||||
buff[i] = ip[j]
|
||||
j++
|
||||
}
|
||||
for i := 0; i < 16-len(ip); i++ {
|
||||
buff[i] = '\x00'
|
||||
}
|
||||
if len(ip) == 4 {
|
||||
buff[11] = '\xff'
|
||||
buff[10] = '\xff'
|
||||
}
|
||||
return buff, nil
|
||||
}
|
||||
return []byte(ip), nil
|
||||
}
|
||||
|
||||
// Scan implements the driver.Valuer interface, json field interface
|
||||
func (ip *IP) Scan(value interface{}) (err error) {
|
||||
switch v := value.(type) {
|
||||
case []byte:
|
||||
if len(v) == 4 || len(v) == 16 {
|
||||
*ip = IP(v)
|
||||
} else {
|
||||
err = errInvalidScanValue
|
||||
}
|
||||
case string:
|
||||
if len(v) == 4 || len(v) == 16 {
|
||||
*ip = IP([]byte(v))
|
||||
} else {
|
||||
err = errInvalidScanValue
|
||||
}
|
||||
default:
|
||||
err = errInvalidScanType
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// String implements the fmt.Stringer interface
|
||||
func (ip IP) String() string {
|
||||
return net.IP(ip).String()
|
||||
}
|
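A short usage sketch for the IP helper above, relying only on the Value/Scan/String methods defined in this file and the standard library (the sample address is arbitrary): an IPv4 address round-trips through the 16-byte FixedString encoding.

```go
package main

import (
	"fmt"
	"net"

	"github.com/kshvakov/clickhouse/lib/column"
)

func main() {
	ip := column.IP(net.ParseIP("10.1.2.3"))

	// Value encodes the address into the 16-byte form used for FixedString(16).
	raw, err := ip.Value()
	if err != nil {
		panic(err)
	}

	// Scan accepts the 4- or 16-byte binary form and restores the address.
	var back column.IP
	if err := back.Scan(raw); err != nil {
		panic(err)
	}
	fmt.Println(back.String()) // 10.1.2.3
}
```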
34
vendor/github.com/kshvakov/clickhouse/lib/column/ipv4.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
package column
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"github.com/kshvakov/clickhouse/lib/binary"
|
||||
)
|
||||
|
||||
type IPv4 struct {
|
||||
base
|
||||
}
|
||||
|
||||
func (*IPv4) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
v, err := decoder.Fixed(4)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return net.IPv4(v[3], v[2], v[1], v[0]), nil
|
||||
}
|
||||
|
||||
func (ip *IPv4) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
netIP, ok := v.(net.IP)
|
||||
if !ok {
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: ip,
|
||||
}
|
||||
}
|
||||
ip4 := netIP.To4()
|
||||
if _, err := encoder.Write([]byte{ip4[3], ip4[2], ip4[1], ip4[0]}); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
33
vendor/github.com/kshvakov/clickhouse/lib/column/ipv6.go
generated
vendored
Normal file
@@ -0,0 +1,33 @@
|
||||
package column
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"github.com/kshvakov/clickhouse/lib/binary"
|
||||
)
|
||||
|
||||
type IPv6 struct {
|
||||
base
|
||||
}
|
||||
|
||||
func (*IPv6) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
v, err := decoder.Fixed(16)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return net.IP(v), nil
|
||||
}
|
||||
|
||||
func (ip *IPv6) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
netIP, ok := v.(net.IP)
|
||||
if !ok {
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: ip,
|
||||
}
|
||||
}
|
||||
if _, err := encoder.Write([]byte(netIP.To16())); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
81
vendor/github.com/kshvakov/clickhouse/lib/column/nullable.go
generated
vendored
Normal file
@@ -0,0 +1,81 @@
|
||||
package column
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/kshvakov/clickhouse/lib/binary"
|
||||
)
|
||||
|
||||
type Nullable struct {
|
||||
base
|
||||
column Column
|
||||
}
|
||||
|
||||
func (null *Nullable) ScanType() reflect.Type {
|
||||
return null.column.ScanType()
|
||||
}
|
||||
|
||||
func (null *Nullable) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
return null.column.Read(decoder)
|
||||
}
|
||||
|
||||
func (null *Nullable) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (null *Nullable) ReadNull(decoder *binary.Decoder, rows int) (_ []interface{}, err error) {
|
||||
var (
|
||||
isNull byte
|
||||
value interface{}
|
||||
nulls = make([]byte, rows)
|
||||
values = make([]interface{}, rows)
|
||||
)
|
||||
for i := 0; i < rows; i++ {
|
||||
if isNull, err = decoder.ReadByte(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nulls[i] = isNull
|
||||
}
|
||||
for i, isNull := range nulls {
|
||||
switch value, err = null.column.Read(decoder); true {
|
||||
case err != nil:
|
||||
return nil, err
|
||||
case isNull == 0:
|
||||
values[i] = value
|
||||
default:
|
||||
values[i] = nil
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
func (null *Nullable) WriteNull(nulls, encoder *binary.Encoder, v interface{}) error {
|
||||
if value := reflect.ValueOf(v); v == nil || (value.Kind() == reflect.Ptr && value.IsNil()) {
|
||||
if _, err := nulls.Write([]byte{1}); err != nil {
|
||||
return err
|
||||
}
|
||||
return null.column.Write(encoder, null.column.defaultValue())
|
||||
}
|
||||
if _, err := nulls.Write([]byte{0}); err != nil {
|
||||
return err
|
||||
}
|
||||
return null.column.Write(encoder, v)
|
||||
}
|
||||
|
||||
func parseNullable(name, chType string, timezone *time.Location) (*Nullable, error) {
|
||||
if len(chType) < 14 {
|
||||
return nil, fmt.Errorf("invalid Nullable column type: %s", chType)
|
||||
}
|
||||
column, err := Factory(name, chType[9:][:len(chType)-10], timezone)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Nullable(T): %v", err)
|
||||
}
|
||||
return &Nullable{
|
||||
base: base{
|
||||
name: name,
|
||||
chType: chType,
|
||||
},
|
||||
column: column,
|
||||
}, nil
|
||||
}
|
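The split between the null flags and the underlying values is easiest to see with two encoders, mirroring how block.AppendRow drives WriteNull later in this change. A hedged sketch, assuming the column.Factory, binary.NewEncoder and writebuffer usage shown in block.go below and that Factory returns a *column.Nullable for a Nullable(T) type name:

```go
package main

import (
	"time"

	"github.com/kshvakov/clickhouse/lib/binary"
	"github.com/kshvakov/clickhouse/lib/column"
	wb "github.com/kshvakov/clickhouse/lib/writebuffer"
)

func main() {
	col, err := column.Factory("maybe", "Nullable(Int32)", time.UTC)
	if err != nil {
		panic(err)
	}
	nullable := col.(*column.Nullable)

	// One stream carries the null flags, the other the (possibly default) values.
	nulls := binary.NewEncoder(wb.New(wb.InitialSize))
	values := binary.NewEncoder(wb.New(wb.InitialSize))

	_ = nullable.WriteNull(nulls, values, int32(7)) // flag 0, then the value
	_ = nullable.WriteNull(nulls, values, nil)      // flag 1, then the column's default value
}
```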
33
vendor/github.com/kshvakov/clickhouse/lib/column/string.go
generated
vendored
Normal file
@@ -0,0 +1,33 @@
|
||||
package column
|
||||
|
||||
import (
|
||||
"github.com/kshvakov/clickhouse/lib/binary"
|
||||
)
|
||||
|
||||
type String struct{ base }
|
||||
|
||||
func (String) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
v, err := decoder.String()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (str *String) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
switch v := v.(type) {
|
||||
case string:
|
||||
return encoder.String(v)
|
||||
case []byte:
|
||||
return encoder.RawString(v)
|
||||
|
||||
// this relies on Nullable never sending nil values through
|
||||
case *string:
|
||||
return encoder.String(*v)
|
||||
}
|
||||
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: str,
|
||||
}
|
||||
}
|
39
vendor/github.com/kshvakov/clickhouse/lib/column/uint16.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
package column
|
||||
|
||||
import (
|
||||
"github.com/kshvakov/clickhouse/lib/binary"
|
||||
)
|
||||
|
||||
type UInt16 struct{ base }
|
||||
|
||||
func (UInt16) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
v, err := decoder.UInt16()
|
||||
if err != nil {
|
||||
return uint16(0), err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (u *UInt16) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
switch v := v.(type) {
|
||||
case uint16:
|
||||
return encoder.UInt16(v)
|
||||
case int64:
|
||||
return encoder.UInt16(uint16(v))
|
||||
case int:
|
||||
return encoder.UInt16(uint16(v))
|
||||
|
||||
// this relies on Nullable never sending nil values through
|
||||
case *uint16:
|
||||
return encoder.UInt16(*v)
|
||||
case *int64:
|
||||
return encoder.UInt16(uint16(*v))
|
||||
case *int:
|
||||
return encoder.UInt16(uint16(*v))
|
||||
}
|
||||
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: u,
|
||||
}
|
||||
}
|
39
vendor/github.com/kshvakov/clickhouse/lib/column/uint32.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
package column
|
||||
|
||||
import (
|
||||
"github.com/kshvakov/clickhouse/lib/binary"
|
||||
)
|
||||
|
||||
type UInt32 struct{ base }
|
||||
|
||||
func (UInt32) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
v, err := decoder.UInt32()
|
||||
if err != nil {
|
||||
return uint32(0), err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (u *UInt32) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
switch v := v.(type) {
|
||||
case uint32:
|
||||
return encoder.UInt32(v)
|
||||
case int64:
|
||||
return encoder.UInt32(uint32(v))
|
||||
case int:
|
||||
return encoder.UInt32(uint32(v))
|
||||
|
||||
// this relies on Nullable never sending nil values through
|
||||
case *uint32:
|
||||
return encoder.UInt32(*v)
|
||||
case *int64:
|
||||
return encoder.UInt32(uint32(*v))
|
||||
case *int:
|
||||
return encoder.UInt32(uint32(*v))
|
||||
}
|
||||
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: u,
|
||||
}
|
||||
}
|
44
vendor/github.com/kshvakov/clickhouse/lib/column/uint64.go
generated
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
package column
|
||||
|
||||
import (
|
||||
"github.com/kshvakov/clickhouse/lib/binary"
|
||||
)
|
||||
|
||||
type UInt64 struct{ base }
|
||||
|
||||
func (UInt64) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
v, err := decoder.UInt64()
|
||||
if err != nil {
|
||||
return uint64(0), err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (u *UInt64) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
switch v := v.(type) {
|
||||
case []byte:
|
||||
if _, err := encoder.Write(v); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
case uint64:
|
||||
return encoder.UInt64(v)
|
||||
case int64:
|
||||
return encoder.UInt64(uint64(v))
|
||||
case int:
|
||||
return encoder.UInt64(uint64(v))
|
||||
|
||||
// this relies on Nullable never sending nil values through
|
||||
case *uint64:
|
||||
return encoder.UInt64(*v)
|
||||
case *int64:
|
||||
return encoder.UInt64(uint64(*v))
|
||||
case *int:
|
||||
return encoder.UInt64(uint64(*v))
|
||||
}
|
||||
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: u,
|
||||
}
|
||||
}
|
43
vendor/github.com/kshvakov/clickhouse/lib/column/uint8.go
generated
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
package column
|
||||
|
||||
import (
|
||||
"github.com/kshvakov/clickhouse/lib/binary"
|
||||
)
|
||||
|
||||
type UInt8 struct{ base }
|
||||
|
||||
func (UInt8) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
v, err := decoder.UInt8()
|
||||
if err != nil {
|
||||
return uint8(0), err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (u *UInt8) Write(encoder *binary.Encoder, v interface{}) error {
|
||||
switch v := v.(type) {
|
||||
case bool:
|
||||
return encoder.Bool(v)
|
||||
case uint8:
|
||||
return encoder.UInt8(v)
|
||||
case int64:
|
||||
return encoder.UInt8(uint8(v))
|
||||
case int:
|
||||
return encoder.UInt8(uint8(v))
|
||||
|
||||
// this relies on Nullable never sending nil values through
|
||||
case *bool:
|
||||
return encoder.Bool(*v)
|
||||
case *uint8:
|
||||
return encoder.UInt8(*v)
|
||||
case *int64:
|
||||
return encoder.UInt8(uint8(*v))
|
||||
case *int:
|
||||
return encoder.UInt8(uint8(*v))
|
||||
}
|
||||
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: u,
|
||||
}
|
||||
}
|
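All of the numeric columns above share the same Write shape: a type switch over the plain and pointer forms, falling back to ErrUnexpectedType. A hedged sketch of driving one of them directly, assuming the column.Factory and binary.NewEncoder usage that appears in block.go further down this change:

```go
package main

import (
	"fmt"
	"time"

	"github.com/kshvakov/clickhouse/lib/binary"
	"github.com/kshvakov/clickhouse/lib/column"
	wb "github.com/kshvakov/clickhouse/lib/writebuffer"
)

func main() {
	col, err := column.Factory("flags", "UInt8", time.UTC)
	if err != nil {
		panic(err)
	}
	enc := binary.NewEncoder(wb.New(wb.InitialSize))

	_ = col.Write(enc, true)     // bools are accepted as 0/1
	_ = col.Write(enc, 42)       // plain ints are narrowed to uint8
	err = col.Write(enc, "nope") // anything else yields *ErrUnexpectedType
	fmt.Println(err)
}
```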
139
vendor/github.com/kshvakov/clickhouse/lib/column/uuid.go
generated
vendored
Normal file
@@ -0,0 +1,139 @@
|
||||
package column
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/kshvakov/clickhouse/lib/binary"
|
||||
)
|
||||
|
||||
const (
|
||||
UUIDLen = 16
|
||||
NullUUID = "00000000-0000-0000-0000-000000000000"
|
||||
)
|
||||
|
||||
var ErrInvalidUUIDFormat = errors.New("invalid UUID format")
|
||||
|
||||
type UUID struct {
|
||||
base
|
||||
scanType reflect.Type
|
||||
}
|
||||
|
||||
func (*UUID) Read(decoder *binary.Decoder) (interface{}, error) {
|
||||
src, err := decoder.Fixed(UUIDLen)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
src = swap(src)
|
||||
|
||||
var uuid [36]byte
|
||||
{
|
||||
hex.Encode(uuid[:], src[:4])
|
||||
uuid[8] = '-'
|
||||
hex.Encode(uuid[9:13], src[4:6])
|
||||
uuid[13] = '-'
|
||||
hex.Encode(uuid[14:18], src[6:8])
|
||||
uuid[18] = '-'
|
||||
hex.Encode(uuid[19:23], src[8:10])
|
||||
uuid[23] = '-'
|
||||
hex.Encode(uuid[24:], src[10:])
|
||||
}
|
||||
return string(uuid[:]), nil
|
||||
}
|
||||
|
||||
func (u *UUID) Write(encoder *binary.Encoder, v interface{}) (err error) {
|
||||
var uuid []byte
|
||||
switch v := v.(type) {
|
||||
case string:
|
||||
if uuid, err = uuid2bytes(v); err != nil {
|
||||
return err
|
||||
}
|
||||
case []byte:
|
||||
if len(v) != UUIDLen {
|
||||
return fmt.Errorf("invalid raw UUID len '%s' (expected %d, got %d)", uuid, UUIDLen, len(uuid))
|
||||
}
|
||||
uuid = make([]byte, 16)
|
||||
copy(uuid, v)
|
||||
default:
|
||||
return &ErrUnexpectedType{
|
||||
T: v,
|
||||
Column: u,
|
||||
}
|
||||
}
|
||||
|
||||
uuid = swap(uuid)
|
||||
|
||||
if _, err := encoder.Write(uuid); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func swap(src []byte) []byte {
|
||||
_ = src[15]
|
||||
src[0], src[7] = src[7], src[0]
|
||||
src[1], src[6] = src[6], src[1]
|
||||
src[2], src[5] = src[5], src[2]
|
||||
src[3], src[4] = src[4], src[3]
|
||||
src[8], src[15] = src[15], src[8]
|
||||
src[9], src[14] = src[14], src[9]
|
||||
src[10], src[13] = src[13], src[10]
|
||||
src[11], src[12] = src[12], src[11]
|
||||
return src
|
||||
}
|
||||
|
||||
func uuid2bytes(str string) ([]byte, error) {
|
||||
var uuid [16]byte
|
||||
strLength := len(str)
|
||||
if strLength == 0 {
|
||||
str = NullUUID
|
||||
} else if strLength != 36 {
|
||||
return nil, ErrInvalidUUIDFormat
|
||||
}
|
||||
if str[8] != '-' || str[13] != '-' || str[18] != '-' || str[23] != '-' {
|
||||
return nil, ErrInvalidUUIDFormat
|
||||
}
|
||||
for i, x := range [16]int{
|
||||
0, 2, 4, 6,
|
||||
9, 11, 14, 16,
|
||||
19, 21, 24, 26,
|
||||
28, 30, 32, 34,
|
||||
} {
|
||||
if v, ok := xtob(str[x], str[x+1]); !ok {
|
||||
return nil, ErrInvalidUUIDFormat
|
||||
} else {
|
||||
uuid[i] = v
|
||||
}
|
||||
}
|
||||
return uuid[:], nil
|
||||
}
|
||||
|
||||
// xvalues returns the value of a byte as a hexadecimal digit or 255.
|
||||
var xvalues = [256]byte{
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
|
||||
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
}
|
||||
|
||||
// xtob converts hex characters x1 and x2 into a byte.
|
||||
func xtob(x1, x2 byte) (byte, bool) {
|
||||
b1 := xvalues[x1]
|
||||
b2 := xvalues[x2]
|
||||
return (b1 << 4) | b2, b1 != 255 && b2 != 255
|
||||
}
|
294
vendor/github.com/kshvakov/clickhouse/lib/data/block.go
generated
vendored
Normal file
@@ -0,0 +1,294 @@
|
||||
package data
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/kshvakov/clickhouse/lib/binary"
|
||||
"github.com/kshvakov/clickhouse/lib/column"
|
||||
wb "github.com/kshvakov/clickhouse/lib/writebuffer"
|
||||
)
|
||||
|
||||
type offset [][]int
|
||||
|
||||
type Block struct {
|
||||
Values [][]interface{}
|
||||
Columns []column.Column
|
||||
NumRows uint64
|
||||
NumColumns uint64
|
||||
offsets []offset
|
||||
buffers []*buffer
|
||||
info blockInfo
|
||||
}
|
||||
|
||||
func (block *Block) Copy() *Block {
|
||||
return &Block{
|
||||
Columns: block.Columns,
|
||||
NumColumns: block.NumColumns,
|
||||
info: block.info,
|
||||
}
|
||||
}
|
||||
|
||||
func (block *Block) ColumnNames() []string {
|
||||
names := make([]string, 0, len(block.Columns))
|
||||
for _, column := range block.Columns {
|
||||
names = append(names, column.Name())
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
func (block *Block) Read(serverInfo *ServerInfo, decoder *binary.Decoder) (err error) {
|
||||
if err = block.info.read(decoder); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if block.NumColumns, err = decoder.Uvarint(); err != nil {
|
||||
return err
|
||||
}
|
||||
if block.NumRows, err = decoder.Uvarint(); err != nil {
|
||||
return err
|
||||
}
|
||||
block.Values = make([][]interface{}, block.NumColumns)
|
||||
if block.NumRows > 10 {
|
||||
for i := 0; i < int(block.NumColumns); i++ {
|
||||
block.Values[i] = make([]interface{}, 0, block.NumRows)
|
||||
}
|
||||
}
|
||||
for i := 0; i < int(block.NumColumns); i++ {
|
||||
var (
|
||||
value interface{}
|
||||
columnName string
|
||||
columnType string
|
||||
)
|
||||
if columnName, err = decoder.String(); err != nil {
|
||||
return err
|
||||
}
|
||||
if columnType, err = decoder.String(); err != nil {
|
||||
return err
|
||||
}
|
||||
c, err := column.Factory(columnName, columnType, serverInfo.Timezone)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
block.Columns = append(block.Columns, c)
|
||||
switch column := c.(type) {
|
||||
case *column.Array:
|
||||
if block.Values[i], err = column.ReadArray(decoder, int(block.NumRows)); err != nil {
|
||||
return err
|
||||
}
|
||||
case *column.Nullable:
|
||||
if block.Values[i], err = column.ReadNull(decoder, int(block.NumRows)); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
for row := 0; row < int(block.NumRows); row++ {
|
||||
if value, err = column.Read(decoder); err != nil {
|
||||
return err
|
||||
}
|
||||
block.Values[i] = append(block.Values[i], value)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (block *Block) writeArray(column column.Column, value reflect.Value, num, level int) error {
|
||||
switch {
|
||||
case value.Kind() == reflect.Slice:
|
||||
if len(block.offsets[num]) < level {
|
||||
block.offsets[num] = append(block.offsets[num], []int{value.Len()})
|
||||
} else {
|
||||
block.offsets[num][level-1] = append(
|
||||
block.offsets[num][level-1],
|
||||
block.offsets[num][level-1][len(block.offsets[num][level-1])-1]+value.Len(),
|
||||
)
|
||||
}
|
||||
for i := 0; i < value.Len(); i++ {
|
||||
if err := block.writeArray(column, value.Index(i), num, level+1); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
default:
|
||||
if err := column.Write(block.buffers[num].Column, value.Interface()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (block *Block) AppendRow(args []driver.Value) error {
|
||||
if len(block.Columns) != len(args) {
|
||||
return fmt.Errorf("block: expected %d arguments (columns: %s), got %d", len(block.Columns), strings.Join(block.ColumnNames(), ", "), len(args))
|
||||
}
|
||||
block.Reserve()
|
||||
{
|
||||
block.NumRows++
|
||||
}
|
||||
for num, c := range block.Columns {
|
||||
switch column := c.(type) {
|
||||
case *column.Array:
|
||||
value := reflect.ValueOf(args[num])
|
||||
if value.Kind() != reflect.Slice {
|
||||
return fmt.Errorf("unsupported Array(T) type [%T]", value.Interface())
|
||||
}
|
||||
if err := block.writeArray(c, value, num, 1); err != nil {
|
||||
return err
|
||||
}
|
||||
case *column.Nullable:
|
||||
if err := column.WriteNull(block.buffers[num].Offset, block.buffers[num].Column, args[num]); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
if err := column.Write(block.buffers[num].Column, args[num]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (block *Block) Reserve() {
|
||||
if len(block.buffers) == 0 {
|
||||
block.buffers = make([]*buffer, len(block.Columns))
|
||||
block.offsets = make([]offset, len(block.Columns))
|
||||
for i := 0; i < len(block.Columns); i++ {
|
||||
var (
|
||||
offsetBuffer = wb.New(wb.InitialSize)
|
||||
columnBuffer = wb.New(wb.InitialSize)
|
||||
)
|
||||
block.buffers[i] = &buffer{
|
||||
Offset: binary.NewEncoder(offsetBuffer),
|
||||
Column: binary.NewEncoder(columnBuffer),
|
||||
offsetBuffer: offsetBuffer,
|
||||
columnBuffer: columnBuffer,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (block *Block) Reset() {
|
||||
block.NumRows = 0
|
||||
block.NumColumns = 0
|
||||
for _, buffer := range block.buffers {
|
||||
buffer.reset()
|
||||
}
|
||||
{
|
||||
block.offsets = nil
|
||||
block.buffers = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (block *Block) Write(serverInfo *ServerInfo, encoder *binary.Encoder) error {
|
||||
if err := block.info.write(encoder); err != nil {
|
||||
return err
|
||||
}
|
||||
encoder.Uvarint(block.NumColumns)
|
||||
encoder.Uvarint(block.NumRows)
|
||||
defer func() {
|
||||
block.NumRows = 0
|
||||
for i := range block.offsets {
|
||||
block.offsets[i] = offset{}
|
||||
}
|
||||
}()
|
||||
for i, column := range block.Columns {
|
||||
encoder.String(column.Name())
|
||||
encoder.String(column.CHType())
|
||||
if len(block.buffers) == len(block.Columns) {
|
||||
for _, offsets := range block.offsets[i] {
|
||||
for _, offset := range offsets {
|
||||
if err := encoder.UInt64(uint64(offset)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if _, err := block.buffers[i].WriteTo(encoder); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type blockInfo struct {
|
||||
num1 uint64
|
||||
isOverflows bool
|
||||
num2 uint64
|
||||
bucketNum int32
|
||||
num3 uint64
|
||||
}
|
||||
|
||||
func (info *blockInfo) read(decoder *binary.Decoder) error {
|
||||
var err error
|
||||
if info.num1, err = decoder.Uvarint(); err != nil {
|
||||
return err
|
||||
}
|
||||
if info.isOverflows, err = decoder.Bool(); err != nil {
|
||||
return err
|
||||
}
|
||||
if info.num2, err = decoder.Uvarint(); err != nil {
|
||||
return err
|
||||
}
|
||||
if info.bucketNum, err = decoder.Int32(); err != nil {
|
||||
return err
|
||||
}
|
||||
if info.num3, err = decoder.Uvarint(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (info *blockInfo) write(encoder *binary.Encoder) error {
|
||||
if err := encoder.Uvarint(1); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := encoder.Bool(info.isOverflows); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := encoder.Uvarint(2); err != nil {
|
||||
return err
|
||||
}
|
||||
if info.bucketNum == 0 {
|
||||
info.bucketNum = -1
|
||||
}
|
||||
if err := encoder.Int32(info.bucketNum); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := encoder.Uvarint(0); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type buffer struct {
|
||||
Offset *binary.Encoder
|
||||
Column *binary.Encoder
|
||||
offsetBuffer *wb.WriteBuffer
|
||||
columnBuffer *wb.WriteBuffer
|
||||
}
|
||||
|
||||
func (buf *buffer) WriteTo(w io.Writer) (int64, error) {
|
||||
var size int64
|
||||
{
|
||||
ln, err := buf.offsetBuffer.WriteTo(w)
|
||||
if err != nil {
|
||||
return size, err
|
||||
}
|
||||
size += ln
|
||||
}
|
||||
{
|
||||
ln, err := buf.columnBuffer.WriteTo(w)
|
||||
if err != nil {
|
||||
return size, err
|
||||
}
|
||||
size += ln
|
||||
}
|
||||
return size, nil
|
||||
}
|
||||
|
||||
func (buf *buffer) reset() {
|
||||
buf.offsetBuffer.Reset()
|
||||
buf.columnBuffer.Reset()
|
||||
}
|
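As a rough illustration of how the Block above is fed row by row, here is a sketch (not taken from the driver's own code paths; it assumes column.Factory accepts the plain type names used elsewhere in this change):

```go
package main

import (
	"database/sql/driver"
	"time"

	"github.com/kshvakov/clickhouse/lib/column"
	"github.com/kshvakov/clickhouse/lib/data"
)

func main() {
	names := []string{"id", "name"}
	types := []string{"Int64", "String"}

	block := &data.Block{}
	for i := range names {
		col, err := column.Factory(names[i], types[i], time.UTC)
		if err != nil {
			panic(err)
		}
		block.Columns = append(block.Columns, col)
	}
	block.NumColumns = uint64(len(block.Columns))

	// AppendRow reserves the per-column buffers on first use and
	// dispatches each argument to the matching column's Write.
	if err := block.AppendRow([]driver.Value{int64(1), "alice"}); err != nil {
		panic(err)
	}
}
```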
98
vendor/github.com/kshvakov/clickhouse/lib/data/block_write_column.go
generated
vendored
Normal file
@@ -0,0 +1,98 @@
|
||||
package data
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/kshvakov/clickhouse/lib/binary"
|
||||
)
|
||||
|
||||
func (block *Block) WriteDate(c int, v time.Time) error {
|
||||
_, offset := v.Zone()
|
||||
nday := (v.Unix() + int64(offset)) / 24 / 3600
|
||||
return block.buffers[c].Column.UInt16(uint16(nday))
|
||||
}
|
||||
|
||||
func (block *Block) WriteDateTime(c int, v time.Time) error {
|
||||
return block.buffers[c].Column.UInt32(uint32(v.Unix()))
|
||||
}
|
||||
|
||||
func (block *Block) WriteBool(c int, v bool) error {
|
||||
if v {
|
||||
return block.buffers[c].Column.UInt8(1)
|
||||
}
|
||||
return block.buffers[c].Column.UInt8(0)
|
||||
}
|
||||
|
||||
func (block *Block) WriteInt8(c int, v int8) error {
|
||||
return block.buffers[c].Column.Int8(v)
|
||||
}
|
||||
|
||||
func (block *Block) WriteInt16(c int, v int16) error {
|
||||
return block.buffers[c].Column.Int16(v)
|
||||
}
|
||||
|
||||
func (block *Block) WriteInt32(c int, v int32) error {
|
||||
return block.buffers[c].Column.Int32(v)
|
||||
}
|
||||
|
||||
func (block *Block) WriteInt64(c int, v int64) error {
|
||||
return block.buffers[c].Column.Int64(v)
|
||||
}
|
||||
|
||||
func (block *Block) WriteUInt8(c int, v uint8) error {
|
||||
return block.buffers[c].Column.UInt8(v)
|
||||
}
|
||||
|
||||
func (block *Block) WriteUInt16(c int, v uint16) error {
|
||||
return block.buffers[c].Column.UInt16(v)
|
||||
}
|
||||
|
||||
func (block *Block) WriteUInt32(c int, v uint32) error {
|
||||
return block.buffers[c].Column.UInt32(v)
|
||||
}
|
||||
|
||||
func (block *Block) WriteUInt64(c int, v uint64) error {
|
||||
return block.buffers[c].Column.UInt64(v)
|
||||
}
|
||||
|
||||
func (block *Block) WriteFloat32(c int, v float32) error {
|
||||
return block.buffers[c].Column.Float32(v)
|
||||
}
|
||||
|
||||
func (block *Block) WriteFloat64(c int, v float64) error {
|
||||
return block.buffers[c].Column.Float64(v)
|
||||
}
|
||||
|
||||
func (block *Block) WriteBytes(c int, v []byte) error {
|
||||
if err := block.buffers[c].Column.Uvarint(uint64(len(v))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := block.buffers[c].Column.Write(v); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (block *Block) WriteString(c int, v string) error {
|
||||
if err := block.buffers[c].Column.Uvarint(uint64(len(v))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := block.buffers[c].Column.Write(binary.Str2Bytes(v)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (block *Block) WriteFixedString(c int, v []byte) error {
|
||||
return block.Columns[c].Write(block.buffers[c].Column, v)
|
||||
}
|
||||
|
||||
func (block *Block) WriteArray(c int, v interface{}) error {
|
||||
value := reflect.ValueOf(v)
|
||||
if value.Kind() != reflect.Slice {
|
||||
return fmt.Errorf("unsupported Array(T) type [%T]", value.Interface())
|
||||
}
|
||||
return block.writeArray(block.Columns[c], value, c, 1)
|
||||
}
|
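WriteDate stores a Date as the number of whole days since the Unix epoch, shifted by the value's own zone offset. The arithmetic in isolation, using only the standard library:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	v := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
	_, offset := v.Zone() // 0 for UTC
	nday := (v.Unix() + int64(offset)) / 24 / 3600
	fmt.Println(nday) // 17897 days since 1970-01-01
}
```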
29
vendor/github.com/kshvakov/clickhouse/lib/data/client_info.go
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
package data
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/kshvakov/clickhouse/lib/binary"
|
||||
)
|
||||
|
||||
const ClientName = "Golang SQLDriver"
|
||||
|
||||
const (
|
||||
ClickHouseRevision = 54213
|
||||
ClickHouseDBMSVersionMajor = 1
|
||||
ClickHouseDBMSVersionMinor = 1
|
||||
)
|
||||
|
||||
type ClientInfo struct{}
|
||||
|
||||
func (ClientInfo) Write(encoder *binary.Encoder) error {
|
||||
encoder.String(ClientName)
|
||||
encoder.Uvarint(ClickHouseDBMSVersionMajor)
|
||||
encoder.Uvarint(ClickHouseDBMSVersionMinor)
|
||||
encoder.Uvarint(ClickHouseRevision)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ClientInfo) String() string {
|
||||
return fmt.Sprintf("%s %d.%d.%d", ClientName, ClickHouseDBMSVersionMajor, ClickHouseDBMSVersionMinor, ClickHouseRevision)
|
||||
}
|
47
vendor/github.com/kshvakov/clickhouse/lib/data/server_info.go
generated
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
package data
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
//"io"
|
||||
"time"
|
||||
|
||||
"github.com/kshvakov/clickhouse/lib/binary"
|
||||
"github.com/kshvakov/clickhouse/lib/protocol"
|
||||
)
|
||||
|
||||
type ServerInfo struct {
|
||||
Name string
|
||||
Revision uint64
|
||||
MinorVersion uint64
|
||||
MajorVersion uint64
|
||||
Timezone *time.Location
|
||||
}
|
||||
|
||||
func (srv *ServerInfo) Read(decoder *binary.Decoder) (err error) {
|
||||
if srv.Name, err = decoder.String(); err != nil {
|
||||
return fmt.Errorf("could not read server name: %v", err)
|
||||
}
|
||||
if srv.MajorVersion, err = decoder.Uvarint(); err != nil {
|
||||
return fmt.Errorf("could not read server major version: %v", err)
|
||||
}
|
||||
if srv.MinorVersion, err = decoder.Uvarint(); err != nil {
|
||||
return fmt.Errorf("could not read server minor version: %v", err)
|
||||
}
|
||||
if srv.Revision, err = decoder.Uvarint(); err != nil {
|
||||
return fmt.Errorf("could not read server revision: %v", err)
|
||||
}
|
||||
if srv.Revision >= protocol.DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE {
|
||||
timezone, err := decoder.String()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not read server timezone: %v", err)
|
||||
}
|
||||
if srv.Timezone, err = time.LoadLocation(timezone); err != nil {
|
||||
return fmt.Errorf("could not load time location: %v", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (srv ServerInfo) String() string {
|
||||
return fmt.Sprintf("%s %d.%d.%d (%s)", srv.Name, srv.MajorVersion, srv.MinorVersion, srv.Revision, srv.Timezone)
|
||||
}
|
23
vendor/github.com/kshvakov/clickhouse/lib/leakypool/leaky_pool.go
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
package leakypool

var pool chan []byte

func InitBytePool(size int) {
	pool = make(chan []byte, size)
}

func GetBytes(size, capacity int) (b []byte) {
	select {
	case b = <-pool:
	default:
		b = make([]byte, size, capacity)
	}
	return
}

func PutBytes(b []byte) {
	select {
	case pool <- b:
	default:
	}
}
|
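The pool degrades gracefully when InitBytePool is never called (the nil channel simply never wins the select), so a minimal usage sketch looks like the following; the sizes are arbitrary example values:

```go
package main

import (
	"fmt"

	"github.com/kshvakov/clickhouse/lib/leakypool"
)

func main() {
	leakypool.InitBytePool(8) // keep at most 8 recycled slices

	buf := leakypool.GetBytes(0, 1024) // len 0, cap 1024
	buf = append(buf, "hello"...)
	fmt.Println(len(buf), cap(buf))

	leakypool.PutBytes(buf[:0]) // hand the backing array back for reuse
}
```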
23
vendor/github.com/kshvakov/clickhouse/lib/lz4/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
Copyright 2011-2012 Branimir Karadzic. All rights reserved.
|
||||
Copyright 2013 Damian Gryski. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
|
||||
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
THE POSSIBILITY OF SUCH DAMAGE.
|
7
vendor/github.com/kshvakov/clickhouse/lib/lz4/doc.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
// Copyright 2011-2012 Branimir Karadzic. All rights reserved.
// Copyright 2013 Damian Gryski. All rights reserved.

// @LINK: https://github.com/bkaradzic/go-lz4
// @NOTE: The code has been modified for higher performance and lower memory usage.

package lz4
|
23
vendor/github.com/kshvakov/clickhouse/lib/lz4/fuzz.go
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
// +build gofuzz
|
||||
|
||||
package lz4
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
func Fuzz(data []byte) int {
|
||||
|
||||
if len(data) < 4 {
|
||||
return 0
|
||||
}
|
||||
|
||||
ln := binary.LittleEndian.Uint32(data)
|
||||
if ln > (1 << 21) {
|
||||
return 0
|
||||
}
|
||||
|
||||
if _, err := Decode(nil, data); err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
return 1
|
||||
}
|
179
vendor/github.com/kshvakov/clickhouse/lib/lz4/reader.go
generated
vendored
Normal file
@@ -0,0 +1,179 @@
|
||||
/*
|
||||
* Copyright 2011-2012 Branimir Karadzic. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice, this
|
||||
* list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
* SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
* THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package lz4
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrCorrupt indicates the input was corrupt
|
||||
ErrCorrupt = errors.New("corrupt input")
|
||||
)
|
||||
|
||||
const (
|
||||
mlBits = 4
|
||||
mlMask = (1 << mlBits) - 1
|
||||
runBits = 8 - mlBits
|
||||
runMask = (1 << runBits) - 1
|
||||
)
|
||||
|
||||
type decoder struct {
|
||||
src []byte
|
||||
dst []byte
|
||||
spos uint32
|
||||
dpos uint32
|
||||
ref uint32
|
||||
}
|
||||
|
||||
func (d *decoder) readByte() (uint8, error) {
|
||||
if int(d.spos) == len(d.src) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
b := d.src[d.spos]
|
||||
d.spos++
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (d *decoder) getLen() (uint32, error) {
|
||||
|
||||
length := uint32(0)
|
||||
ln, err := d.readByte()
|
||||
if err != nil {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
for ln == 255 {
|
||||
length += 255
|
||||
ln, err = d.readByte()
|
||||
if err != nil {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
}
|
||||
length += uint32(ln)
|
||||
|
||||
return length, nil
|
||||
}
|
||||
|
||||
func (d *decoder) cp(length, decr uint32) {
|
||||
|
||||
if int(d.ref+length) < int(d.dpos) {
|
||||
copy(d.dst[d.dpos:], d.dst[d.ref:d.ref+length])
|
||||
} else {
|
||||
for ii := uint32(0); ii < length; ii++ {
|
||||
d.dst[d.dpos+ii] = d.dst[d.ref+ii]
|
||||
}
|
||||
}
|
||||
d.dpos += length
|
||||
d.ref += length - decr
|
||||
}
|
||||
|
||||
func (d *decoder) finish(err error) error {
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Decode decompresses src into dst, which must be large enough to hold the
// entire decoded block, and returns len(dst) on success.
|
||||
func Decode(dst, src []byte) (int, error) {
|
||||
d := decoder{src: src, dst: dst, spos: 0}
|
||||
|
||||
decr := []uint32{0, 3, 2, 3}
|
||||
|
||||
for {
|
||||
code, err := d.readByte()
|
||||
if err != nil {
|
||||
return len(d.dst), d.finish(err)
|
||||
}
|
||||
|
||||
length := uint32(code >> mlBits)
|
||||
if length == runMask {
|
||||
ln, err := d.getLen()
|
||||
if err != nil {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
length += ln
|
||||
}
|
||||
|
||||
if int(d.spos+length) > len(d.src) || int(d.dpos+length) > len(d.dst) {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
|
||||
for ii := uint32(0); ii < length; ii++ {
|
||||
d.dst[d.dpos+ii] = d.src[d.spos+ii]
|
||||
}
|
||||
|
||||
d.spos += length
|
||||
d.dpos += length
|
||||
|
||||
if int(d.spos) == len(d.src) {
|
||||
return len(d.dst), nil
|
||||
}
|
||||
|
||||
if int(d.spos+2) >= len(d.src) {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
|
||||
back := uint32(d.src[d.spos]) | uint32(d.src[d.spos+1])<<8
|
||||
|
||||
if back > d.dpos {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
|
||||
d.spos += 2
|
||||
d.ref = d.dpos - back
|
||||
|
||||
length = uint32(code & mlMask)
|
||||
if length == mlMask {
|
||||
ln, err := d.getLen()
|
||||
if err != nil {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
length += ln
|
||||
}
|
||||
|
||||
literal := d.dpos - d.ref
|
||||
|
||||
if literal < 4 {
|
||||
if int(d.dpos+4) > len(d.dst) {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
|
||||
d.cp(4, decr[literal])
|
||||
} else {
|
||||
length += 4
|
||||
}
|
||||
|
||||
if int(d.dpos+length) > len(d.dst) {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
|
||||
d.cp(length, 0)
|
||||
}
|
||||
}
|
203
vendor/github.com/kshvakov/clickhouse/lib/lz4/writer.go
generated
vendored
Normal file
@@ -0,0 +1,203 @@
|
||||
/*
|
||||
* Copyright 2011-2012 Branimir Karadzic. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice, this
|
||||
* list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
* SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
* THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package lz4
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
minMatch = 4
|
||||
hashLog = 16
|
||||
hashTableSize = 1 << hashLog
|
||||
hashShift = (minMatch * 8) - hashLog
|
||||
incompressible uint32 = 128
|
||||
uninitHash = 0x88888888
|
||||
|
||||
mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes.
|
||||
// MaxInputSize is the largest buffer that can be compressed in a single block
|
||||
MaxInputSize = 0x7E000000
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrTooLarge indicates the input buffer was too large
|
||||
ErrTooLarge = errors.New("input too large")
|
||||
ErrEncodeTooSmall = errors.New("encode buffer too small")
|
||||
|
||||
hashPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return make([]uint32, hashTableSize)
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
type encoder struct {
|
||||
src []byte
|
||||
dst []byte
|
||||
hashTable []uint32
|
||||
pos uint32
|
||||
anchor uint32
|
||||
dpos uint32
|
||||
}
|
||||
|
||||
// CompressBound returns the maximum length of a lz4 block
|
||||
func CompressBound(isize int) int {
|
||||
if isize > MaxInputSize {
|
||||
return 0
|
||||
}
|
||||
return isize + ((isize) / 255) + 16
|
||||
}
|
||||
|
||||
func (e *encoder) writeLiterals(length, mlLen, pos uint32) {
|
||||
|
||||
ln := length
|
||||
|
||||
var code byte
|
||||
if ln > runMask-1 {
|
||||
code = runMask
|
||||
} else {
|
||||
code = byte(ln)
|
||||
}
|
||||
|
||||
if mlLen > mlMask-1 {
|
||||
e.dst[e.dpos] = (code << mlBits) + byte(mlMask)
|
||||
} else {
|
||||
e.dst[e.dpos] = (code << mlBits) + byte(mlLen)
|
||||
}
|
||||
e.dpos++
|
||||
|
||||
if code == runMask {
|
||||
ln -= runMask
|
||||
for ; ln > 254; ln -= 255 {
|
||||
e.dst[e.dpos] = 255
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
e.dst[e.dpos] = byte(ln)
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
for ii := uint32(0); ii < length; ii++ {
|
||||
e.dst[e.dpos+ii] = e.src[pos+ii]
|
||||
}
|
||||
|
||||
e.dpos += length
|
||||
}
|
||||
|
||||
// Encode compresses src into dst, which must be at least CompressBound(len(src))
// bytes, and returns the compressed size.
|
||||
func Encode(dst, src []byte) (compressedSize int, error error) {
|
||||
if len(src) >= MaxInputSize {
|
||||
return 0, ErrTooLarge
|
||||
}
|
||||
|
||||
if n := CompressBound(len(src)); len(dst) < n {
|
||||
return 0, ErrEncodeTooSmall
|
||||
}
|
||||
|
||||
hashTable := hashPool.Get().([]uint32)
|
||||
for i := range hashTable {
|
||||
hashTable[i] = 0
|
||||
}
|
||||
e := encoder{src: src, dst: dst, hashTable: hashTable}
|
||||
defer func() {
|
||||
hashPool.Put(hashTable)
|
||||
}()
|
||||
// binary.LittleEndian.PutUint32(dst, uint32(len(src)))
|
||||
// e.dpos = 0
|
||||
|
||||
var (
|
||||
step uint32 = 1
|
||||
limit = incompressible
|
||||
)
|
||||
|
||||
for {
|
||||
if int(e.pos)+12 >= len(e.src) {
|
||||
e.writeLiterals(uint32(len(e.src))-e.anchor, 0, e.anchor)
|
||||
return int(e.dpos), nil
|
||||
}
|
||||
|
||||
sequence := uint32(e.src[e.pos+3])<<24 | uint32(e.src[e.pos+2])<<16 | uint32(e.src[e.pos+1])<<8 | uint32(e.src[e.pos+0])
|
||||
|
||||
hash := (sequence * 2654435761) >> hashShift
|
||||
ref := e.hashTable[hash] + uninitHash
|
||||
e.hashTable[hash] = e.pos - uninitHash
|
||||
|
||||
if ((e.pos-ref)>>16) != 0 || uint32(e.src[ref+3])<<24|uint32(e.src[ref+2])<<16|uint32(e.src[ref+1])<<8|uint32(e.src[ref+0]) != sequence {
|
||||
if e.pos-e.anchor > limit {
|
||||
limit <<= 1
|
||||
step += 1 + (step >> 2)
|
||||
}
|
||||
e.pos += step
|
||||
continue
|
||||
}
|
||||
|
||||
if step > 1 {
|
||||
e.hashTable[hash] = ref - uninitHash
|
||||
e.pos -= step - 1
|
||||
step = 1
|
||||
continue
|
||||
}
|
||||
limit = incompressible
|
||||
|
||||
ln := e.pos - e.anchor
|
||||
back := e.pos - ref
|
||||
|
||||
anchor := e.anchor
|
||||
|
||||
e.pos += minMatch
|
||||
ref += minMatch
|
||||
e.anchor = e.pos
|
||||
|
||||
for int(e.pos) < len(e.src)-5 && e.src[e.pos] == e.src[ref] {
|
||||
e.pos++
|
||||
ref++
|
||||
}
|
||||
|
||||
mlLen := e.pos - e.anchor
|
||||
|
||||
e.writeLiterals(ln, mlLen, anchor)
|
||||
e.dst[e.dpos] = uint8(back)
|
||||
e.dst[e.dpos+1] = uint8(back >> 8)
|
||||
e.dpos += 2
|
||||
|
||||
if mlLen > mlMask-1 {
|
||||
mlLen -= mlMask
|
||||
for mlLen > 254 {
|
||||
mlLen -= 255
|
||||
|
||||
e.dst[e.dpos] = 255
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
e.dst[e.dpos] = byte(mlLen)
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
e.anchor = e.pos
|
||||
}
|
||||
}
|
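A round trip through the bundled lz4 block codec, as a hedged sketch: the dst sizing follows the CompressBound and exact-length conventions of the Encode and Decode signatures above, and the sample payload is arbitrary.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/kshvakov/clickhouse/lib/lz4"
)

func main() {
	src := bytes.Repeat([]byte("clickhouse "), 100)

	// Encode needs dst to be at least CompressBound(len(src)) bytes.
	compressed := make([]byte, lz4.CompressBound(len(src)))
	n, err := lz4.Encode(compressed, src)
	if err != nil {
		panic(err)
	}

	// Decode needs dst sized to the original (uncompressed) length.
	decoded := make([]byte, len(src))
	if _, err := lz4.Decode(decoded, compressed[:n]); err != nil {
		panic(err)
	}
	fmt.Println(n, bytes.Equal(src, decoded)) // compressed size, true
}
```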
4
vendor/github.com/kshvakov/clickhouse/lib/protocol/README.md
generated
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
# ClickHouse Native protocol
|
||||
|
||||
# Handshake
|
||||
|
35
vendor/github.com/kshvakov/clickhouse/lib/protocol/protocol.go
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
package protocol
|
||||
|
||||
const (
|
||||
DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE = 54058
|
||||
DBMS_MIN_REVISION_WITH_QUOTA_KEY_IN_CLIENT_INFO = 54060
|
||||
)
|
||||
|
||||
const (
|
||||
ClientHello = 0
|
||||
ClientQuery = 1
|
||||
ClientData = 2
|
||||
ClientCancel = 3
|
||||
ClientPing = 4
|
||||
)
|
||||
|
||||
const (
|
||||
CompressEnable uint64 = 1
|
||||
CompressDisable uint64 = 0
|
||||
)
|
||||
|
||||
const (
|
||||
StateComplete = 2
|
||||
)
|
||||
|
||||
const (
|
||||
ServerHello = 0
|
||||
ServerData = 1
|
||||
ServerException = 2
|
||||
ServerProgress = 3
|
||||
ServerPong = 4
|
||||
ServerEndOfStream = 5
|
||||
ServerProfileInfo = 6
|
||||
ServerTotals = 7
|
||||
ServerExtremes = 8
|
||||
)
|
48
vendor/github.com/kshvakov/clickhouse/lib/types/date.go
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
// Timezoneless date/datetime types
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Truncate timezone
|
||||
//
|
||||
// clickhouse.Date(time.Date(2017, 1, 1, 0, 0, 0, 0, time.Local)) -> time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC)
|
||||
type Date time.Time
|
||||
|
||||
func (date Date) Value() (driver.Value, error) {
|
||||
return date.convert(), nil
|
||||
}
|
||||
|
||||
func (date Date) convert() time.Time {
|
||||
return time.Date(time.Time(date).Year(), time.Time(date).Month(), time.Time(date).Day(), 0, 0, 0, 0, time.UTC)
|
||||
}
|
||||
|
||||
// Truncate timezone
|
||||
//
|
||||
// clickhouse.DateTime(time.Date(2017, 1, 1, 0, 0, 0, 0, time.Local)) -> time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC)
|
||||
type DateTime time.Time
|
||||
|
||||
func (datetime DateTime) Value() (driver.Value, error) {
|
||||
return datetime.convert(), nil
|
||||
}
|
||||
|
||||
func (datetime DateTime) convert() time.Time {
|
||||
return time.Date(
|
||||
time.Time(datetime).Year(),
|
||||
time.Time(datetime).Month(),
|
||||
time.Time(datetime).Day(),
|
||||
time.Time(datetime).Hour(),
|
||||
time.Time(datetime).Minute(),
|
||||
time.Time(datetime).Second(),
|
||||
1,
|
||||
time.UTC,
|
||||
)
|
||||
}
|
||||
|
||||
var (
|
||||
_ driver.Valuer = Date{}
|
||||
_ driver.Valuer = DateTime{}
|
||||
)
|
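In practice these wrappers are what let a caller pin the calendar value regardless of the local zone. A small sketch of the truncation, using only the Value method above and the standard library (the fixed zone is an example):

```go
package main

import (
	"fmt"
	"time"

	"github.com/kshvakov/clickhouse/lib/types"
)

func main() {
	local := time.Date(2017, 1, 1, 0, 0, 0, 0, time.FixedZone("UTC+5", 5*3600))

	v, err := types.Date(local).Value()
	if err != nil {
		panic(err)
	}
	// The zone is dropped: the stored value is 2017-01-01 00:00:00 UTC.
	fmt.Println(v)
}
```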
99
vendor/github.com/kshvakov/clickhouse/lib/types/uuid.go
generated
vendored
Normal file
@@ -0,0 +1,99 @@
package types

import (
	"database/sql/driver"
	"encoding/hex"
	"errors"
	"fmt"
)

var InvalidUUIDFormatError = errors.New("invalid UUID format")

// this type will be deprecated because the ClickHouse server (>=1.1.54276) has a built-in type UUID
type UUID string

func (str UUID) Value() (driver.Value, error) {
	return uuid2bytes(string(str))
}

func (str UUID) MarshalBinary() ([]byte, error) {
	return uuid2bytes(string(str))
}

func (str *UUID) Scan(v interface{}) error {
	var src []byte
	switch v := v.(type) {
	case string:
		src = []byte(v)
	case []byte:
		src = v
	}

	if len(src) != 16 {
		return fmt.Errorf("invalid UUID length: %d", len(src))
	}

	var uuid [36]byte
	{
		hex.Encode(uuid[:], src[:4])
		uuid[8] = '-'
		hex.Encode(uuid[9:13], src[4:6])
		uuid[13] = '-'
		hex.Encode(uuid[14:18], src[6:8])
		uuid[18] = '-'
		hex.Encode(uuid[19:23], src[8:10])
		uuid[23] = '-'
		hex.Encode(uuid[24:], src[10:])
	}
	*str = UUID(uuid[:])
	return nil
}

func uuid2bytes(str string) ([]byte, error) {
	var uuid [16]byte
	// A canonical textual UUID is exactly 36 characters; checking the length
	// first avoids indexing past the end of shorter input below.
	if len(str) != 36 {
		return nil, InvalidUUIDFormatError
	}
	if str[8] != '-' || str[13] != '-' || str[18] != '-' || str[23] != '-' {
		return nil, InvalidUUIDFormatError
	}
	for i, x := range [16]int{
		0, 2, 4, 6,
		9, 11, 14, 16,
		19, 21, 24, 26,
		28, 30, 32, 34,
	} {
		if v, ok := xtob(str[x], str[x+1]); !ok {
			return nil, InvalidUUIDFormatError
		} else {
			uuid[i] = v
		}
	}
	return uuid[:], nil
}

// xvalues returns the value of a byte as a hexadecimal digit or 255.
var xvalues = [256]byte{
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
}

// xtob converts hex characters x1 and x2 into a byte.
func xtob(x1, x2 byte) (byte, bool) {
	b1 := xvalues[x1]
	b2 := xvalues[x2]
	return (b1 << 4) | b2, b1 != 255 && b2 != 255
}

var _ driver.Valuer = UUID("")
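
A minimal usage sketch for this helper type. The import path is the vendored one above; the 16 sample bytes are arbitrary and only illustrate the raw wire format that Scan expects.

package main

import (
	"fmt"

	"github.com/kshvakov/clickhouse/lib/types"
)

func main() {
	// 16 raw bytes, as ClickHouse returns a UUID over the native protocol.
	raw := []byte{
		0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
		0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
	}

	var u types.UUID
	if err := u.Scan(raw); err != nil {
		panic(err)
	}
	fmt.Println(u) // should print 12345678-9abc-def0-1234-56789abcdef0

	// Value() converts the textual form back into the 16-byte wire format.
	v, err := u.Value()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", v)
}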
113
vendor/github.com/kshvakov/clickhouse/lib/writebuffer/buffer.go
generated
vendored
Normal file
@@ -0,0 +1,113 @@
package writebuffer

import (
	"io"

	"github.com/kshvakov/clickhouse/lib/leakypool"
)

const InitialSize = 256 * 1024

func New(initSize int) *WriteBuffer {
	wb := &WriteBuffer{}
	wb.addChunk(0, initSize)
	return wb
}

type WriteBuffer struct {
	chunks [][]byte
}

func (wb *WriteBuffer) Write(data []byte) (int, error) {
	var (
		chunkIdx = len(wb.chunks) - 1
		dataSize = len(data)
	)
	for {
		freeSize := cap(wb.chunks[chunkIdx]) - len(wb.chunks[chunkIdx])
		if freeSize >= len(data) {
			wb.chunks[chunkIdx] = append(wb.chunks[chunkIdx], data...)
			return dataSize, nil
		}
		wb.chunks[chunkIdx] = append(wb.chunks[chunkIdx], data[:freeSize]...)
		data = data[freeSize:]
		wb.addChunk(0, wb.calcCap(len(data)))
		chunkIdx++
	}
}

func (wb *WriteBuffer) WriteTo(w io.Writer) (int64, error) {
	var size int64
	for _, chunk := range wb.chunks {
		ln, err := w.Write(chunk)
		if err != nil {
			wb.Reset()
			return 0, err
		}
		size += int64(ln)
	}
	wb.Reset()
	return size, nil
}

func (wb *WriteBuffer) Bytes() []byte {
	if len(wb.chunks) == 1 {
		return wb.chunks[0]
	}
	bytes := make([]byte, 0, wb.len())
	for _, chunk := range wb.chunks {
		bytes = append(bytes, chunk...)
	}
	return bytes
}

func (wb *WriteBuffer) addChunk(size, capacity int) {
	chunk := leakypool.GetBytes(size, capacity)
	if cap(chunk) >= size {
		chunk = chunk[:size]
	}
	wb.chunks = append(wb.chunks, chunk)
}

func (wb *WriteBuffer) len() int {
	var v int
	for _, chunk := range wb.chunks {
		v += len(chunk)
	}
	return v
}

func (wb *WriteBuffer) calcCap(dataSize int) int {
	dataSize = max(dataSize, 64)
	if len(wb.chunks) == 0 {
		return dataSize
	}
	// Always double the size of the last chunk
	return max(dataSize, cap(wb.chunks[len(wb.chunks)-1])*2)
}

func (wb *WriteBuffer) Reset() {
	if len(wb.chunks) == 0 {
		return
	}
	// Recycle all chunks except the last one
	chunkSizeThreshold := cap(wb.chunks[0])
	for _, chunk := range wb.chunks[:len(wb.chunks)-1] {
		// Drain chunks smaller than the initial size
		if cap(chunk) >= chunkSizeThreshold {
			leakypool.PutBytes(chunk[:0])
		} else {
			chunkSizeThreshold = cap(chunk)
		}
	}
	// Keep the largest chunk
	wb.chunks[0] = wb.chunks[len(wb.chunks)-1][:0]
	wb.chunks = wb.chunks[:1]
}

func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
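
A short sketch of the buffer used on its own: Write appends into the current chunk and grows by doubling, WriteTo flushes every chunk and resets. It assumes the sibling leakypool package can hand out byte slices without any explicit setup, which is how the code above already uses it.

package main

import (
	"fmt"
	"os"

	"github.com/kshvakov/clickhouse/lib/writebuffer"
)

func main() {
	wb := writebuffer.New(writebuffer.InitialSize)

	// Write never returns an error; it always reports the full length written.
	for i := 0; i < 3; i++ {
		if _, err := wb.Write([]byte("hello clickhouse\n")); err != nil {
			panic(err)
		}
	}

	// WriteTo streams every chunk to the writer, then resets the buffer so
	// the largest chunk can be reused for the next batch.
	n, err := wb.WriteTo(os.Stdout)
	if err != nil {
		panic(err)
	}
	fmt.Println("flushed bytes:", n)
}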
282
vendor/github.com/kshvakov/clickhouse/query_settings.go
generated
vendored
Normal file
@@ -0,0 +1,282 @@
package clickhouse

import (
	"fmt"
	"net/url"
	"strconv"

	"github.com/kshvakov/clickhouse/lib/binary"
)

type querySettingType int

// all possible query setting data types
const (
	uintQS querySettingType = iota + 1
	intQS
	boolQS
	timeQS
)

// description of a single query setting
type querySettingInfo struct {
	name   string
	qsType querySettingType
}

// all possible query settings
var querySettingList = []querySettingInfo{
	{"min_compress_block_size", uintQS},
	{"max_compress_block_size", uintQS},
	{"max_block_size", uintQS},
	{"max_insert_block_size", uintQS},
	{"min_insert_block_size_rows", uintQS},
	{"min_insert_block_size_bytes", uintQS},
	{"max_read_buffer_size", uintQS},
	{"max_distributed_connections", uintQS},
	{"max_query_size", uintQS},
	{"interactive_delay", uintQS},
	{"poll_interval", uintQS},
	{"distributed_connections_pool_size", uintQS},
	{"connections_with_failover_max_tries", uintQS},
	{"background_pool_size", uintQS},
	{"background_schedule_pool_size", uintQS},
	{"replication_alter_partitions_sync", uintQS},
	{"replication_alter_columns_timeout", uintQS},
	{"min_count_to_compile", uintQS},
	{"min_count_to_compile_expression", uintQS},
	{"group_by_two_level_threshold", uintQS},
	{"group_by_two_level_threshold_bytes", uintQS},
	{"aggregation_memory_efficient_merge_threads", uintQS},
	{"max_parallel_replicas", uintQS},
	{"parallel_replicas_count", uintQS},
	{"parallel_replica_offset", uintQS},
	{"merge_tree_min_rows_for_concurrent_read", uintQS},
	{"merge_tree_min_bytes_for_concurrent_read", uintQS},
	{"merge_tree_min_rows_for_seek", uintQS},
	{"merge_tree_min_bytes_for_seek", uintQS},
	{"merge_tree_coarse_index_granularity", uintQS},
	{"merge_tree_max_rows_to_use_cache", uintQS},
	{"merge_tree_max_bytes_to_use_cache", uintQS},
	{"mysql_max_rows_to_insert", uintQS},
	{"optimize_min_equality_disjunction_chain_length", uintQS},
	{"min_bytes_to_use_direct_io", uintQS},
	{"mark_cache_min_lifetime", uintQS},
	{"priority", uintQS},
	{"log_queries_cut_to_length", uintQS},
	{"max_concurrent_queries_for_user", uintQS},
	{"insert_quorum", uintQS},
	{"select_sequential_consistency", uintQS},
	{"table_function_remote_max_addresses", uintQS},
	{"read_backoff_max_throughput", uintQS},
	{"read_backoff_min_events", uintQS},
	{"output_format_pretty_max_rows", uintQS},
	{"output_format_pretty_max_column_pad_width", uintQS},
	{"output_format_parquet_row_group_size", uintQS},
	{"http_headers_progress_interval_ms", uintQS},
	{"input_format_allow_errors_num", uintQS},
	{"preferred_block_size_bytes", uintQS},
	{"max_replica_delay_for_distributed_queries", uintQS},
	{"preferred_max_column_in_block_size_bytes", uintQS},
	{"insert_distributed_timeout", uintQS},
	{"odbc_max_field_size", uintQS},
	{"max_rows_to_read", uintQS},
	{"max_bytes_to_read", uintQS},
	{"max_rows_to_group_by", uintQS},
	{"max_bytes_before_external_group_by", uintQS},
	{"max_rows_to_sort", uintQS},
	{"max_bytes_to_sort", uintQS},
	{"max_bytes_before_external_sort", uintQS},
	{"max_bytes_before_remerge_sort", uintQS},
	{"max_result_rows", uintQS},
	{"max_result_bytes", uintQS},
	{"min_execution_speed", uintQS},
	{"max_execution_speed", uintQS},
	{"min_execution_speed_bytes", uintQS},
	{"max_execution_speed_bytes", uintQS},
	{"max_columns_to_read", uintQS},
	{"max_temporary_columns", uintQS},
	{"max_temporary_non_const_columns", uintQS},
	{"max_subquery_depth", uintQS},
	{"max_pipeline_depth", uintQS},
	{"max_ast_depth", uintQS},
	{"max_ast_elements", uintQS},
	{"max_expanded_ast_elements", uintQS},
	{"readonly", uintQS},
	{"max_rows_in_set", uintQS},
	{"max_bytes_in_set", uintQS},
	{"max_rows_in_join", uintQS},
	{"max_bytes_in_join", uintQS},
	{"max_rows_to_transfer", uintQS},
	{"max_bytes_to_transfer", uintQS},
	{"max_rows_in_distinct", uintQS},
	{"max_bytes_in_distinct", uintQS},
	{"max_memory_usage", uintQS},
	{"max_memory_usage_for_user", uintQS},
	{"max_memory_usage_for_all_queries", uintQS},
	{"max_network_bandwidth", uintQS},
	{"max_network_bytes", uintQS},
	{"max_network_bandwidth_for_user", uintQS},
	{"max_network_bandwidth_for_all_users", uintQS},
	{"low_cardinality_max_dictionary_size", uintQS},
	{"max_fetch_partition_retries_count", uintQS},
	{"http_max_multipart_form_data_size", uintQS},
	{"max_partitions_per_insert_block", uintQS},

	{"network_zstd_compression_level", intQS},
	{"http_zlib_compression_level", intQS},
	{"distributed_ddl_task_timeout", intQS},

	{"extremes", boolQS},
	{"use_uncompressed_cache", boolQS},
	{"replace_running_query", boolQS},
	{"distributed_directory_monitor_batch_inserts", boolQS},
	{"optimize_move_to_prewhere", boolQS},
	{"compile", boolQS},
	{"allow_suspicious_low_cardinality_types", boolQS},
	{"compile_expressions", boolQS},
	{"distributed_aggregation_memory_efficient", boolQS},
	{"skip_unavailable_shards", boolQS},
	{"distributed_group_by_no_merge", boolQS},
	{"optimize_skip_unused_shards", boolQS},
	{"merge_tree_uniform_read_distribution", boolQS},
	{"force_index_by_date", boolQS},
	{"force_primary_key", boolQS},
	{"log_queries", boolQS},
	{"insert_deduplicate", boolQS},
	{"enable_http_compression", boolQS},
	{"http_native_compression_disable_checksumming_on_decompress", boolQS},
	{"output_format_write_statistics", boolQS},
	{"add_http_cors_header", boolQS},
	{"input_format_skip_unknown_fields", boolQS},
	{"input_format_with_names_use_header", boolQS},
	{"input_format_import_nested_json", boolQS},
	{"input_format_defaults_for_omitted_fields", boolQS},
	{"input_format_values_interpret_expressions", boolQS},
	{"output_format_json_quote_64bit_integers", boolQS},
	{"output_format_json_quote_denormals", boolQS},
	{"output_format_json_escape_forward_slashes", boolQS},
	{"output_format_pretty_color", boolQS},
	{"use_client_time_zone", boolQS},
	{"send_progress_in_http_headers", boolQS},
	{"fsync_metadata", boolQS},
	{"join_use_nulls", boolQS},
	{"fallback_to_stale_replicas_for_distributed_queries", boolQS},
	{"insert_distributed_sync", boolQS},
	{"insert_allow_materialized_columns", boolQS},
	{"optimize_throw_if_noop", boolQS},
	{"use_index_for_in_with_subqueries", boolQS},
	{"empty_result_for_aggregation_by_empty_set", boolQS},
	{"allow_distributed_ddl", boolQS},
	{"join_any_take_last_row", boolQS},
	{"format_csv_allow_single_quotes", boolQS},
	{"format_csv_allow_double_quotes", boolQS},
	{"log_profile_events", boolQS},
	{"log_query_settings", boolQS},
	{"log_query_threads", boolQS},
	{"enable_optimize_predicate_expression", boolQS},
	{"low_cardinality_use_single_dictionary_for_part", boolQS},
	{"decimal_check_overflow", boolQS},
	{"prefer_localhost_replica", boolQS},
	{"asterisk_left_columns_only", boolQS},
	{"calculate_text_stack_trace", boolQS},
	{"allow_ddl", boolQS},
	{"parallel_view_processing", boolQS},
	{"enable_debug_queries", boolQS},
	{"enable_unaligned_array_join", boolQS},
	{"low_cardinality_allow_in_native_format", boolQS},
	{"allow_experimental_multiple_joins_emulation", boolQS},
	{"allow_experimental_cross_to_join_conversion", boolQS},
	{"cancel_http_readonly_queries_on_client_close", boolQS},
	{"external_table_functions_use_nulls", boolQS},
	{"allow_experimental_data_skipping_indices", boolQS},
	{"allow_hyperscan", boolQS},
	{"allow_simdjson", boolQS},

	{"connect_timeout", timeQS},
	{"connect_timeout_with_failover_ms", timeQS},
	{"receive_timeout", timeQS},
	{"send_timeout", timeQS},
	{"tcp_keep_alive_timeout", timeQS},
	{"queue_max_wait_ms", timeQS},
	{"distributed_directory_monitor_sleep_time_ms", timeQS},
	{"insert_quorum_timeout", timeQS},
	{"read_backoff_min_latency_ms", timeQS},
	{"read_backoff_min_interval_between_events_ms", timeQS},
	{"stream_flush_interval_ms", timeQS},
	{"stream_poll_timeout_ms", timeQS},
	{"http_connection_timeout", timeQS},
	{"http_send_timeout", timeQS},
	{"http_receive_timeout", timeQS},
	{"max_execution_time", timeQS},
	{"timeout_before_checking_execution_speed", timeQS},
}

type querySettingValueEncoder func(enc *binary.Encoder) error

type querySettings struct {
	settings    map[string]querySettingValueEncoder
	settingsStr string // used for debug output
}

func makeQuerySettings(query url.Values) (*querySettings, error) {
	qs := &querySettings{
		settings:    make(map[string]querySettingValueEncoder),
		settingsStr: "",
	}

	for _, info := range querySettingList {
		valueStr := query.Get(info.name)
		if valueStr == "" {
			continue
		}

		switch info.qsType {
		case uintQS, intQS, timeQS:
			value, err := strconv.ParseUint(valueStr, 10, 64)
			if err != nil {
				return nil, err
			}
			qs.settings[info.name] = func(enc *binary.Encoder) error { return enc.Uvarint(value) }

		case boolQS:
			valueBool, err := strconv.ParseBool(valueStr)
			if err != nil {
				return nil, err
			}
			value := uint64(0)
			if valueBool {
				value = 1
			}
			qs.settings[info.name] = func(enc *binary.Encoder) error { return enc.Uvarint(value) }

		default:
			err := fmt.Errorf("query setting %s has unsupported data type", info.name)
			return nil, err
		}

		if qs.settingsStr != "" {
			qs.settingsStr += "&"
		}
		qs.settingsStr += info.name + "=" + valueStr
	}

	return qs, nil
}

func (qs *querySettings) IsEmpty() bool {
	return len(qs.settings) == 0
}

func (qs *querySettings) Serialize(enc *binary.Encoder) error {
	for name, fn := range qs.settings {
		if err := enc.String(name); err != nil {
			return err
		}
		if err := fn(enc); err != nil {
			return err
		}
	}

	return nil
}
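
These settings have no exported API of their own; makeQuerySettings reads them from the DSN query string, and parameters not in querySettingList are simply skipped. A hedged sketch follows; the host, port, and values are placeholders, and sql.Open does not dial, so this runs without a server.

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/kshvakov/clickhouse"
)

func main() {
	// max_execution_time and send_timeout are timeQS settings,
	// join_use_nulls is a boolQS setting; all are parsed from the DSN
	// when a connection is opened and sent to the server with each query.
	dsn := "tcp://127.0.0.1:9000?max_execution_time=60&send_timeout=120&join_use_nulls=1"

	db, err := sql.Open("clickhouse", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	fmt.Println("DSN configured:", dsn)
}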
8
vendor/github.com/kshvakov/clickhouse/result.go
generated
vendored
Normal file
@@ -0,0 +1,8 @@
package clickhouse

import "errors"

type result struct{}

func (*result) LastInsertId() (int64, error) { return 0, errors.New("LastInsertId is not supported") }
func (*result) RowsAffected() (int64, error) { return 0, errors.New("RowsAffected is not supported") }
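
A small sketch of what this stub means for callers: the driver.Result returned from Exec never carries an insert ID or an affected-row count. It assumes a reachable ClickHouse server on 127.0.0.1:9000 and permission to run DDL.

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/kshvakov/clickhouse"
)

func main() {
	db, err := sql.Open("clickhouse", "tcp://127.0.0.1:9000")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	res, err := db.Exec("CREATE TABLE IF NOT EXISTS example_result (x UInt8) ENGINE = Memory")
	if err != nil {
		log.Fatal(err)
	}

	// Both accessors intentionally return errors in this driver.
	if _, err := res.LastInsertId(); err != nil {
		fmt.Println("LastInsertId:", err)
	}
	if _, err := res.RowsAffected(); err != nil {
		fmt.Println("RowsAffected:", err)
	}
}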
163
vendor/github.com/kshvakov/clickhouse/rows.go
generated
vendored
Normal file
@@ -0,0 +1,163 @@
package clickhouse

import (
	"database/sql/driver"
	"fmt"
	"io"
	"reflect"
	"sync"
	"time"

	"github.com/kshvakov/clickhouse/lib/column"
	"github.com/kshvakov/clickhouse/lib/data"
	"github.com/kshvakov/clickhouse/lib/protocol"
)

type rows struct {
	ch           *clickhouse
	err          error
	mutex        sync.RWMutex
	finish       func()
	offset       int
	block        *data.Block
	totals       *data.Block
	extremes     *data.Block
	stream       chan *data.Block
	columns      []string
	blockColumns []column.Column
}

func (rows *rows) Columns() []string {
	return rows.columns
}

func (rows *rows) ColumnTypeScanType(idx int) reflect.Type {
	return rows.blockColumns[idx].ScanType()
}

func (rows *rows) ColumnTypeDatabaseTypeName(idx int) string {
	return rows.blockColumns[idx].CHType()
}

func (rows *rows) Next(dest []driver.Value) error {
	if rows.block == nil || int(rows.block.NumRows) <= rows.offset {
		switch block, ok := <-rows.stream; true {
		case !ok:
			if err := rows.error(); err != nil {
				return err
			}
			return io.EOF
		default:
			rows.block = block
			rows.offset = 0
		}
	}
	for i := range dest {
		dest[i] = rows.block.Values[i][rows.offset]
	}
	rows.offset++
	return nil
}

func (rows *rows) HasNextResultSet() bool {
	return rows.totals != nil || rows.extremes != nil
}

func (rows *rows) NextResultSet() error {
	switch {
	case rows.totals != nil:
		rows.block = rows.totals
		rows.offset = 0
		rows.totals = nil
	case rows.extremes != nil:
		rows.block = rows.extremes
		rows.offset = 0
		rows.extremes = nil
	default:
		return io.EOF
	}
	return nil
}

func (rows *rows) receiveData() error {
	defer close(rows.stream)
	var (
		err         error
		packet      uint64
		progress    *progress
		profileInfo *profileInfo
	)
	for {
		if packet, err = rows.ch.decoder.Uvarint(); err != nil {
			return rows.setError(err)
		}
		switch packet {
		case protocol.ServerException:
			rows.ch.logf("[rows] <- exception")
			return rows.setError(rows.ch.exception())
		case protocol.ServerProgress:
			if progress, err = rows.ch.progress(); err != nil {
				return rows.setError(err)
			}
			rows.ch.logf("[rows] <- progress: rows=%d, bytes=%d, total rows=%d",
				progress.rows,
				progress.bytes,
				progress.totalRows,
			)
		case protocol.ServerProfileInfo:
			if profileInfo, err = rows.ch.profileInfo(); err != nil {
				return rows.setError(err)
			}
			rows.ch.logf("[rows] <- profiling: rows=%d, bytes=%d, blocks=%d", profileInfo.rows, profileInfo.bytes, profileInfo.blocks)
		case protocol.ServerData, protocol.ServerTotals, protocol.ServerExtremes:
			var (
				block *data.Block
				begin = time.Now()
			)
			if block, err = rows.ch.readBlock(); err != nil {
				return rows.setError(err)
			}
			rows.ch.logf("[rows] <- data: packet=%d, columns=%d, rows=%d, elapsed=%s", packet, block.NumColumns, block.NumRows, time.Since(begin))
			if block.NumRows == 0 {
				continue
			}
			switch packet {
			case protocol.ServerData:
				rows.stream <- block
			case protocol.ServerTotals:
				rows.totals = block
			case protocol.ServerExtremes:
				rows.extremes = block
			}
		case protocol.ServerEndOfStream:
			rows.ch.logf("[rows] <- end of stream")
			return nil
		default:
			rows.ch.conn.Close()
			rows.ch.logf("[rows] unexpected packet [%d]", packet)
			return rows.setError(fmt.Errorf("[rows] unexpected packet [%d] from server", packet))
		}
	}
}

func (rows *rows) Close() error {
	rows.ch.logf("[rows] close")
	rows.columns = nil
	for range rows.stream {
	}
	rows.finish()
	return nil
}

func (rows *rows) error() error {
	rows.mutex.RLock()
	defer rows.mutex.RUnlock()
	return rows.err
}

func (rows *rows) setError(err error) error {
	rows.mutex.Lock()
	rows.err = err
	rows.mutex.Unlock()
	return err
}
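
Through database/sql, the ColumnTypeScanType and ColumnTypeDatabaseTypeName hooks above surface as sql.ColumnType metadata. A sketch, assuming a ClickHouse server listening on 127.0.0.1:9000; the query against system.numbers is just an illustration.

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/kshvakov/clickhouse"
)

func main() {
	db, err := sql.Open("clickhouse", "tcp://127.0.0.1:9000")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	rows, err := db.Query("SELECT number, toString(number) AS str FROM system.numbers LIMIT 5")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	// Backed by ColumnTypeDatabaseTypeName / ColumnTypeScanType above.
	types, err := rows.ColumnTypes()
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range types {
		fmt.Println(t.Name(), t.DatabaseTypeName(), t.ScanType())
	}

	for rows.Next() {
		var (
			n   uint64
			str string
		)
		if err := rows.Scan(&n, &str); err != nil {
			log.Fatal(err)
		}
		fmt.Println(n, str)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}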
185
vendor/github.com/kshvakov/clickhouse/stmt.go
generated
vendored
Normal file
@@ -0,0 +1,185 @@
package clickhouse

import (
	"bytes"
	"context"
	"database/sql/driver"
	"unicode"

	"github.com/kshvakov/clickhouse/lib/data"
)

type stmt struct {
	ch       *clickhouse
	query    string
	counter  int
	numInput int
	isInsert bool
}

var emptyResult = &result{}

func (stmt *stmt) NumInput() int {
	switch {
	case stmt.ch.block != nil:
		return len(stmt.ch.block.Columns)
	case stmt.numInput < 0:
		return 0
	}
	return stmt.numInput
}

func (stmt *stmt) Exec(args []driver.Value) (driver.Result, error) {
	return stmt.execContext(context.Background(), args)
}

func (stmt *stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
	dargs := make([]driver.Value, len(args))
	for i, nv := range args {
		dargs[i] = nv.Value
	}
	return stmt.execContext(ctx, dargs)
}

func (stmt *stmt) execContext(ctx context.Context, args []driver.Value) (driver.Result, error) {
	if stmt.isInsert {
		stmt.counter++
		if err := stmt.ch.block.AppendRow(args); err != nil {
			return nil, err
		}
		if (stmt.counter % stmt.ch.blockSize) == 0 {
			stmt.ch.logf("[exec] flush block")
			if err := stmt.ch.writeBlock(stmt.ch.block); err != nil {
				return nil, err
			}
			if err := stmt.ch.encoder.Flush(); err != nil {
				return nil, err
			}
		}
		return emptyResult, nil
	}
	if err := stmt.ch.sendQuery(stmt.bind(convertOldArgs(args))); err != nil {
		return nil, err
	}
	if err := stmt.ch.process(); err != nil {
		return nil, err
	}
	return emptyResult, nil
}

func (stmt *stmt) Query(args []driver.Value) (driver.Rows, error) {
	return stmt.queryContext(context.Background(), convertOldArgs(args))
}

func (stmt *stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
	return stmt.queryContext(ctx, args)
}

func (stmt *stmt) queryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
	finish := stmt.ch.watchCancel(ctx)
	if err := stmt.ch.sendQuery(stmt.bind(args)); err != nil {
		finish()
		return nil, err
	}
	meta, err := stmt.ch.readMeta()
	if err != nil {
		finish()
		return nil, err
	}
	rows := rows{
		ch:           stmt.ch,
		finish:       finish,
		stream:       make(chan *data.Block, 50),
		columns:      meta.ColumnNames(),
		blockColumns: meta.Columns,
	}
	go rows.receiveData()
	return &rows, nil
}

func (stmt *stmt) Close() error {
	stmt.ch.logf("[stmt] close")
	return nil
}

func (stmt *stmt) bind(args []driver.NamedValue) string {
	var (
		buf       bytes.Buffer
		index     int
		keyword   bool
		inBetween bool
		like      = newMatcher("like")
		limit     = newMatcher("limit")
		between   = newMatcher("between")
		and       = newMatcher("and")
	)
	switch {
	case stmt.NumInput() != 0:
		reader := bytes.NewReader([]byte(stmt.query))
		for {
			if char, _, err := reader.ReadRune(); err == nil {
				switch char {
				case '@':
					if param := paramParser(reader); len(param) != 0 {
						for _, v := range args {
							if len(v.Name) != 0 && v.Name == param {
								buf.WriteString(quote(v.Value))
							}
						}
					}
				case '?':
					if keyword && index < len(args) && len(args[index].Name) == 0 {
						buf.WriteString(quote(args[index].Value))
						index++
					} else {
						buf.WriteRune(char)
					}
				default:
					switch {
					case
						char == '=',
						char == '<',
						char == '>',
						char == '(',
						char == ',',
						char == '+',
						char == '-',
						char == '*',
						char == '/',
						char == '[':
						keyword = true
					default:
						if limit.matchRune(char) || like.matchRune(char) {
							keyword = true
						} else if between.matchRune(char) {
							keyword = true
							inBetween = true
						} else if inBetween && and.matchRune(char) {
							keyword = true
							inBetween = false
						} else {
							keyword = keyword && unicode.IsSpace(char)
						}
					}
					buf.WriteRune(char)
				}
			} else {
				break
			}
		}
	default:
		buf.WriteString(stmt.query)
	}
	return buf.String()
}

func convertOldArgs(args []driver.Value) []driver.NamedValue {
	dargs := make([]driver.NamedValue, len(args))
	for i, v := range args {
		dargs[i] = driver.NamedValue{
			Ordinal: i + 1,
			Value:   v,
		}
	}
	return dargs
}
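
A sketch of how bind() shows up at the database/sql level, assuming a server on 127.0.0.1:9000. Per the code above, '@name' placeholders are always substituted with the quoted value of the matching named argument, while positional '?' placeholders are only substituted after an operator or a keyword such as '=', LIKE, LIMIT or BETWEEN.

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/kshvakov/clickhouse"
)

func main() {
	db, err := sql.Open("clickhouse", "tcp://127.0.0.1:9000")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// bind() rewrites "numbers(@n)" to "numbers(100)" before the query is sent.
	var count uint64
	err = db.QueryRow(
		"SELECT count() FROM numbers(@n)",
		sql.Named("n", 100),
	).Scan(&count)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(count) // expected: 100
}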
45
vendor/github.com/kshvakov/clickhouse/tls_config.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
package clickhouse

import (
	"crypto/tls"
	"sync"
)

// Based on the original implementation in the project go-sql-driver/mysql:
// https://github.com/go-sql-driver/mysql/blob/master/utils.go

var (
	tlsConfigLock     sync.RWMutex
	tlsConfigRegistry map[string]*tls.Config
)

// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open.
func RegisterTLSConfig(key string, config *tls.Config) error {
	tlsConfigLock.Lock()
	if tlsConfigRegistry == nil {
		tlsConfigRegistry = make(map[string]*tls.Config)
	}

	tlsConfigRegistry[key] = config
	tlsConfigLock.Unlock()
	return nil
}

// DeregisterTLSConfig removes the tls.Config associated with key.
func DeregisterTLSConfig(key string) {
	tlsConfigLock.Lock()
	if tlsConfigRegistry != nil {
		delete(tlsConfigRegistry, key)
	}
	tlsConfigLock.Unlock()
}

func getTLSConfigClone(key string) (config *tls.Config) {
	tlsConfigLock.RLock()
	if v, ok := tlsConfigRegistry[key]; ok {
		config = v.Clone()
	}
	tlsConfigLock.RUnlock()
	return
}
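
A sketch of registering and selecting a named TLS config. The secure and tls_config DSN parameter names are taken from the driver's README rather than from this file, so treat them as assumptions; the hostname is a placeholder, and sql.Open does not dial.

package main

import (
	"crypto/tls"
	"database/sql"
	"log"

	"github.com/kshvakov/clickhouse"
)

func main() {
	// Register a named tls.Config once, then reference it from the DSN.
	if err := clickhouse.RegisterTLSConfig("custom", &tls.Config{
		MinVersion: tls.VersionTLS12,
	}); err != nil {
		log.Fatal(err)
	}

	// secure=true enables TLS; tls_config selects the registered config
	// (parameter names assumed from the driver's README).
	db, err := sql.Open("clickhouse", "tcp://clickhouse.example.com:9440?secure=true&tls_config=custom")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}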
31
vendor/github.com/kshvakov/clickhouse/word_matcher.go
generated
vendored
Normal file
@@ -0,0 +1,31 @@
package clickhouse

import (
	"strings"
	"unicode"
)

// wordMatcher is a simple automaton that matches a single word (case insensitive)
type wordMatcher struct {
	word     []rune
	position uint8
}

// newMatcher returns a matcher for the word needle
func newMatcher(needle string) *wordMatcher {
	return &wordMatcher{
		word:     []rune(strings.ToUpper(needle)),
		position: 0,
	}
}

func (m *wordMatcher) matchRune(r rune) bool {
	if m.word[m.position] == unicode.ToUpper(r) {
		if m.position == uint8(len(m.word)-1) {
			m.position = 0
			return true
		}
		m.position++
	} else {
		m.position = 0
	}
	return false
}
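
Because the matcher is unexported, a usage sketch has to live in the same package. The test below (a hypothetical file such as word_matcher_sketch_test.go, not part of the vendored code) drives matchRune the way bind() does, one rune at a time.

package clickhouse

import "testing"

// TestWordMatcherSketch feeds a query rune by rune and records where the
// keyword completes: matchRune reports true on the rune that finishes the
// word, case-insensitively.
func TestWordMatcherSketch(t *testing.T) {
	limit := newMatcher("limit")
	query := "SELECT 1 LIMIT 3"

	hitAt := -1
	for i, r := range query {
		if limit.matchRune(r) {
			hitAt = i
		}
	}

	// Byte index 13 is the final 'T' of "LIMIT" in the query above.
	if hitAt != 13 {
		t.Fatalf("expected keyword to complete at index 13, got %d", hitAt)
	}
}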
54
vendor/github.com/kshvakov/clickhouse/write_column.go
generated
vendored
Normal file
@@ -0,0 +1,54 @@
package clickhouse

import (
	"database/sql"
	"database/sql/driver"
	"time"

	"github.com/kshvakov/clickhouse/lib/data"
)

// Interface for Clickhouse driver
type Clickhouse interface {
	Block() (*data.Block, error)
	Prepare(query string) (driver.Stmt, error)
	Begin() (driver.Tx, error)
	Commit() error
	Rollback() error
	Close() error
	WriteBlock(block *data.Block) error
}

// Interface for Block allowing writes to individual columns
type ColumnWriter interface {
	WriteDate(c int, v time.Time) error
	WriteDateTime(c int, v time.Time) error
	WriteUInt8(c int, v uint8) error
	WriteUInt16(c int, v uint16) error
	WriteUInt32(c int, v uint32) error
	WriteUInt64(c int, v uint64) error
	WriteFloat32(c int, v float32) error
	WriteFloat64(c int, v float64) error
	WriteBytes(c int, v []byte) error
	WriteArray(c int, v interface{}) error
	WriteString(c int, v string) error
	WriteFixedString(c int, v []byte) error
}

func OpenDirect(dsn string) (Clickhouse, error) {
	return open(dsn)
}

func (ch *clickhouse) Block() (*data.Block, error) {
	if ch.block == nil {
		return nil, sql.ErrTxDone
	}
	return ch.block, nil
}

func (ch *clickhouse) WriteBlock(block *data.Block) error {
	if block == nil {
		return sql.ErrTxDone
	}
	return ch.writeBlock(block)
}
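
A sketch of the column-oriented write path exposed here: OpenDirect, Begin, Prepare an INSERT, fetch the server-described Block, fill its columns, then WriteBlock and Commit. It assumes a reachable server, an existing target table, and that data.Block exposes Reserve() and a NumRows counter; the Reserve() call and field are recalled from the driver's README and other driver code, not from this file, so treat them as assumptions.

package main

import (
	"log"
	"time"

	"github.com/kshvakov/clickhouse"
)

func main() {
	// Assumes a server on 127.0.0.1:9000 and an existing table, e.g.:
	//   CREATE TABLE example_direct (os_id UInt8, action_day Date) ENGINE = Memory
	conn, err := clickhouse.OpenDirect("tcp://127.0.0.1:9000")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if _, err := conn.Begin(); err != nil {
		log.Fatal(err)
	}
	if _, err := conn.Prepare("INSERT INTO example_direct (os_id, action_day) VALUES (?, ?)"); err != nil {
		log.Fatal(err)
	}

	block, err := conn.Block()
	if err != nil {
		log.Fatal(err)
	}

	// Reserve() is assumed to allocate the per-column write buffers.
	block.Reserve()
	for row := 0; row < 10; row++ {
		block.NumRows++
		block.WriteUInt8(0, uint8(row)) // column 0: os_id (error returns elided for brevity)
		block.WriteDate(1, time.Now())  // column 1: action_day
	}

	if err := conn.WriteBlock(block); err != nil {
		log.Fatal(err)
	}
	if err := conn.Commit(); err != nil {
		log.Fatal(err)
	}
}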