Showing 482 changed files with 5034 additions and 0 deletions
Gopkg.lock
0 → 100644
1 | +memo = "efe4a26b5775ea537c0383b685d50fa64ee8fa6eec77406c5326d5f54744423f" | ||
2 | + | ||
3 | +[[projects]] | ||
4 | + branch = "master" | ||
5 | + name = "github.com/influxdata/influxdb" | ||
6 | + packages = ["client/v2","models","pkg/escape"] | ||
7 | + revision = "31db9d6f468239346a1fe7464b5cf9c85580488f" | ||
8 | + source = "http://git.ukko.mx/UkkoGo/influx_client.git" |
Gopkg.toml
0 → 100644
1 | + | ||
2 | +## Gopkg.toml example (these lines may be deleted) | ||
3 | + | ||
4 | +## "required" lists a set of packages (not projects) that must be included in | ||
5 | +## Gopkg.lock. This list is merged with the set of packages imported by the current | ||
6 | +## project. Use it when your project needs a package it doesn't explicitly import - | ||
7 | +## including "main" packages. | ||
8 | +# required = ["github.com/user/thing/cmd/thing"] | ||
9 | + | ||
10 | +## "ignored" lists a set of packages (not projects) that are ignored when | ||
11 | +## dep statically analyzes source code. Ignored packages can be in this project, | ||
12 | +## or in a dependency. | ||
13 | +# ignored = ["github.com/user/project/badpkg"] | ||
14 | + | ||
15 | +## Dependencies define constraints on dependent projects. They are respected by | ||
16 | +## dep whether coming from the Gopkg.toml of the current project or a dependency. | ||
17 | +# [[dependencies]] | ||
18 | +## Required: the root import path of the project being constrained. | ||
19 | +# name = "github.com/user/project" | ||
20 | +# | ||
21 | +## Recommended: the version constraint to enforce for the project. | ||
22 | +## Only one of "branch", "version" or "revision" can be specified. | ||
23 | +# version = "1.0.0" | ||
24 | +# branch = "master" | ||
25 | +# revision = "abc123" | ||
26 | +# | ||
27 | +## Optional: an alternate location (URL or import path) for the project's source. | ||
28 | +# source = "https://github.com/myfork/package.git" | ||
29 | + | ||
30 | +## Overrides have the same structure as [[dependencies]], but supersede all | ||
31 | +## [[dependencies]] declarations from all projects. Only the current project's | ||
32 | +## [[overrides]] are applied. | ||
33 | +## | ||
34 | +## Overrides are a sledgehammer. Use them only as a last resort. | ||
35 | +# [[overrides]] | ||
36 | +## Required: the root import path of the project being constrained. | ||
37 | +# name = "github.com/user/project" | ||
38 | +# | ||
39 | +## Optional: specifying a version constraint override will cause all other | ||
40 | +## constraints on this project to be ignored; only the overridden constraint | ||
41 | +## need be satisfied. | ||
42 | +## Again, only one of "branch", "version" or "revision" can be specified. | ||
43 | +# version = "1.0.0" | ||
44 | +# branch = "master" | ||
45 | +# revision = "abc123" | ||
46 | +# | ||
47 | +## Optional: specifying an alternate source location as an override will | ||
48 | +## enforce that the alternate location is used for that project, regardless of | ||
49 | +## what source location any dependent projects specify. | ||
50 | +# source = "https://github.com/myfork/package.git" | ||
51 | + | ||
52 | + | ||
53 | + | ||
54 | +[[dependencies]] | ||
55 | + branch = "master" | ||
56 | + name = "github.com/influxdata/influxdb" | ||
57 | + source = "http://git.ukko.mx/UkkoGo/influx_client.git" |
influx_client.go
0 → 100644
1 | +package influx_client | ||
2 | + | ||
3 | +import ( | ||
4 | + influx "github.com/influxdata/influxdb/client/v2" | ||
5 | + "time" | ||
6 | +) | ||
7 | + | ||
8 | + | ||
9 | +type InfluxClient struct { | ||
10 | + Client influx.Client | ||
11 | + InfluxErr error | ||
12 | + Db string | ||
13 | +} | ||
14 | + | ||
15 | +// CreateClient builds an InfluxClient for the given database and HTTP endpoint. It panics if the underlying HTTP client cannot be constructed. | ||
16 | +func CreateClient(db string, username string, password string, dbAddress string) *InfluxClient { | ||
17 | + i := InfluxClient{} | ||
17 | + i.Db = db | ||
18 | + i.Client, i.InfluxErr = influx.NewHTTPClient(influx.HTTPConfig{ | ||
19 | + Addr: dbAddress, | ||
20 | + Username: username, | ||
21 | + Password: password, | ||
22 | + }) | ||
23 | + if i.InfluxErr != nil { | ||
24 | + panic(i.InfluxErr) | ||
25 | + } | ||
26 | + return &i | ||
27 | +} | ||
28 | + | ||
29 | +// CreatePoint writes a single point to the configured database, using a one-point batch per call. | ||
30 | +func (i *InfluxClient) CreatePoint(tableName string, tags map[string]string, fields map[string]interface{}, timestamp time.Time) { | ||
31 | + bp, err := influx.NewBatchPoints(influx.BatchPointsConfig{ | ||
32 | + Database: i.Db, | ||
33 | + Precision: "s", | ||
34 | + }) | ||
35 | + if err != nil { | ||
36 | + panic(err) | ||
37 | + } | ||
38 | + | ||
39 | + pt, err := influx.NewPoint(tableName, tags, fields, timestamp) | ||
40 | + if err != nil { | ||
41 | + panic(err) | ||
42 | + } | ||
40 | + bp.AddPoint(pt) | ||
41 | + | ||
42 | + // Write the batch | ||
43 | + if err := i.Client.Write(bp); err != nil { | ||
44 | + panic(err) | ||
45 | + } | ||
46 | +} |
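A minimal usage sketch for the wrapper above. Hedged: the import alias, the database name `mydb`, and the measurement `cpu_load` are illustrative assumptions, not taken from this change.

```go
package main

import (
	"time"

	// Hypothetical import path for the package defined above.
	influxclient "git.ukko.mx/UkkoGo/influx_client"
)

func main() {
	// CreateClient panics if the HTTP client cannot be constructed.
	c := influxclient.CreateClient("mydb", "user", "pass", "http://localhost:8086")

	// Each CreatePoint call writes one point in its own batch.
	c.CreatePoint("cpu_load",
		map[string]string{"host": "server01"},  // tags
		map[string]interface{}{"value": 0.64},  // fields
		time.Now())
}
```

Note that the wrapper writes one batch per point; for high write rates, batching many points per write (as the client README later in this change recommends) is cheaper.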
1 | +build |
1 | +### Directions | ||
2 | +_GitHub Issues are reserved for actionable bug reports and feature requests._ | ||
3 | +_General questions should be sent to the [InfluxDB Community Site](https://community.influxdata.com)._ | ||
4 | + | ||
5 | +_Before opening an issue, search for similar bug reports or feature requests on GitHub Issues._ | ||
6 | +_If no similar issue can be found, fill out either the "Bug Report" or the "Feature Request" section below._ | ||
7 | +_Erase the other section and everything on and above this line._ | ||
8 | + | ||
9 | +### Bug report | ||
10 | + | ||
11 | +__System info:__ [Include InfluxDB version, operating system name, and other relevant details] | ||
12 | + | ||
13 | +__Steps to reproduce:__ | ||
14 | + | ||
15 | +1. [First Step] | ||
16 | +2. [Second Step] | ||
17 | +3. [and so on...] | ||
18 | + | ||
19 | +__Expected behavior:__ [What you expected to happen] | ||
20 | + | ||
21 | +__Actual behavior:__ [What actually happened] | ||
22 | + | ||
23 | +__Additional info:__ [Include gist of relevant config, logs, etc.] | ||
24 | + | ||
25 | +Also, if this is an issue of performance, locking, etc., the following commands are useful for creating debug information for the team. | ||
26 | + | ||
27 | +``` | ||
28 | +curl -o block.txt "http://localhost:8086/debug/pprof/block?debug=1" | ||
29 | +curl -o goroutine.txt "http://localhost:8086/debug/pprof/goroutine?debug=1" | ||
30 | +curl -o heap.txt "http://localhost:8086/debug/pprof/heap?debug=1" | ||
31 | +curl -o vars.txt "http://localhost:8086/debug/vars" | ||
32 | +iostat -xd 1 30 > iostat.txt | ||
33 | +influx -execute "show shards" > shards.txt | ||
34 | +influx -execute "show stats" > stats.txt | ||
35 | +influx -execute "show diagnostics" > diagnostics.txt | ||
36 | +``` | ||
37 | + | ||
38 | +Please run those if possible and link them from a [gist](http://gist.github.com). | ||
39 | + | ||
40 | +*Please note, the quickest way to fix a bug is to open a Pull Request.* | ||
41 | + | ||
42 | + | ||
43 | +### Feature Request | ||
44 | + | ||
45 | +Opening a feature request kicks off a discussion. | ||
46 | +Requests may be closed if we're not actively planning to work on them. | ||
47 | + | ||
48 | +__Proposal:__ [Description of the feature] | ||
49 | + | ||
50 | +__Current behavior:__ [What currently happens] | ||
51 | + | ||
52 | +__Desired behavior:__ [What you would like to happen] | ||
53 | + | ||
54 | +__Use case:__ [Why is this important (helps with prioritizing requests)] |
1 | +###### Required for all non-trivial PRs | ||
2 | +- [ ] Rebased/mergable | ||
3 | +- [ ] Tests pass | ||
4 | +- [ ] CHANGELOG.md updated | ||
5 | +- [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed) | ||
6 | + | ||
7 | +###### Required only if applicable | ||
8 | +_You can erase any checkboxes below this note if they are not applicable to your Pull Request._ | ||
9 | +- [ ] [InfluxQL Spec](https://github.com/influxdata/influxdb/blob/master/influxql/README.md) updated | ||
10 | +- [ ] Provide example syntax | ||
11 | +- [ ] Update man page when modifying a command | ||
12 | +- [ ] Config changes: update sample config (`etc/config.sample.toml`), server `NewDemoConfig` method, and `Diagnostics` methods reporting config settings, if necessary | ||
13 | +- [ ] [InfluxData Documentation](https://github.com/influxdata/docs.influxdata.com): issue filed or pull request submitted \<link to issue or pull request\> |
1 | +# Keep editor-specific, non-project-specific ignore rules in your global .gitignore: | ||
2 | +# https://help.github.com/articles/ignoring-files/#create-a-global-gitignore | ||
3 | + | ||
4 | +*~ | ||
5 | +src/ | ||
6 | + | ||
7 | +config.json | ||
8 | +/bin/ | ||
9 | + | ||
10 | +/query/a.out* | ||
11 | + | ||
12 | +# ignore generated files. | ||
13 | +cmd/influxd/version.go | ||
14 | + | ||
15 | +# executables | ||
16 | + | ||
17 | +*.test | ||
18 | + | ||
19 | +influx_tsm | ||
20 | +**/influx_tsm | ||
21 | +!**/influx_tsm/ | ||
22 | + | ||
23 | +influx_stress | ||
24 | +**/influx_stress | ||
25 | +!**/influx_stress/ | ||
26 | + | ||
27 | +influxd | ||
28 | +**/influxd | ||
29 | +!**/influxd/ | ||
30 | + | ||
31 | +influx | ||
32 | +**/influx | ||
33 | +!**/influx/ | ||
34 | + | ||
35 | +influxdb | ||
36 | +**/influxdb | ||
37 | +!**/influxdb/ | ||
38 | + | ||
39 | +influx_inspect | ||
40 | +**/influx_inspect | ||
41 | +!**/influx_inspect/ | ||
42 | + | ||
43 | +/benchmark-tool | ||
44 | +/main | ||
45 | +/benchmark-storage | ||
46 | +godef | ||
47 | +gosym | ||
48 | +gocode | ||
49 | +inspect-raft | ||
50 | + | ||
51 | +# dependencies | ||
52 | +out_rpm/ | ||
53 | +packages/ | ||
54 | + | ||
55 | +# autoconf | ||
56 | +autom4te.cache/ | ||
57 | +config.log | ||
58 | +config.status | ||
59 | + | ||
60 | +# log file | ||
61 | +influxdb.log | ||
62 | +benchmark.log | ||
63 | + | ||
64 | +# config file | ||
65 | +config.toml | ||
66 | + | ||
67 | +# test data files | ||
68 | +integration/migration_data/ | ||
69 | + | ||
70 | +# man outputs | ||
71 | +man/*.xml | ||
72 | +man/*.1 | ||
73 | +man/*.1.gz | ||
74 | + | ||
75 | +# test outputs | ||
76 | +/test-results.xml |
1 | +#!/usr/bin/env bash | ||
2 | + | ||
3 | +fmtcount=`git ls-files | grep '\.go$' | xargs gofmt -l 2>&1 | wc -l` | ||
4 | +if [ $fmtcount -gt 0 ]; then | ||
5 | + echo "Some files aren't formatted, please run 'go fmt ./...' to format your source code before committing" | ||
6 | + exit 1 | ||
7 | +fi | ||
8 | + | ||
9 | +vetcount=`go tool vet ./ 2>&1 | wc -l` | ||
10 | +if [ $vetcount -gt 0 ]; then | ||
11 | + echo "Some files aren't passing vet heuristics, please run 'go vet ./...' to see the errors it flags and correct your source code before committing" | ||
12 | + exit 1 | ||
13 | +fi | ||
14 | + | ||
15 | +# Ensure FIXME lines are removed before commit. | ||
16 | +fixme_lines=$(git diff --cached | grep ^+ | grep -v pre-commit | grep FIXME | sed 's_^+\s*__g') | ||
17 | +if [ "$fixme_lines" != "" ]; then | ||
18 | +  echo "Please remove the following lines:" | ||
19 | +  echo -e "$fixme_lines" | ||
20 | +  exit 1 | ||
21 | +fi | ||
22 | + | ||
23 | +exit 0 |
This diff could not be displayed because it is too large.
1 | +_This document is currently in draft form._ | ||
2 | + | ||
3 | +# Background | ||
4 | + | ||
5 | +The goal of this guide is to capture some dos and don'ts of Go code for the InfluxDB database. When it comes to Go, writing good code is often achieved with the help of tools like `go fmt` and `go vet`. However, there are still some practices not enforceable by any tools. This guide lists some specific practices to follow when writing code for the database. | ||
6 | + | ||
7 | +*Like everything, one needs to use good judgment.* There will always be times when it doesn't make sense to follow a guideline outlined in this document. If that case arises, be ready to justify your choices. | ||
8 | + | ||
9 | +# The Guidelines | ||
10 | + | ||
11 | +## Try not to use third-party libraries | ||
12 | + | ||
13 | +A third-party package is defined as one that is not part of the standard Go distribution. Generally speaking, we prefer to minimize our use of third-party packages, and avoid them unless absolutely necessary. We'll often write a little bit of code rather than pull in a third-party package. Of course, we do use some third-party packages -- most importantly we use [BoltDB](https://github.com/boltdb/bolt) in some storage engines. So to maximise the chance your change will be accepted by us, use only the standard libraries, or the third-party packages we have decided to use. | ||
14 | + | ||
15 | +For rationale, check out the post [The Case Against Third Party Libraries](http://blog.gopheracademy.com/advent-2014/case-against-3pl/). | ||
16 | + | ||
17 | +## Always include a default case in a 'switch' statement | ||
18 | +The lack of a `default` case in a `switch` statement can be a significant source of bugs. This is particularly true in the case of a type-assertion switch. So always include a `default` case unless you have an explicit reason not to. | ||
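A minimal sketch (not from the InfluxDB codebase) of the failure mode a `default` arm guards against in a type switch:

```go
package main

import "fmt"

// describe labels the dynamic type of v. The default arm surfaces types
// the switch was never written for instead of silently dropping them.
func describe(v interface{}) (string, error) {
	switch x := v.(type) {
	case int64:
		return fmt.Sprintf("int64: %d", x), nil
	case string:
		return fmt.Sprintf("string: %q", x), nil
	default:
		return "", fmt.Errorf("unexpected type %T", v)
	}
}

func main() {
	s, _ := describe(int64(42))
	fmt.Println(s)
	if _, err := describe(3.14); err != nil {
		fmt.Println(err) // float64 falls through to the default arm
	}
}
```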
19 | + | ||
20 | +## When -- and when not -- to set a channel to 'nil' | ||
21 | + | ||
22 | +## Use defer with anonymous functions to handle complex locking | ||
23 | +Consider a block of code like the following. | ||
24 | +``` | ||
25 | + mu.Lock() | ||
26 | + if foo == "quit" { | ||
27 | + mu.Unlock() | ||
28 | + return | ||
29 | + } else if foo == "continue" { | ||
30 | + if bar == "quit" { | ||
31 | + mu.Unlock() | ||
32 | + return | ||
33 | + } | ||
34 | + bar = "still going" | ||
35 | + } else { | ||
36 | + qux = "here at last" | ||
37 | + mu.Unlock() | ||
38 | + return | ||
39 | + } | ||
40 | + foo = "more to do" | ||
41 | + bar = "still more to do" | ||
42 | + mu.Unlock() | ||
43 | + | ||
44 | + qux = "finished now" | ||
45 | + return | ||
46 | +``` | ||
47 | +While this is obviously contrived, complex lock control like this is sometimes required, and doesn't lend itself to `defer`. But as the code evolves, it's easy to introduce new cases, and forget to release locks. One way to address this is to use an anonymous function like so: | ||
48 | +``` | ||
49 | + more := func() bool { | ||
50 | + mu.Lock() | ||
51 | + defer mu.Unlock() | ||
52 | + if foo == "quit" { | ||
53 | + return false | ||
54 | + } else if foo == "continue" { | ||
55 | + if bar == "quit" { | ||
56 | + return false | ||
57 | + } | ||
58 | + bar = "still going" | ||
59 | + } else { | ||
60 | + qux = "here at last" | ||
61 | + return false | ||
62 | + } | ||
63 | + foo = "more to do" | ||
64 | + bar = "still more to do" | ||
65 | + return true | ||
66 | + }() | ||
67 | + | ||
68 | + if more { | ||
69 | + qux = "finished" | ||
70 | + } | ||
71 | + return | ||
72 | +``` | ||
73 | +This allows us to use `defer` but ensures that if any new cases are added to the logic within the anonymous function, the lock will always be released. Another advantage of this approach is that `defer` still runs in the event of a panic, so the locks are released in that case as well. | ||
74 | + | ||
75 | +## When to call 'panic()' | ||
76 | + | ||
77 | +# Useful links | ||
78 | +- [Useful techniques in Go](http://arslan.io/ten-useful-techniques-in-go) | ||
79 | +- [Go in production](http://peter.bourgon.org/go-in-production/) | ||
80 | +- [Principles of designing Go APIs with channels](https://inconshreveable.com/07-08-2014/principles-of-designing-go-apis-with-channels/) | ||
81 | +- [Common mistakes in Golang](http://soryy.com/blog/2014/common-mistakes-with-go-lang/). Especially this section `Loops, Closures, and Local Variables` | ||
82 | + |
This diff is collapsed.
1 | +# Docker Setup | ||
3 | + | ||
4 | +This document describes how to build and run a minimal InfluxDB container under Docker. Currently, it has only been tested for local development and assumes that you have a working docker environment. | ||
5 | + | ||
6 | +## Building Image | ||
7 | + | ||
8 | +To build a docker image for InfluxDB from your current checkout, run the following: | ||
9 | + | ||
10 | +``` | ||
11 | +$ ./build-docker.sh | ||
12 | +``` | ||
13 | + | ||
14 | +This script uses the `golang:1.7.4` image to build a fully static binary of `influxd` and then adds it to a minimal `scratch` image. | ||
15 | + | ||
16 | +To build the image using a different version of go: | ||
17 | + | ||
18 | +``` | ||
19 | +$ GO_VER=1.7.4 ./build-docker.sh | ||
20 | +``` | ||
21 | + | ||
22 | +Available versions can be found [here](https://hub.docker.com/_/golang/). | ||
23 | + | ||
24 | +## Single Node Container | ||
25 | + | ||
26 | +This starts an interactive, single-node container, publishing the container's ports `8086` and `8088` on the host's ports `8086` and `8088` respectively. This is identical to starting `influxd` manually. | ||
27 | + | ||
28 | +``` | ||
29 | +$ docker run -it -p 8086:8086 -p 8088:8088 influxdb | ||
30 | +``` |
1 | +FROM busybox:ubuntu-14.04 | ||
2 | + | ||
3 | +MAINTAINER Jason Wilder "<jason@influxdb.com>" | ||
4 | + | ||
5 | +# admin, http, udp, cluster, graphite, opentsdb, collectd | ||
6 | +EXPOSE 8083 8086 8086/udp 8088 2003 4242 25826 | ||
7 | + | ||
8 | +WORKDIR /app | ||
9 | + | ||
10 | +# copy binary into image | ||
11 | +COPY influxd /app/ | ||
12 | + | ||
13 | +# Add influxd to the PATH | ||
14 | +ENV PATH=/app:$PATH | ||
15 | + | ||
16 | +# Generate a default config | ||
17 | +RUN influxd config > /etc/influxdb.toml | ||
18 | + | ||
19 | +# Use /data for all disk storage | ||
20 | +RUN sed -i 's/dir = "\/.*influxdb/dir = "\/data/' /etc/influxdb.toml | ||
21 | + | ||
22 | +VOLUME ["/data"] | ||
23 | + | ||
24 | +ENTRYPOINT ["influxd", "--config", "/etc/influxdb.toml"] |
1 | +FROM ioft/i386-ubuntu:14.04 | ||
2 | + | ||
3 | +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ | ||
4 | + python-software-properties \ | ||
5 | + software-properties-common \ | ||
6 | + wget \ | ||
7 | + git \ | ||
8 | + mercurial \ | ||
9 | + make \ | ||
10 | + ruby \ | ||
11 | + ruby-dev \ | ||
12 | + rpm \ | ||
13 | + zip \ | ||
14 | + python \ | ||
15 | + python-boto | ||
16 | + | ||
17 | +RUN gem install fpm | ||
18 | + | ||
19 | +# Install go | ||
20 | +ENV GOPATH /root/go | ||
21 | +ENV GO_VERSION 1.7.4 | ||
22 | +ENV GO_ARCH 386 | ||
23 | +RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \ | ||
24 | + tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \ | ||
25 | + rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz | ||
26 | +ENV PATH /usr/local/go/bin:$PATH | ||
27 | + | ||
28 | +ENV PROJECT_DIR $GOPATH/src/github.com/influxdata/influxdb | ||
29 | +ENV PATH $GOPATH/bin:$PATH | ||
30 | +RUN mkdir -p $PROJECT_DIR | ||
31 | +WORKDIR $PROJECT_DIR | ||
32 | + | ||
33 | +VOLUME $PROJECT_DIR | ||
34 | + | ||
35 | +ENTRYPOINT [ "/root/go/src/github.com/influxdata/influxdb/build.py" ] |
1 | +FROM ubuntu:trusty | ||
2 | + | ||
3 | +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ | ||
4 | + python-software-properties \ | ||
5 | + software-properties-common \ | ||
6 | + wget \ | ||
7 | + git \ | ||
8 | + mercurial \ | ||
9 | + make \ | ||
10 | + ruby \ | ||
11 | + ruby-dev \ | ||
12 | + rpm \ | ||
13 | + zip \ | ||
14 | + python \ | ||
15 | + python-boto \ | ||
16 | + asciidoc \ | ||
17 | + xmlto \ | ||
18 | + docbook-xsl | ||
19 | + | ||
20 | +RUN gem install fpm | ||
21 | + | ||
22 | +# Install go | ||
23 | +ENV GOPATH /root/go | ||
24 | +ENV GO_VERSION 1.7.4 | ||
25 | +ENV GO_ARCH amd64 | ||
26 | +RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \ | ||
27 | + tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \ | ||
28 | + rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz | ||
29 | +ENV PATH /usr/local/go/bin:$PATH | ||
30 | + | ||
31 | +ENV PROJECT_DIR $GOPATH/src/github.com/influxdata/influxdb | ||
32 | +ENV PATH $GOPATH/bin:$PATH | ||
33 | +RUN mkdir -p $PROJECT_DIR | ||
34 | +WORKDIR $PROJECT_DIR | ||
35 | + | ||
36 | +VOLUME $PROJECT_DIR | ||
37 | + | ||
38 | +ENTRYPOINT [ "/root/go/src/github.com/influxdata/influxdb/build.py" ] |
1 | +FROM ubuntu:trusty | ||
2 | + | ||
3 | +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ | ||
4 | + python-software-properties \ | ||
5 | + software-properties-common \ | ||
6 | + wget \ | ||
7 | + git \ | ||
8 | + mercurial \ | ||
9 | + make \ | ||
10 | + ruby \ | ||
11 | + ruby-dev \ | ||
12 | + rpm \ | ||
13 | + zip \ | ||
14 | + python \ | ||
15 | + python-boto | ||
16 | + | ||
17 | +RUN gem install fpm | ||
18 | + | ||
19 | +# Setup env | ||
20 | +ENV GOPATH /root/go | ||
21 | +ENV PROJECT_DIR $GOPATH/src/github.com/influxdata/influxdb | ||
22 | +ENV PATH $GOPATH/bin:$PATH | ||
23 | +RUN mkdir -p $PROJECT_DIR | ||
24 | + | ||
25 | +VOLUME $PROJECT_DIR | ||
26 | + | ||
27 | + | ||
28 | +# Install go | ||
29 | +ENV GO_VERSION 1.7.4 | ||
30 | +ENV GO_ARCH amd64 | ||
31 | +RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \ | ||
32 | + tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \ | ||
33 | + rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz | ||
34 | + | ||
35 | +# Clone Go tip for compilation | ||
36 | +ENV GOROOT_BOOTSTRAP /usr/local/go | ||
37 | +RUN git clone https://go.googlesource.com/go | ||
38 | +ENV PATH /go/bin:$PATH | ||
39 | + | ||
40 | +# Add script for compiling go | ||
41 | +ENV GO_CHECKOUT master | ||
42 | +ADD ./gobuild.sh /gobuild.sh | ||
43 | +ENTRYPOINT [ "/gobuild.sh" ] |
1 | +FROM 32bit/ubuntu:14.04 | ||
2 | + | ||
3 | +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y python-software-properties software-properties-common git | ||
4 | +RUN add-apt-repository ppa:evarlast/golang1.4 | ||
5 | +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y -o Dpkg::Options::="--force-overwrite" golang-go | ||
6 | + | ||
7 | +ENV GOPATH=/root/go | ||
8 | +RUN mkdir -p /root/go/src/github.com/influxdata/influxdb | ||
9 | +RUN mkdir -p /tmp/artifacts | ||
10 | + | ||
11 | +VOLUME /root/go/src/github.com/influxdata/influxdb | ||
12 | +VOLUME /tmp/artifacts |
vendor/github.com/influxdata/influxdb/Godeps
0 → 100644
1 | +collectd.org e84e8af5356e7f47485bbc95c96da6dd7984a67e | ||
2 | +github.com/BurntSushi/toml 99064174e013895bbd9b025c31100bd1d9b590ca | ||
3 | +github.com/bmizerany/pat c068ca2f0aacee5ac3681d68e4d0a003b7d1fd2c | ||
4 | +github.com/boltdb/bolt 4b1ebc1869ad66568b313d0dc410e2be72670dda | ||
5 | +github.com/cespare/xxhash 4a94f899c20bc44d4f5f807cb14529e72aca99d6 | ||
6 | +github.com/clarkduvall/hyperloglog 2d38f733946d0a1f2e810513c71b834cbeba1480 | ||
7 | +github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 | ||
8 | +github.com/dgrijalva/jwt-go 24c63f56522a87ec5339cc3567883f1039378fdb | ||
9 | +github.com/dgryski/go-bits 2ad8d707cc05b1815ce6ff2543bb5e8d8f9298ef | ||
10 | +github.com/dgryski/go-bitstream 7d46cd22db7004f0cceb6f7975824b560cf0e486 | ||
11 | +github.com/gogo/protobuf a9cd0c35b97daf74d0ebf3514c5254814b2703b4 | ||
12 | +github.com/golang/snappy d9eb7a3d35ec988b8585d4a0068e462c27d28380 | ||
13 | +github.com/influxdata/usage-client 6d3895376368aa52a3a81d2a16e90f0f52371967 | ||
14 | +github.com/jwilder/encoding 27894731927e49b0a9023f00312be26733744815 | ||
15 | +github.com/paulbellamy/ratecounter 5a11f585a31379765c190c033b6ad39956584447 | ||
16 | +github.com/peterh/liner 88609521dc4b6c858fd4c98b628147da928ce4ac | ||
17 | +github.com/rakyll/statik e383bbf6b2ec1a2fb8492dfd152d945fb88919b6 | ||
18 | +github.com/retailnext/hllpp 38a7bb71b483e855d35010808143beaf05b67f9d | ||
19 | +github.com/uber-go/atomic 74ca5ec650841aee9f289dce76e928313a37cbc6 | ||
20 | +github.com/uber-go/zap fbae0281ffd546fa6d1959fec6075ac5da7fb577 | ||
21 | +golang.org/x/crypto 9477e0b78b9ac3d0b03822fd95422e2fe07627cd |
1 | +The MIT License (MIT) | ||
2 | + | ||
3 | +Copyright (c) 2013-2016 Errplane Inc. | ||
4 | + | ||
5 | +Permission is hereby granted, free of charge, to any person obtaining a copy of | ||
6 | +this software and associated documentation files (the "Software"), to deal in | ||
7 | +the Software without restriction, including without limitation the rights to | ||
8 | +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of | ||
9 | +the Software, and to permit persons to whom the Software is furnished to do so, | ||
10 | +subject to the following conditions: | ||
11 | + | ||
12 | +The above copyright notice and this permission notice shall be included in all | ||
13 | +copies or substantial portions of the Software. | ||
14 | + | ||
15 | +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS | ||
17 | +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR | ||
18 | +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER | ||
19 | +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
20 | +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
1 | +# List | ||
2 | +- bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE) | ||
3 | +- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE) | ||
4 | +- github.com/BurntSushi/toml [WTFPL LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING) | ||
5 | +- github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license) | ||
6 | +- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE) | ||
7 | +- github.com/cespare/xxhash [MIT LICENSE](https://github.com/cespare/xxhash/blob/master/LICENSE.txt) | ||
8 | +- github.com/clarkduvall/hyperloglog [MIT LICENSE](https://github.com/clarkduvall/hyperloglog/blob/master/LICENSE) | ||
9 | +- github.com/davecgh/go-spew/spew [ISC LICENSE](https://github.com/davecgh/go-spew/blob/master/LICENSE) | ||
10 | +- github.com/dgrijalva/jwt-go [MIT LICENSE](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE) | ||
11 | +- github.com/dgryski/go-bits [MIT LICENSE](https://github.com/dgryski/go-bits/blob/master/LICENSE) | ||
12 | +- github.com/dgryski/go-bitstream [MIT LICENSE](https://github.com/dgryski/go-bitstream/blob/master/LICENSE) | ||
13 | +- github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE) | ||
14 | +- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE) | ||
15 | +- github.com/influxdata/usage-client [MIT LICENSE](https://github.com/influxdata/usage-client/blob/master/LICENSE.txt) | ||
16 | +- github.com/jwilder/encoding [MIT LICENSE](https://github.com/jwilder/encoding/blob/master/LICENSE) | ||
17 | +- github.com/paulbellamy/ratecounter [MIT LICENSE](https://github.com/paulbellamy/ratecounter/blob/master/LICENSE) | ||
18 | +- github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING) | ||
19 | +- github.com/rakyll/statik [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE) | ||
20 | +- github.com/retailnext/hllpp [BSD LICENSE](https://github.com/retailnext/hllpp/blob/master/LICENSE) | ||
21 | +- github.com/uber-go/atomic [MIT LICENSE](https://github.com/uber-go/atomic/blob/master/LICENSE.txt) | ||
22 | +- github.com/uber-go/zap [MIT LICENSE](https://github.com/uber-go/zap/blob/master/LICENSE.txt) | ||
23 | +- glyphicons [LICENSE](http://glyphicons.com/license/) | ||
24 | +- golang.org/x/crypto [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE) | ||
25 | +- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt) | ||
26 | +- react 0.13.3 [BSD LICENSE](https://github.com/facebook/react/blob/master/LICENSE) |
1 | +PACKAGES=$(shell find . -name '*.go' -print0 | xargs -0 -n1 dirname | sort --unique) | ||
2 | + | ||
3 | +default: | ||
4 | + | ||
5 | +metalint: deadcode cyclo aligncheck defercheck structcheck lint errcheck | ||
6 | + | ||
7 | +deadcode: | ||
8 | + @deadcode $(PACKAGES) 2>&1 | ||
9 | + | ||
10 | +cyclo: | ||
11 | + @gocyclo -over 10 $(PACKAGES) | ||
12 | + | ||
13 | +aligncheck: | ||
14 | + @aligncheck $(PACKAGES) | ||
15 | + | ||
16 | +defercheck: | ||
17 | + @defercheck $(PACKAGES) | ||
18 | + | ||
19 | + | ||
20 | +structcheck: | ||
21 | + @structcheck $(PACKAGES) | ||
22 | + | ||
23 | +lint: | ||
24 | + @for pkg in $(PACKAGES); do golint $$pkg; done | ||
25 | + | ||
26 | +errcheck: | ||
27 | + @for pkg in $(PACKAGES); do \ | ||
28 | + errcheck -ignorepkg=bytes,fmt -ignore=":(Rollback|Close)" $$pkg; \ | ||
29 | + done | ||
30 | + | ||
31 | +tools: | ||
32 | + go get github.com/remyoudompheng/go-misc/deadcode | ||
33 | + go get github.com/alecthomas/gocyclo | ||
34 | + go get github.com/opennota/check/... | ||
35 | + go get github.com/golang/lint/golint | ||
36 | + go get github.com/kisielk/errcheck | ||
37 | + go get github.com/sparrc/gdm | ||
38 | + | ||
39 | +.PHONY: default metalint deadcode cyclo aligncheck defercheck structcheck lint errcheck tools |
1 | +The top-level name is called a measurement. These names can contain any characters. Then there are field names, field values, tag keys, and tag values, which can also contain any characters. However, if the measurement, field, or tag contains any character other than [A-Z,a-z,0-9,_], or if it starts with a digit, it must be double-quoted. Therefore, anywhere a measurement name, field key, or tag key appears, it should be wrapped in double quotes. | ||
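As a sketch of the quoting rule using the Go client vendored in this change (the database `mydb` and the identifiers `1h_avg` and `my-measurement` are hypothetical names chosen because they require double quotes):

```go
package main

import (
	"fmt"
	"log"
	"net/url"

	"github.com/influxdata/influxdb/client"
)

func main() {
	host, err := url.Parse("http://localhost:8086")
	if err != nil {
		log.Fatal(err)
	}
	con, err := client.NewClient(client.Config{URL: *host})
	if err != nil {
		log.Fatal(err)
	}

	// "1h_avg" starts with a digit and "my-measurement" contains '-', so both
	// identifiers must be double-quoted. Note the tag *value* uses single quotes.
	q := client.Query{
		Command:  `SELECT "1h_avg" FROM "my-measurement" WHERE "region" = 'uswest'`,
		Database: "mydb",
	}
	if resp, err := con.Query(q); err == nil && resp.Error() == nil {
		fmt.Println(resp.Results)
	}
}
```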
2 | + | ||
3 | +# Databases & retention policies | ||
4 | + | ||
5 | +```sql | ||
6 | +-- create a database | ||
7 | +CREATE DATABASE <name> | ||
8 | + | ||
9 | +-- create a retention policy | ||
10 | +CREATE RETENTION POLICY <rp-name> ON <db-name> DURATION <duration> REPLICATION <n> [DEFAULT] | ||
11 | + | ||
12 | +-- alter retention policy | ||
13 | +ALTER RETENTION POLICY <rp-name> ON <db-name> (DURATION <duration> | REPLICATION <n> | DEFAULT)+ | ||
14 | + | ||
15 | +-- drop a database | ||
16 | +DROP DATABASE <name> | ||
17 | + | ||
18 | +-- drop a retention policy | ||
19 | +DROP RETENTION POLICY <rp-name> ON <db-name> | ||
20 | +``` | ||
21 | +where `<duration>` is either `INF` for infinite retention, or an integer followed by the desired unit of time: u,ms,s,m,h,d,w for microseconds, milliseconds, seconds, minutes, hours, days, or weeks, respectively (for example, `2w` keeps data for two weeks). `<n>` must be an integer. | ||
22 | + | ||
23 | +If present, `DEFAULT` sets the retention policy as the default retention policy for writes and reads. | ||
24 | + | ||
25 | +# Users and permissions | ||
26 | + | ||
27 | +```sql | ||
28 | +-- create user | ||
29 | +CREATE USER <name> WITH PASSWORD '<password>' | ||
30 | + | ||
31 | +-- grant privilege on a database | ||
32 | +GRANT <privilege> ON <db> TO <user> | ||
33 | + | ||
34 | +-- grant cluster admin privileges | ||
35 | +GRANT ALL [PRIVILEGES] TO <user> | ||
36 | + | ||
37 | +-- revoke privilege | ||
38 | +REVOKE <privilege> ON <db> FROM <user> | ||
39 | + | ||
40 | +-- revoke all privileges for a DB | ||
41 | +REVOKE ALL [PRIVILEGES] ON <db> FROM <user> | ||
42 | + | ||
43 | +-- revoke all privileges including cluster admin | ||
44 | +REVOKE ALL [PRIVILEGES] FROM <user> | ||
45 | + | ||
46 | +-- combine db creation with privilege assignment (user must already exist) | ||
47 | +CREATE DATABASE <name> GRANT <privilege> TO <user> | ||
48 | +CREATE DATABASE <name> REVOKE <privilege> FROM <user> | ||
49 | + | ||
50 | +-- delete a user | ||
51 | +DROP USER <name> | ||
52 | + | ||
53 | + | ||
54 | +``` | ||
55 | +where `<privilege> := READ | WRITE | ALL`. | ||
56 | + | ||
57 | +Authentication must be enabled in the influxdb.conf file for user permissions to be in effect. | ||
58 | + | ||
59 | +By default, newly created users have no privileges to any databases. | ||
60 | + | ||
61 | +Cluster administration privileges automatically grant full read and write permissions to all databases, regardless of subsequent database-specific privilege revocation statements. | ||
62 | + | ||
63 | +# Select | ||
64 | + | ||
65 | +```sql | ||
66 | +SELECT mean(value) from cpu WHERE host = 'serverA' AND time > now() - 4h GROUP BY time(5m) | ||
67 | + | ||
68 | +SELECT mean(value) from cpu WHERE time > now() - 4h GROUP BY time(5m), region | ||
69 | +``` | ||
70 | + | ||
71 | +## Group By | ||
72 | + | ||
73 | +# Delete | ||
74 | + | ||
75 | +# Series | ||
76 | + | ||
77 | +## Destroy | ||
78 | + | ||
79 | +```sql | ||
80 | +DROP MEASUREMENT <name> | ||
81 | +DROP MEASUREMENT cpu WHERE region = 'uswest' | ||
82 | +``` | ||
83 | + | ||
84 | +## Show | ||
85 | + | ||
86 | +Show series queries are for pulling out individual series from measurement names and tag data. They're useful for discovery. | ||
87 | + | ||
88 | +```sql | ||
89 | +-- show all databases | ||
90 | +SHOW DATABASES | ||
91 | + | ||
92 | +-- show measurement names | ||
93 | +SHOW MEASUREMENTS | ||
94 | +SHOW MEASUREMENTS LIMIT 15 | ||
95 | +SHOW MEASUREMENTS LIMIT 10 OFFSET 40 | ||
96 | +SHOW MEASUREMENTS WHERE service = 'redis' | ||
97 | +-- LIMIT and OFFSET can be applied to any of the SHOW type queries | ||
98 | + | ||
99 | +-- show all series across all measurements/tagsets | ||
100 | +SHOW SERIES | ||
101 | + | ||
102 | +-- show all series for any measurement where tag key region = tag value 'uswest' | ||
103 | +SHOW SERIES WHERE region = 'uswest' | ||
104 | + | ||
105 | +SHOW SERIES FROM cpu_load WHERE region = 'uswest' LIMIT 10 | ||
106 | + | ||
107 | +-- returns rows 100 - 109 of the result. SHOW SERIES returns series | ||
108 | +-- grouped by measurement, and each series counts as a row, so you could see | ||
109 | +-- a single measurement returned but 10 series within it. | ||
110 | +SHOW SERIES FROM cpu_load WHERE region = 'uswest' LIMIT 10 OFFSET 100 | ||
111 | + | ||
112 | +-- show all retention policies on a database | ||
113 | +SHOW RETENTION POLICIES ON mydb | ||
114 | + | ||
115 | +-- get a show of all tag keys across all measurements | ||
116 | +SHOW TAG KEYS | ||
117 | + | ||
118 | +-- show all the tag keys for a given measurement | ||
119 | +SHOW TAG KEYS FROM cpu | ||
120 | +SHOW TAG KEYS FROM temperature, wind_speed | ||
121 | + | ||
122 | +-- show all the tag values. note that a single WITH TAG KEY = '...' clause is required | ||
123 | +SHOW TAG VALUES WITH TAG KEY = 'region' | ||
124 | +SHOW TAG VALUES FROM cpu WHERE region = 'uswest' WITH TAG KEY = 'host' | ||
125 | + | ||
126 | +-- and you can do stuff against fields | ||
127 | +SHOW FIELD KEYS FROM cpu | ||
128 | + | ||
129 | +-- but you can't do this | ||
130 | +SHOW FIELD VALUES | ||
131 | +-- we don't index field values, so this query should be invalid. | ||
132 | + | ||
133 | +-- show all users | ||
134 | +SHOW USERS | ||
135 | +``` | ||
136 | + | ||
137 | +Note that `FROM` and `WHERE` are optional clauses in most of the show series queries. | ||
138 | + | ||
139 | +And the show series output looks like this: | ||
140 | + | ||
141 | +```json | ||
142 | +[ | ||
143 | + { | ||
144 | + "name": "cpu", | ||
145 | + "columns": ["id", "region", "host"], | ||
146 | + "values": [ | ||
147 | + 1, "uswest", "servera", | ||
148 | + 2, "uswest", "serverb" | ||
149 | + ] | ||
150 | + }, | ||
151 | + { | ||
152 | + "name": "response_time", | ||
153 | + "columns": ["id", "application", "host"], | ||
154 | + "values": [ | ||
155 | + 3, "myRailsApp", "servera" | ||
156 | + ] | ||
157 | + } | ||
158 | +] | ||
159 | +``` | ||
160 | + | ||
161 | +# Continuous Queries | ||
162 | + | ||
163 | +Continuous queries are going to be inspired by MySQL `TRIGGER` syntax: | ||
164 | + | ||
165 | +http://dev.mysql.com/doc/refman/5.0/en/trigger-syntax.html | ||
166 | + | ||
167 | +Instead of having automatically-assigned ids, named continuous queries allow for some level of duplication prevention, | ||
168 | +particularly in the case where creation is scripted. | ||
169 | + | ||
170 | +## Create | ||
171 | + | ||
172 | + CREATE CONTINUOUS QUERY <name> AS SELECT ... FROM ... | ||
173 | + | ||
174 | +## Destroy | ||
175 | + | ||
176 | + DROP CONTINUOUS QUERY <name> | ||
177 | + | ||
178 | +## List | ||
179 | + | ||
180 | + SHOW CONTINUOUS QUERIES |
1 | +# InfluxDB [![Circle CI](https://circleci.com/gh/influxdata/influxdb/tree/master.svg?style=svg)](https://circleci.com/gh/influxdata/influxdb/tree/master) [![Go Report Card](https://goreportcard.com/badge/github.com/influxdata/influxdb)](https://goreportcard.com/report/github.com/influxdata/influxdb) [![Docker pulls](https://img.shields.io/docker/pulls/library/influxdb.svg)](https://hub.docker.com/_/influxdb/) | ||
2 | + | ||
3 | +## An Open-Source Time Series Database | ||
4 | + | ||
5 | +InfluxDB is an open source **time series database** with | ||
6 | +**no external dependencies**. It's useful for recording metrics and | ||
7 | +events, and for performing analytics. | ||
8 | + | ||
9 | +## Features | ||
10 | + | ||
11 | +* Built-in [HTTP API](https://docs.influxdata.com/influxdb/latest/guides/writing_data/) so you don't have to write any server side code to get up and running. | ||
12 | +* Data can be tagged, allowing very flexible querying. | ||
13 | +* SQL-like query language. | ||
14 | +* Simple to install and manage, and fast to get data in and out. | ||
15 | +* It aims to answer queries in real-time. That means every data point is | ||
16 | + indexed as it comes in and is immediately available in queries that | ||
17 | + should return in < 100ms. | ||
18 | + | ||
19 | +## Installation | ||
20 | + | ||
21 | +We recommend installing InfluxDB using one of the [pre-built packages](https://influxdata.com/downloads/#influxdb). Then start InfluxDB using: | ||
22 | + | ||
23 | +* `service influxdb start` if you have installed InfluxDB using an official Debian or RPM package. | ||
24 | +* `systemctl start influxdb` if you have installed InfluxDB using an official Debian or RPM package, and are running a distro with `systemd`. For example, Ubuntu 15 or later. | ||
25 | +* `$GOPATH/bin/influxd` if you have built InfluxDB from source. | ||
26 | + | ||
27 | +## Getting Started | ||
28 | + | ||
29 | +### Create your first database | ||
30 | + | ||
31 | +``` | ||
32 | +curl -XPOST 'http://localhost:8086/query' --data-urlencode "q=CREATE DATABASE mydb" | ||
33 | +``` | ||
34 | + | ||
35 | +### Insert some data | ||
36 | +``` | ||
37 | +curl -XPOST 'http://localhost:8086/write?db=mydb' \ | ||
38 | +-d 'cpu,host=server01,region=uswest load=42 1434055562000000000' | ||
39 | + | ||
40 | +curl -XPOST 'http://localhost:8086/write?db=mydb' \ | ||
41 | +-d 'cpu,host=server02,region=uswest load=78 1434055562000000000' | ||
42 | + | ||
43 | +curl -XPOST 'http://localhost:8086/write?db=mydb' \ | ||
44 | +-d 'cpu,host=server03,region=useast load=15.4 1434055562000000000' | ||
45 | +``` | ||
46 | + | ||
47 | +### Query for the data | ||
48 | +``` | ||
49 | +curl -G http://localhost:8086/query?pretty=true --data-urlencode "db=mydb" \ | ||
50 | +--data-urlencode "q=SELECT * FROM cpu WHERE host='server01' AND time < now() - 1d" | ||
51 | +``` | ||
52 | + | ||
53 | +### Analyze the data | ||
54 | +``` | ||
55 | +curl -G http://localhost:8086/query?pretty=true --data-urlencode "db=mydb" \ | ||
56 | +--data-urlencode "q=SELECT mean(load) FROM cpu WHERE region='uswest'" | ||
57 | +``` | ||
58 | + | ||
59 | +## Documentation | ||
60 | + | ||
61 | +* Read more about the [design goals and motivations of the project](https://docs.influxdata.com/influxdb/latest/). | ||
62 | +* Follow the [getting started guide](https://docs.influxdata.com/influxdb/latest/introduction/getting_started/) to learn the basics in just a few minutes. | ||
63 | +* Learn more about [InfluxDB's key concepts](https://docs.influxdata.com/influxdb/latest/guides/writing_data/). | ||
64 | + | ||
65 | +## Contributing | ||
66 | + | ||
67 | +If you're feeling adventurous and want to contribute to InfluxDB, see our [contributing doc](https://github.com/influxdata/influxdb/blob/master/CONTRIBUTING.md) for info on how to make feature requests, build from source, and run tests. | ||
68 | + | ||
69 | +## Looking for Support? | ||
70 | + | ||
71 | +InfluxDB offers a number of services to help your project succeed. We offer Developer Support for organizations in active development, Managed Hosting to make it easy to move into production, and Enterprise Support for companies requiring the best response times, SLAs, and technical fixes. Visit our [support page](https://influxdata.com/services/) or contact [sales@influxdb.com](mailto:sales@influxdb.com) to learn how we can best help you succeed. |
1 | +# TODO | ||
2 | + | ||
3 | +## v2 | ||
4 | + | ||
5 | +TODO list for v2: things we want to add, but can't ship in v1 because they would be breaking changes. | ||
6 | + | ||
7 | +- [#1834](https://github.com/influxdata/influxdb/issues/1834): Disallow using time as a tag key or field key. | ||
8 | +- [#2124](https://github.com/influxdata/influxdb/issues/2124): Prohibit writes with precision, but without an explicit timestamp. | ||
9 | +- [#4461](https://github.com/influxdata/influxdb/issues/4461): Change default time boundaries. |
1 | +version: 0.{build} | ||
2 | +pull_requests: | ||
3 | + do_not_increment_build_number: true | ||
4 | +branches: | ||
5 | + only: | ||
6 | + - master | ||
7 | + | ||
8 | +os: Windows Server 2012 R2 | ||
9 | + | ||
10 | +# Custom clone folder (variables are not expanded here). | ||
11 | +clone_folder: c:\gopath\src\github.com\influxdata\influxdb | ||
12 | + | ||
13 | +# Environment variables | ||
14 | +environment: | ||
15 | + GOROOT: C:\go17 | ||
16 | + GOPATH: C:\gopath | ||
17 | + | ||
18 | +# Scripts that run after cloning repository | ||
19 | +install: | ||
20 | + - set PATH=%GOROOT%\bin;%GOPATH%\bin;%PATH% | ||
21 | + - rmdir c:\go /s /q | ||
22 | + - echo %PATH% | ||
23 | + - echo %GOPATH% | ||
24 | + - cd C:\gopath\src\github.com\influxdata\influxdb | ||
25 | + - go version | ||
26 | + - go env | ||
27 | + - go get github.com/sparrc/gdm | ||
28 | + - cd C:\gopath\src\github.com\influxdata\influxdb | ||
29 | + - gdm restore | ||
30 | + | ||
31 | +# To run your custom scripts instead of automatic MSBuild | ||
32 | +build_script: | ||
33 | + - go get -t -v ./... | ||
34 | + - go test -race -v ./... | ||
35 | + | ||
36 | +# To disable deployment | ||
37 | +deploy: off |
This diff is collapsed.
1 | +#!/bin/bash | ||
2 | +# Run the build utility via Docker | ||
3 | + | ||
4 | +set -e | ||
5 | + | ||
6 | +# Make sure our working dir is the dir of the script | ||
7 | +DIR=$(cd $(dirname ${BASH_SOURCE[0]}) && pwd) | ||
8 | +cd $DIR | ||
9 | + | ||
10 | + | ||
11 | +# Build new docker image | ||
12 | +docker build -f Dockerfile_build_ubuntu64 -t influxdb-builder $DIR | ||
13 | +echo "Running build.py" | ||
14 | +# Run docker | ||
15 | +docker run --rm \ | ||
16 | + -e AWS_ACCESS_KEY_ID="$AWS_ACCESS_KEY_ID" \ | ||
17 | + -e AWS_SECRET_ACCESS_KEY="$AWS_SECRET_ACCESS_KEY" \ | ||
18 | + -v $HOME/.aws.conf:/root/.aws.conf \ | ||
19 | + -v $DIR:/root/go/src/github.com/influxdata/influxdb \ | ||
20 | + influxdb-builder \ | ||
21 | + "$@" | ||
22 | + |
1 | +#!/bin/bash | ||
2 | +# | ||
3 | +# This is the InfluxDB test script for CircleCI, it is a light wrapper around ./test.sh. | ||
4 | + | ||
5 | +# Exit if any command fails | ||
6 | +set -e | ||
7 | + | ||
9 | +# Get dir of script and make it our working directory. | ||
9 | +DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd) | ||
10 | +cd $DIR | ||
11 | + | ||
12 | +export OUTPUT_DIR="$CIRCLE_ARTIFACTS" | ||
13 | +# Don't delete the container since CircleCI doesn't have permission to do so. | ||
14 | +export DOCKER_RM="false" | ||
15 | + | ||
16 | +# Get number of test environments. | ||
17 | +count=$(./test.sh count) | ||
18 | +# Check that we aren't wasting CircleCI nodes. | ||
19 | +if [ $CIRCLE_NODE_TOTAL -gt $count ] | ||
20 | +then | ||
21 | + echo "More CircleCI nodes allocated than test environments to run!" | ||
22 | + exit 1 | ||
23 | +fi | ||
24 | + | ||
25 | +# Map CircleCI nodes to test environments. | ||
26 | +tests=$(seq 0 $((count - 1))) | ||
27 | +for i in $tests | ||
28 | +do | ||
29 | + mine=$(( $i % $CIRCLE_NODE_TOTAL )) | ||
30 | + if [ $mine -eq $CIRCLE_NODE_INDEX ] | ||
31 | + then | ||
32 | + echo "Running test env index: $i" | ||
33 | + ./test.sh $i | ||
34 | + fi | ||
35 | +done | ||
36 | + | ||
37 | +# Copy the JUnit test XML to the test reports folder. | ||
38 | +mkdir -p $CIRCLE_TEST_REPORTS/reports | ||
39 | +cp test-results.xml $CIRCLE_TEST_REPORTS/reports/test-results.xml |
1 | +machine: | ||
2 | + services: | ||
3 | + - docker | ||
4 | + environment: | ||
5 | + GODIST: "go1.7.4.linux-amd64.tar.gz" | ||
6 | + post: | ||
7 | + - mkdir -p download | ||
8 | + - test -e download/$GODIST || curl -o download/$GODIST https://storage.googleapis.com/golang/$GODIST | ||
9 | + - sudo rm -rf /usr/local/go | ||
10 | + - sudo tar -C /usr/local -xzf download/$GODIST | ||
11 | + | ||
12 | +dependencies: | ||
13 | + cache_directories: | ||
14 | + - "~/docker" | ||
15 | + - ~/download | ||
16 | + override: | ||
17 | + - ./test.sh save: | ||
18 | + # building the docker images can take a long time, hence caching | ||
19 | + timeout: 1800 | ||
20 | + | ||
21 | +test: | ||
22 | + override: | ||
23 | + - bash circle-test.sh: | ||
24 | + parallel: true | ||
25 | + # Race tests using 960s timeout | ||
26 | + timeout: 960 | ||
27 | + | ||
28 | +deployment: | ||
29 | + release: | ||
30 | + tag: /^v[0-9]+(\.[0-9]+)*(\S*)$/ | ||
31 | + commands: | ||
32 | + - > | ||
33 | + docker run | ||
34 | + -e "AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID" | ||
35 | + -e "AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY" | ||
36 | + -v $(pwd):/root/go/src/github.com/influxdata/influxdb | ||
37 | + influxdb_build_ubuntu64 | ||
38 | + --release | ||
39 | + --package | ||
40 | + --platform all | ||
41 | + --arch all | ||
42 | + --upload | ||
43 | + --bucket dl.influxdata.com/influxdb/releases |
1 | +# InfluxDB Client | ||
2 | + | ||
3 | +[![GoDoc](https://godoc.org/github.com/influxdata/influxdb?status.svg)](http://godoc.org/github.com/influxdata/influxdb/client/v2) | ||
4 | + | ||
5 | +## Description | ||
6 | + | ||
7 | +**NOTE:** The Go client library now has a "v2" version, with the old version | ||
8 | +being deprecated. The new version can be imported at | ||
9 | +`import "github.com/influxdata/influxdb/client/v2"`. It is not backwards-compatible. | ||
10 | + | ||
11 | +A Go client library written and maintained by the **InfluxDB** team. | ||
12 | +This package provides convenience functions to read and write time series data. | ||
13 | +It uses the HTTP protocol to communicate with your **InfluxDB** cluster. | ||
14 | + | ||
15 | + | ||
16 | +## Getting Started | ||
17 | + | ||
18 | +### Connecting To Your Database | ||
19 | + | ||
20 | +Connecting to an **InfluxDB** database is straightforward. You will need a host | ||
21 | +name, a port and the cluster user credentials if applicable. The default port is | ||
22 | +8086. You can customize these settings to your specific installation via the | ||
23 | +**InfluxDB** configuration file. | ||
24 | + | ||
25 | +Though not necessary for experimentation, you may want to create a new user | ||
26 | +and authenticate the connection to your database. | ||
27 | + | ||
28 | +For more information please check out the | ||
29 | +[Admin Docs](https://docs.influxdata.com/influxdb/latest/administration/). | ||
30 | + | ||
31 | +For the impatient, you can create a new admin user _bubba_ by firing off the | ||
32 | +[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go). | ||
33 | + | ||
34 | +```shell | ||
35 | +influx | ||
36 | +> create user bubba with password 'bumblebeetuna' | ||
37 | +> grant all privileges to bubba | ||
38 | +``` | ||
39 | + | ||
40 | +And now, for good measure, set the credentials in your shell environment. | ||
41 | +In the example below we will use `$INFLUX_USER` and `$INFLUX_PWD`. | ||
42 | + | ||
43 | +Now with the administrivia out of the way, let's connect to our database. | ||
44 | + | ||
45 | +NOTE: If you've opted out of creating a user, you can omit Username and Password in | ||
46 | +the configuration below. | ||
47 | + | ||
48 | +```go | ||
49 | +package main | ||
50 | + | ||
51 | +import ( | ||
52 | + "log" | ||
53 | + "time" | ||
54 | + | ||
55 | + "github.com/influxdata/influxdb/client/v2" | ||
56 | +) | ||
57 | + | ||
58 | +const ( | ||
59 | + MyDB = "square_holes" | ||
60 | + username = "bubba" | ||
61 | + password = "bumblebeetuna" | ||
62 | +) | ||
63 | + | ||
64 | + | ||
65 | +func main() { | ||
66 | + // Create a new HTTPClient | ||
67 | + c, err := client.NewHTTPClient(client.HTTPConfig{ | ||
68 | + Addr: "http://localhost:8086", | ||
69 | + Username: username, | ||
70 | + Password: password, | ||
71 | + }) | ||
72 | + if err != nil { | ||
73 | + log.Fatal(err) | ||
74 | + } | ||
75 | + | ||
76 | + // Create a new point batch | ||
77 | + bp, err := client.NewBatchPoints(client.BatchPointsConfig{ | ||
78 | + Database: MyDB, | ||
79 | + Precision: "s", | ||
80 | + }) | ||
81 | + if err != nil { | ||
82 | + log.Fatal(err) | ||
83 | + } | ||
84 | + | ||
85 | + // Create a point and add to batch | ||
86 | + tags := map[string]string{"cpu": "cpu-total"} | ||
87 | + fields := map[string]interface{}{ | ||
88 | + "idle": 10.1, | ||
89 | + "system": 53.3, | ||
90 | + "user": 46.6, | ||
91 | + } | ||
92 | + | ||
93 | + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) | ||
94 | + if err != nil { | ||
95 | + log.Fatal(err) | ||
96 | + } | ||
97 | + bp.AddPoint(pt) | ||
98 | + | ||
99 | + // Write the batch | ||
100 | + if err := c.Write(bp); err != nil { | ||
101 | + log.Fatal(err) | ||
102 | + } | ||
103 | +} | ||
104 | + | ||
105 | +``` | ||
106 | + | ||
107 | +### Inserting Data | ||
108 | + | ||
109 | +Time series data aka *points* are written to the database using batch inserts. | ||
110 | +The mechanism is to create one or more points and then create a batch aka | ||
111 | +*batch points* and write these to a given database and series. A series is a | ||
112 | +combination of a measurement (time/values) and a set of tags. | ||
113 | + | ||
114 | +In this sample we will create a batch of 1,000 points. Each point has a time and | ||
115 | +two field values, and is tagged with a CPU name, host, and region. We write these points | ||
116 | +to a database called _systemstats_ using a measurement named _cpu_usage_. | ||
117 | + | ||
118 | +NOTE: You can specify a RetentionPolicy as part of the batch points. If not | ||
119 | +provided InfluxDB will use the database _default_ retention policy. | ||
120 | + | ||
121 | +```go | ||
122 | + | ||
123 | +func writePoints(clnt client.Client) { | ||
124 | + sampleSize := 1000 | ||
125 | + | ||
126 | + bp, err := client.NewBatchPoints(client.BatchPointsConfig{ | ||
127 | + Database: "systemstats", | ||
128 | + Precision: "us", | ||
129 | + }) | ||
130 | + if err != nil { | ||
131 | + log.Fatal(err) | ||
132 | + } | ||
133 | + | ||
134 | + rand.Seed(time.Now().UnixNano()) | ||
135 | + for i := 0; i < sampleSize; i++ { | ||
136 | + regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"} | ||
137 | + tags := map[string]string{ | ||
138 | + "cpu": "cpu-total", | ||
139 | + "host": fmt.Sprintf("host%d", rand.Intn(1000)), | ||
140 | + "region": regions[rand.Intn(len(regions))], | ||
141 | + } | ||
142 | + | ||
143 | + idle := rand.Float64() * 100.0 | ||
144 | + fields := map[string]interface{}{ | ||
145 | + "idle": idle, | ||
146 | + "busy": 100.0 - idle, | ||
147 | + } | ||
148 | + | ||
149 | + pt, err := client.NewPoint( | ||
150 | + "cpu_usage", | ||
151 | + tags, | ||
152 | + fields, | ||
153 | + time.Now(), | ||
154 | + ) | ||
155 | + if err != nil { | ||
156 | + log.Fatal(err) | ||
157 | + } | ||
158 | + bp.AddPoint(pt) | ||
159 | + } | ||
160 | + | ||
161 | + if err := clnt.Write(bp); err != nil { | ||
162 | + log.Fatal(err) | ||
163 | + } | ||
164 | +} | ||
165 | +``` | ||
166 | + | ||
167 | +### Querying Data | ||
168 | + | ||
169 | +One nice advantage of using **InfluxDB** is the ability to query your data using familiar | ||
170 | +SQL constructs. In this example we can create a convenience function to query the database | ||
171 | +as follows: | ||
172 | + | ||
173 | +```go | ||
174 | +// queryDB is a convenience function to query the database | ||
175 | +func queryDB(clnt client.Client, cmd string) (res []client.Result, err error) { | ||
176 | + q := client.Query{ | ||
177 | + Command: cmd, | ||
178 | + Database: MyDB, | ||
179 | + } | ||
180 | + if response, err := clnt.Query(q); err == nil { | ||
181 | + if response.Error() != nil { | ||
182 | + return res, response.Error() | ||
183 | + } | ||
184 | + res = response.Results | ||
185 | + } else { | ||
186 | + return res, err | ||
187 | + } | ||
188 | + return res, nil | ||
189 | +} | ||
190 | +``` | ||
191 | + | ||
192 | +#### Creating a Database | ||
193 | + | ||
194 | +```go | ||
195 | +_, err := queryDB(clnt, fmt.Sprintf("CREATE DATABASE %s", MyDB)) | ||
196 | +if err != nil { | ||
197 | + log.Fatal(err) | ||
198 | +} | ||
199 | +``` | ||
200 | + | ||
201 | +#### Count Records | ||
202 | + | ||
203 | +```go | ||
204 | +q := fmt.Sprintf("SELECT count(%s) FROM %s", "value", MyMeasurement) | ||
205 | +res, err := queryDB(clnt, q) | ||
206 | +if err != nil { | ||
207 | + log.Fatal(err) | ||
208 | +} | ||
209 | +count := res[0].Series[0].Values[0][1] | ||
210 | +log.Printf("Found a total of %v records\n", count) | ||
211 | +``` | ||
212 | + | ||
213 | +#### Find the last 10 _shapes_ records | ||
214 | + | ||
215 | +```go | ||
216 | +q := fmt.Sprintf("SELECT * FROM %s LIMIT %d", MyMeasurement, 10) | ||
217 | +res, err := queryDB(clnt, q) | ||
218 | +if err != nil { | ||
219 | + log.Fatal(err) | ||
220 | +} | ||
221 | + | ||
222 | +for i, row := range res[0].Series[0].Values { | ||
223 | + t, err := time.Parse(time.RFC3339, row[0].(string)) | ||
224 | + if err != nil { | ||
225 | + log.Fatal(err) | ||
226 | + } | ||
227 | + // Field values come back as JSON numbers, not strings, so don't assert to string. | ||
228 | + val := row[1] | ||
229 | + log.Printf("[%2d] %s: %v\n", i, t.Format(time.Stamp), val) | ||
229 | +} | ||
230 | +``` | ||
231 | + | ||
232 | +### Using the UDP Client | ||
233 | + | ||
234 | +The **InfluxDB** client also supports writing over UDP. | ||
235 | + | ||
236 | +```go | ||
237 | +func WriteUDP() { | ||
238 | + // Make client | ||
239 | + c, err := client.NewUDPClient(client.UDPConfig{Addr: "localhost:8089"}) | ||
240 | + if err != nil { | ||
241 | + panic(err.Error()) | ||
242 | + } | ||
243 | + | ||
244 | + // Create a new point batch | ||
245 | + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ | ||
246 | + Precision: "s", | ||
247 | + }) | ||
248 | + | ||
249 | + // Create a point and add to batch | ||
250 | + tags := map[string]string{"cpu": "cpu-total"} | ||
251 | + fields := map[string]interface{}{ | ||
252 | + "idle": 10.1, | ||
253 | + "system": 53.3, | ||
254 | + "user": 46.6, | ||
255 | + } | ||
256 | + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) | ||
257 | + if err != nil { | ||
258 | + panic(err.Error()) | ||
259 | + } | ||
260 | + bp.AddPoint(pt) | ||
261 | + | ||
262 | + // Write the batch; UDP writes are best-effort, but Write can still fail locally. | ||
263 | + if err := c.Write(bp); err != nil { | ||
264 | + panic(err.Error()) | ||
265 | + } | ||
264 | +} | ||
265 | +``` | ||
266 | + | ||
267 | +### Point Splitting | ||
268 | + | ||
269 | +The UDP client now supports splitting single points that exceed the configured | ||
270 | +payload size. The logic for processing each point is listed here, starting with | ||
271 | +an empty payload. | ||
272 | + | ||
273 | +1. If adding the point to the current (non-empty) payload would exceed the | ||
274 | + configured size, send the current payload. Otherwise, add it to the current | ||
275 | + payload. | ||
276 | +1. If the point is smaller than the configured size, add it to the payload. | ||
277 | +1. If the point has no timestamp, just try to send the entire point as a single | ||
278 | + UDP payload, and process the next point. | ||
279 | +1. Since the point has a timestamp, re-use the existing measurement name, | ||
280 | + tagset, and timestamp and create multiple new points by splitting up the | ||
281 | + fields. The per-point length will be kept close to the configured size, | ||
282 | + staying under it if possible. This does mean that one large field, maybe a | ||
283 | + long string, could be sent as a larger-than-configured payload. | ||
284 | + | ||
285 | +The above logic attempts to respect configured payload sizes without sacrificing | ||
286 | +data integrity. Points without a timestamp can't be split, as that may | ||
287 | +cause fields to have differing timestamps when processed by the server. A | ||
288 | +simplified sketch of the field-splitting step appears below. | ||
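A simplified sketch of the field-splitting step (an illustration of the idea only, not the client's actual implementation; it splits by field count rather than serialized byte size for brevity):

```go
package main

import "fmt"

// splitFields distributes one point's fields across several field maps.
// Each resulting map would be recombined with the original measurement,
// tagset, and timestamp to form a smaller point.
func splitFields(fields map[string]interface{}, perPoint int) []map[string]interface{} {
	var out []map[string]interface{}
	cur := map[string]interface{}{}
	for k, v := range fields {
		cur[k] = v
		if len(cur) == perPoint {
			out = append(out, cur)
			cur = map[string]interface{}{}
		}
	}
	if len(cur) > 0 {
		out = append(out, cur)
	}
	return out
}

func main() {
	fields := map[string]interface{}{"idle": 10.1, "system": 53.3, "user": 46.6}
	for i, f := range splitFields(fields, 2) {
		fmt.Printf("point %d: %v\n", i, f) // each shares measurement/tags/timestamp
	}
}
```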
288 | + | ||
289 | +## Go Docs | ||
290 | + | ||
291 | +Please refer to | ||
292 | +[http://godoc.org/github.com/influxdata/influxdb/client/v2](http://godoc.org/github.com/influxdata/influxdb/client/v2) | ||
293 | +for documentation. | ||
294 | + | ||
295 | +## See Also | ||
296 | + | ||
297 | +You can also examine how the client library is used by the | ||
298 | +[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go). |
1 | +package client_test | ||
2 | + | ||
3 | +import ( | ||
4 | + "fmt" | ||
5 | + "log" | ||
6 | + "math/rand" | ||
7 | + "net/url" | ||
8 | + "os" | ||
9 | + "strconv" | ||
10 | + "time" | ||
11 | + | ||
12 | + "github.com/influxdata/influxdb/client" | ||
13 | +) | ||
14 | + | ||
15 | +func ExampleNewClient() { | ||
16 | + host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) | ||
17 | + if err != nil { | ||
18 | + log.Fatal(err) | ||
19 | + } | ||
20 | + | ||
21 | + // NOTE: this assumes you've set up a user and the shell environment | ||
22 | + // variables INFLUX_USER/INFLUX_PWD. If not, just omit Username/Password below. | ||
23 | + conf := client.Config{ | ||
24 | + URL: *host, | ||
25 | + Username: os.Getenv("INFLUX_USER"), | ||
26 | + Password: os.Getenv("INFLUX_PWD"), | ||
27 | + } | ||
28 | + con, err := client.NewClient(conf) | ||
29 | + if err != nil { | ||
30 | + log.Fatal(err) | ||
31 | + } | ||
32 | + log.Println("Connection", con) | ||
33 | +} | ||
34 | + | ||
35 | +func ExampleClient_Ping() { | ||
36 | + host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) | ||
37 | + if err != nil { | ||
38 | + log.Fatal(err) | ||
39 | + } | ||
40 | + con, err := client.NewClient(client.Config{URL: *host}) | ||
41 | + if err != nil { | ||
42 | + log.Fatal(err) | ||
43 | + } | ||
44 | + | ||
45 | + dur, ver, err := con.Ping() | ||
46 | + if err != nil { | ||
47 | + log.Fatal(err) | ||
48 | + } | ||
49 | + log.Printf("Happy as a hippo! %v, %s", dur, ver) | ||
50 | +} | ||
51 | + | ||
52 | +func ExampleClient_Query() { | ||
53 | + host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) | ||
54 | + if err != nil { | ||
55 | + log.Fatal(err) | ||
56 | + } | ||
57 | + con, err := client.NewClient(client.Config{URL: *host}) | ||
58 | + if err != nil { | ||
59 | + log.Fatal(err) | ||
60 | + } | ||
61 | + | ||
62 | + q := client.Query{ | ||
63 | + Command: "select count(value) from shapes", | ||
64 | + Database: "square_holes", | ||
65 | + } | ||
66 | + if response, err := con.Query(q); err == nil && response.Error() == nil { | ||
67 | + log.Println(response.Results) | ||
68 | + } | ||
69 | +} | ||
70 | + | ||
71 | +func ExampleClient_Write() { | ||
72 | + host, err := url.Parse(fmt.Sprintf("http://%s:%d", "localhost", 8086)) | ||
73 | + if err != nil { | ||
74 | + log.Fatal(err) | ||
75 | + } | ||
76 | + con, err := client.NewClient(client.Config{URL: *host}) | ||
77 | + if err != nil { | ||
78 | + log.Fatal(err) | ||
79 | + } | ||
80 | + | ||
81 | + var ( | ||
82 | + shapes = []string{"circle", "rectangle", "square", "triangle"} | ||
83 | + colors = []string{"red", "blue", "green"} | ||
84 | + sampleSize = 1000 | ||
85 | + pts = make([]client.Point, sampleSize) | ||
86 | + ) | ||
87 | + | ||
88 | + rand.Seed(42) | ||
89 | + for i := 0; i < sampleSize; i++ { | ||
90 | + pts[i] = client.Point{ | ||
91 | + Measurement: "shapes", | ||
92 | + Tags: map[string]string{ | ||
93 | + "color": strconv.Itoa(rand.Intn(len(colors))), | ||
94 | + "shape": strconv.Itoa(rand.Intn(len(shapes))), | ||
95 | + }, | ||
96 | + Fields: map[string]interface{}{ | ||
97 | + "value": rand.Intn(sampleSize), | ||
98 | + }, | ||
99 | + Time: time.Now(), | ||
100 | + Precision: "s", | ||
101 | + } | ||
102 | + } | ||
103 | + | ||
104 | + bps := client.BatchPoints{ | ||
105 | + Points: pts, | ||
106 | + Database: "BumbeBeeTuna", | ||
107 | + RetentionPolicy: "default", | ||
108 | + } | ||
109 | + _, err = con.Write(bps) | ||
110 | + if err != nil { | ||
111 | + log.Fatal(err) | ||
112 | + } | ||
113 | +} |
1 | +package client_test | ||
2 | + | ||
3 | +import ( | ||
4 | + "fmt" | ||
5 | + "math/rand" | ||
6 | + "os" | ||
7 | + "time" | ||
8 | + | ||
9 | + "github.com/influxdata/influxdb/client/v2" | ||
10 | +) | ||
11 | + | ||
12 | +// Create a new client | ||
13 | +func ExampleClient() { | ||
14 | + // NOTE: this assumes you've set up a user and the shell environment | ||
15 | + // variables INFLUX_USER/INFLUX_PWD. If not, just omit Username/Password below. | ||
16 | + _, err := client.NewHTTPClient(client.HTTPConfig{ | ||
17 | + Addr: "http://localhost:8086", | ||
18 | + Username: os.Getenv("INFLUX_USER"), | ||
19 | + Password: os.Getenv("INFLUX_PWD"), | ||
20 | + }) | ||
21 | + if err != nil { | ||
22 | + fmt.Println("Error creating InfluxDB Client: ", err.Error()) | ||
23 | + } | ||
24 | +} | ||
25 | + | ||
26 | +// Write a point using the UDP client | ||
27 | +func ExampleClient_uDP() { | ||
28 | + // Make client | ||
29 | + config := client.UDPConfig{Addr: "localhost:8089"} | ||
30 | + c, err := client.NewUDPClient(config) | ||
31 | + if err != nil { | ||
32 | + fmt.Println("Error: ", err.Error()) | ||
+ return // c is nil on error; avoid a panic in the deferred Close below
33 | + } | ||
34 | + defer c.Close() | ||
35 | + | ||
36 | + // Create a new point batch | ||
37 | + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ | ||
38 | + Precision: "s", | ||
39 | + }) | ||
40 | + | ||
41 | + // Create a point and add to batch | ||
42 | + tags := map[string]string{"cpu": "cpu-total"} | ||
43 | + fields := map[string]interface{}{ | ||
44 | + "idle": 10.1, | ||
45 | + "system": 53.3, | ||
46 | + "user": 46.6, | ||
47 | + } | ||
48 | + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) | ||
49 | + if err != nil { | ||
50 | + fmt.Println("Error: ", err.Error()) | ||
51 | + } | ||
52 | + bp.AddPoint(pt) | ||
53 | + | ||
54 | + // Write the batch | ||
55 | + c.Write(bp) | ||
56 | +} | ||
57 | + | ||
58 | +// Ping the cluster using the HTTP client | ||
59 | +func ExampleClient_Ping() { | ||
60 | + // Make client | ||
61 | + c, err := client.NewHTTPClient(client.HTTPConfig{ | ||
62 | + Addr: "http://localhost:8086", | ||
63 | + }) | ||
64 | + if err != nil { | ||
65 | + fmt.Println("Error creating InfluxDB Client: ", err.Error()) | ||
66 | + } | ||
67 | + defer c.Close() | ||
68 | + | ||
69 | + _, _, err = c.Ping(0) | ||
70 | + if err != nil { | ||
71 | + fmt.Println("Error pinging InfluxDB Cluster: ", err.Error()) | ||
72 | + } | ||
73 | +} | ||
74 | + | ||
75 | +// Write a point using the HTTP client | ||
76 | +func ExampleClient_write() { | ||
77 | + // Make client | ||
78 | + c, err := client.NewHTTPClient(client.HTTPConfig{ | ||
79 | + Addr: "http://localhost:8086", | ||
80 | + }) | ||
81 | + if err != nil { | ||
82 | + fmt.Println("Error creating InfluxDB Client: ", err.Error()) | ||
83 | + } | ||
84 | + defer c.Close() | ||
85 | + | ||
86 | + // Create a new point batch | ||
87 | + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ | ||
88 | + Database: "BumbleBeeTuna", | ||
89 | + Precision: "s", | ||
90 | + }) | ||
91 | + | ||
92 | + // Create a point and add to batch | ||
93 | + tags := map[string]string{"cpu": "cpu-total"} | ||
94 | + fields := map[string]interface{}{ | ||
95 | + "idle": 10.1, | ||
96 | + "system": 53.3, | ||
97 | + "user": 46.6, | ||
98 | + } | ||
99 | + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) | ||
100 | + if err != nil { | ||
101 | + fmt.Println("Error: ", err.Error()) | ||
102 | + } | ||
103 | + bp.AddPoint(pt) | ||
104 | + | ||
105 | + // Write the batch | ||
106 | + c.Write(bp) | ||
107 | +} | ||
108 | + | ||
109 | +// Create a batch and add a point | ||
110 | +func ExampleBatchPoints() { | ||
111 | + // Create a new point batch | ||
112 | + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ | ||
113 | + Database: "BumbleBeeTuna", | ||
114 | + Precision: "s", | ||
115 | + }) | ||
116 | + | ||
117 | + // Create a point and add to batch | ||
118 | + tags := map[string]string{"cpu": "cpu-total"} | ||
119 | + fields := map[string]interface{}{ | ||
120 | + "idle": 10.1, | ||
121 | + "system": 53.3, | ||
122 | + "user": 46.6, | ||
123 | + } | ||
124 | + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) | ||
125 | + if err != nil { | ||
126 | + fmt.Println("Error: ", err.Error()) | ||
127 | + } | ||
128 | + bp.AddPoint(pt) | ||
129 | +} | ||
130 | + | ||
131 | +// Using the BatchPoints setter functions | ||
132 | +func ExampleBatchPoints_setters() { | ||
133 | + // Create a new point batch | ||
134 | + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{}) | ||
135 | + bp.SetDatabase("BumbleBeeTuna") | ||
136 | + bp.SetPrecision("ms") | ||
137 | + | ||
138 | + // Create a point and add to batch | ||
139 | + tags := map[string]string{"cpu": "cpu-total"} | ||
140 | + fields := map[string]interface{}{ | ||
141 | + "idle": 10.1, | ||
142 | + "system": 53.3, | ||
143 | + "user": 46.6, | ||
144 | + } | ||
145 | + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) | ||
146 | + if err != nil { | ||
147 | + fmt.Println("Error: ", err.Error()) | ||
148 | + } | ||
149 | + bp.AddPoint(pt) | ||
150 | +} | ||
151 | + | ||
152 | +// Create a new point with a timestamp | ||
153 | +func ExamplePoint() { | ||
154 | + tags := map[string]string{"cpu": "cpu-total"} | ||
155 | + fields := map[string]interface{}{ | ||
156 | + "idle": 10.1, | ||
157 | + "system": 53.3, | ||
158 | + "user": 46.6, | ||
159 | + } | ||
160 | + pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) | ||
161 | + if err == nil { | ||
162 | + fmt.Println("We created a point: ", pt.String()) | ||
163 | + } | ||
164 | +} | ||
165 | + | ||
166 | +// Create a new point without a timestamp | ||
167 | +func ExamplePoint_withoutTime() { | ||
168 | + tags := map[string]string{"cpu": "cpu-total"} | ||
169 | + fields := map[string]interface{}{ | ||
170 | + "idle": 10.1, | ||
171 | + "system": 53.3, | ||
172 | + "user": 46.6, | ||
173 | + } | ||
174 | + pt, err := client.NewPoint("cpu_usage", tags, fields) | ||
175 | + if err == nil { | ||
176 | + fmt.Println("We created a point w/o time: ", pt.String()) | ||
177 | + } | ||
178 | +} | ||
179 | + | ||
180 | +// Write 1000 points | ||
181 | +func ExampleClient_write1000() { | ||
182 | + sampleSize := 1000 | ||
183 | + | ||
184 | + // Make client | ||
185 | + c, err := client.NewHTTPClient(client.HTTPConfig{ | ||
186 | + Addr: "http://localhost:8086", | ||
187 | + }) | ||
188 | + if err != nil { | ||
189 | + fmt.Println("Error creating InfluxDB Client: ", err.Error()) | ||
190 | + } | ||
191 | + defer c.Close() | ||
192 | + | ||
193 | + rand.Seed(42) | ||
194 | + | ||
195 | + bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ | ||
196 | + Database: "systemstats", | ||
197 | + Precision: "us", | ||
198 | + }) | ||
199 | + | ||
200 | + for i := 0; i < sampleSize; i++ { | ||
201 | + regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"} | ||
202 | + tags := map[string]string{ | ||
203 | + "cpu": "cpu-total", | ||
204 | + "host": fmt.Sprintf("host%d", rand.Intn(1000)), | ||
205 | + "region": regions[rand.Intn(len(regions))], | ||
206 | + } | ||
207 | + | ||
208 | + idle := rand.Float64() * 100.0 | ||
209 | + fields := map[string]interface{}{ | ||
210 | + "idle": idle, | ||
211 | + "busy": 100.0 - idle, | ||
212 | + } | ||
213 | + | ||
214 | + pt, err := client.NewPoint( | ||
215 | + "cpu_usage", | ||
216 | + tags, | ||
217 | + fields, | ||
218 | + time.Now(), | ||
219 | + ) | ||
220 | + if err != nil { | ||
221 | + println("Error:", err.Error()) | ||
222 | + continue | ||
223 | + } | ||
224 | + bp.AddPoint(pt) | ||
225 | + } | ||
226 | + | ||
227 | + err = c.Write(bp) | ||
228 | + if err != nil { | ||
229 | + fmt.Println("Error: ", err.Error()) | ||
230 | + } | ||
231 | +} | ||
232 | + | ||
233 | +// Make a Query | ||
234 | +func ExampleClient_query() { | ||
235 | + // Make client | ||
236 | + c, err := client.NewHTTPClient(client.HTTPConfig{ | ||
237 | + Addr: "http://localhost:8086", | ||
238 | + }) | ||
239 | + if err != nil { | ||
240 | + fmt.Println("Error creating InfluxDB Client: ", err.Error()) | ||
241 | + } | ||
242 | + defer c.Close() | ||
243 | + | ||
244 | + q := client.NewQuery("SELECT count(value) FROM shapes", "square_holes", "ns") | ||
245 | + if response, err := c.Query(q); err == nil && response.Error() == nil { | ||
246 | + fmt.Println(response.Results) | ||
247 | + } | ||
248 | +} | ||
249 | + | ||
250 | +// Create a Database with a query | ||
251 | +func ExampleClient_createDatabase() { | ||
252 | + // Make client | ||
253 | + c, err := client.NewHTTPClient(client.HTTPConfig{ | ||
254 | + Addr: "http://localhost:8086", | ||
255 | + }) | ||
256 | + if err != nil { | ||
257 | + fmt.Println("Error creating InfluxDB Client: ", err.Error()) | ||
258 | + } | ||
259 | + defer c.Close() | ||
260 | + | ||
261 | + q := client.NewQuery("CREATE DATABASE telegraf", "", "") | ||
262 | + if response, err := c.Query(q); err == nil && response.Error() == nil { | ||
263 | + fmt.Println(response.Results) | ||
264 | + } | ||
265 | +} |
1 | +package client | ||
2 | + | ||
3 | +import ( | ||
4 | + "fmt" | ||
5 | + "io" | ||
6 | + "net" | ||
7 | + "time" | ||
8 | +) | ||
9 | + | ||
10 | +const ( | ||
11 | + // UDPPayloadSize is a reasonable default payload size for UDP packets that | ||
12 | + // could be travelling over the internet. | ||
13 | + UDPPayloadSize = 512 | ||
14 | +) | ||
15 | + | ||
16 | +// UDPConfig is the config data needed to create a UDP Client. | ||
17 | +type UDPConfig struct { | ||
18 | + // Addr should be of the form "host:port" | ||
19 | + // or "[ipv6-host%zone]:port". | ||
20 | + Addr string | ||
21 | + | ||
22 | + // PayloadSize is the maximum size of a UDP client message. Optional; | ||
23 | + // tune this based on your network. Defaults to UDPPayloadSize. | ||
24 | + PayloadSize int | ||
25 | +} | ||
26 | + | ||
27 | +// NewUDPClient returns a client interface for writing to an InfluxDB UDP | ||
28 | +// service from the given config. | ||
29 | +func NewUDPClient(conf UDPConfig) (Client, error) { | ||
30 | + udpAddr, err := net.ResolveUDPAddr("udp", conf.Addr) | ||
32 | + if err != nil { | ||
33 | + return nil, err | ||
34 | + } | ||
35 | + | ||
36 | + conn, err := net.DialUDP("udp", nil, udpAddr) | ||
37 | + if err != nil { | ||
38 | + return nil, err | ||
39 | + } | ||
40 | + | ||
41 | + payloadSize := conf.PayloadSize | ||
42 | + if payloadSize == 0 { | ||
43 | + payloadSize = UDPPayloadSize | ||
44 | + } | ||
45 | + | ||
46 | + return &udpclient{ | ||
47 | + conn: conn, | ||
48 | + payloadSize: payloadSize, | ||
49 | + }, nil | ||
50 | +} | ||
51 | + | ||
52 | +// Close releases the udpclient's resources. | ||
53 | +func (uc *udpclient) Close() error { | ||
54 | + return uc.conn.Close() | ||
55 | +} | ||
56 | + | ||
57 | +type udpclient struct { | ||
58 | + conn io.WriteCloser | ||
59 | + payloadSize int | ||
60 | +} | ||
61 | + | ||
62 | +func (uc *udpclient) Write(bp BatchPoints) error { | ||
63 | + var b = make([]byte, 0, uc.payloadSize) // initial buffer size, it will grow as needed | ||
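+ // d is one unit of the batch precision (e.g. "1s" for precision "s"); it is used to round point timestamps below.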
64 | + var d, _ = time.ParseDuration("1" + bp.Precision()) | ||
65 | + | ||
66 | + var delayedError error | ||
67 | + | ||
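+ // checkBuffer flushes the buffered payload when appending n more bytes would exceed the configured size; a failed write is recorded in delayedError rather than aborting the batch.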
68 | + var checkBuffer = func(n int) { | ||
69 | + if len(b) > 0 && len(b)+n > uc.payloadSize { | ||
70 | + if _, err := uc.conn.Write(b); err != nil { | ||
71 | + delayedError = err | ||
72 | + } | ||
73 | + b = b[:0] | ||
74 | + } | ||
75 | + } | ||
76 | + | ||
77 | + for _, p := range bp.Points() { | ||
78 | + p.pt.Round(d) | ||
79 | + pointSize := p.pt.StringSize() + 1 // include newline in size | ||
81 | + | ||
82 | + checkBuffer(pointSize) | ||
83 | + | ||
84 | + if p.Time().IsZero() || pointSize <= uc.payloadSize { | ||
85 | + b = p.pt.AppendString(b) | ||
86 | + b = append(b, '\n') | ||
87 | + continue | ||
88 | + } | ||
89 | + | ||
90 | + points := p.pt.Split(uc.payloadSize - 1) // account for newline character | ||
91 | + for _, sp := range points { | ||
92 | + checkBuffer(sp.StringSize() + 1) | ||
93 | + b = sp.AppendString(b) | ||
94 | + b = append(b, '\n') | ||
95 | + } | ||
96 | + } | ||
97 | + | ||
98 | + if len(b) > 0 { | ||
99 | + if _, err := uc.conn.Write(b); err != nil { | ||
100 | + return err | ||
101 | + } | ||
102 | + } | ||
103 | + return delayedError | ||
104 | +} | ||
105 | + | ||
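+// Query is unsupported for the UDP client; the UDP endpoint is write-only.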
106 | +func (uc *udpclient) Query(q Query) (*Response, error) { | ||
107 | + return nil, fmt.Errorf("Querying via UDP is not supported") | ||
108 | +} | ||
109 | + | ||
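+// Ping is a no-op for the connectionless UDP client: it reports success without contacting a server.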
110 | +func (uc *udpclient) Ping(timeout time.Duration) (time.Duration, string, error) { | ||
111 | + return 0, "", nil | ||
112 | +} |
1 | +package cli | ||
2 | + | ||
3 | +import "testing" | ||
4 | + | ||
5 | +func TestParseCommand_InsertInto(t *testing.T) { | ||
6 | + t.Parallel() | ||
7 | + | ||
8 | + c := CommandLine{} | ||
9 | + | ||
10 | + tests := []struct { | ||
11 | + cmd, db, rp string | ||
12 | + }{ | ||
13 | + { | ||
14 | + cmd: `INSERT INTO test cpu,host=serverA,region=us-west value=1.0`, | ||
15 | + db: "", | ||
16 | + rp: "test", | ||
17 | + }, | ||
18 | + { | ||
19 | + cmd: ` INSERT INTO .test cpu,host=serverA,region=us-west value=1.0`, | ||
20 | + db: "", | ||
21 | + rp: "test", | ||
22 | + }, | ||
23 | + { | ||
24 | + cmd: `INSERT INTO "test test" cpu,host=serverA,region=us-west value=1.0`, | ||
25 | + db: "", | ||
26 | + rp: "test test", | ||
27 | + }, | ||
28 | + { | ||
29 | + cmd: `Insert iNTO test.test cpu,host=serverA,region=us-west value=1.0`, | ||
30 | + db: "test", | ||
31 | + rp: "test", | ||
32 | + }, | ||
33 | + { | ||
34 | + cmd: `insert into "test test" cpu,host=serverA,region=us-west value=1.0`, | ||
35 | + db: "", | ||
36 | + rp: "test test", | ||
37 | + }, | ||
38 | + { | ||
39 | + cmd: `insert into "d b"."test test" cpu,host=serverA,region=us-west value=1.0`, | ||
40 | + db: "d b", | ||
41 | + rp: "test test", | ||
42 | + }, | ||
43 | + } | ||
44 | + | ||
45 | + for _, test := range tests { | ||
46 | + t.Logf("command: %s", test.cmd) | ||
47 | + bp, err := c.parseInsert(test.cmd) | ||
48 | + if err != nil { | ||
49 | + t.Fatal(err) | ||
50 | + } | ||
51 | + if bp.Database != test.db { | ||
52 | + t.Fatalf(`Command "insert into" db parsing failed, expected: %q, actual: %q`, test.db, bp.Database) | ||
53 | + } | ||
54 | + if bp.RetentionPolicy != test.rp { | ||
55 | + t.Fatalf(`Command "insert into" rp parsing failed, expected: %q, actual: %q`, test.rp, bp.RetentionPolicy) | ||
56 | + } | ||
57 | + } | ||
58 | +} |
1 | +package cli | ||
2 | + | ||
3 | +import ( | ||
4 | + "bytes" | ||
5 | + "fmt" | ||
6 | +) | ||
7 | + | ||
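+// parseDatabaseAndRetentionPolicy splits a "db.rp" identifier into its
+// database and retention-policy parts. Double-quoted segments may contain
+// dots; more than one unquoted separator is an error.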
8 | +func parseDatabaseAndRetentionPolicy(stmt []byte) (string, string, error) { | ||
9 | + var db, rp []byte | ||
10 | + var quoted bool | ||
11 | + var separatorCount int | ||
12 | + | ||
13 | + stmt = bytes.TrimSpace(stmt) | ||
14 | + | ||
15 | + for _, b := range stmt { | ||
16 | + if b == '"' { | ||
17 | + quoted = !quoted | ||
18 | + continue | ||
19 | + } | ||
20 | + if b == '.' && !quoted { | ||
21 | + separatorCount++ | ||
22 | + if separatorCount > 1 { | ||
23 | + return "", "", fmt.Errorf("unable to parse database and retention policy from %s", string(stmt)) | ||
24 | + } | ||
25 | + continue | ||
26 | + } | ||
27 | + if separatorCount == 1 { | ||
28 | + rp = append(rp, b) | ||
29 | + continue | ||
30 | + } | ||
31 | + db = append(db, b) | ||
32 | + } | ||
33 | + return string(db), string(rp), nil | ||
34 | +} |
1 | +package cli | ||
2 | + | ||
3 | +import ( | ||
4 | + "errors" | ||
5 | + "testing" | ||
6 | +) | ||
7 | + | ||
8 | +func Test_parseDatabaseAndRetentionPolicy(t *testing.T) { | ||
9 | + tests := []struct { | ||
10 | + stmt string | ||
11 | + db string | ||
12 | + rp string | ||
13 | + err error | ||
14 | + }{ | ||
15 | + { | ||
16 | + stmt: `foo`, | ||
17 | + db: "foo", | ||
18 | + }, | ||
19 | + { | ||
20 | + stmt: `"foo.bar"`, | ||
21 | + db: "foo.bar", | ||
22 | + }, | ||
23 | + { | ||
24 | + stmt: `"foo.bar".`, | ||
25 | + db: "foo.bar", | ||
26 | + }, | ||
27 | + { | ||
28 | + stmt: `."foo.bar"`, | ||
29 | + rp: "foo.bar", | ||
30 | + }, | ||
31 | + { | ||
32 | + stmt: `foo.bar`, | ||
33 | + db: "foo", | ||
34 | + rp: "bar", | ||
35 | + }, | ||
36 | + { | ||
37 | + stmt: `"foo".bar`, | ||
38 | + db: "foo", | ||
39 | + rp: "bar", | ||
40 | + }, | ||
41 | + { | ||
42 | + stmt: `"foo"."bar"`, | ||
43 | + db: "foo", | ||
44 | + rp: "bar", | ||
45 | + }, | ||
46 | + { | ||
47 | + stmt: `"foo.bin"."bar"`, | ||
48 | + db: "foo.bin", | ||
49 | + rp: "bar", | ||
50 | + }, | ||
51 | + { | ||
52 | + stmt: `"foo.bin"."bar.baz...."`, | ||
53 | + db: "foo.bin", | ||
54 | + rp: "bar.baz....", | ||
55 | + }, | ||
56 | + { | ||
57 | + stmt: ` "foo.bin"."bar.baz...." `, | ||
58 | + db: "foo.bin", | ||
59 | + rp: "bar.baz....", | ||
60 | + }, | ||
61 | + | ||
62 | + { | ||
63 | + stmt: `"foo.bin"."bar".boom`, | ||
64 | + err: errors.New("foo"), | ||
65 | + }, | ||
66 | + { | ||
67 | + stmt: "foo.bar.", | ||
68 | + err: errors.New("foo"), | ||
69 | + }, | ||
70 | + } | ||
71 | + | ||
72 | + for _, test := range tests { | ||
73 | + db, rp, err := parseDatabaseAndRetentionPolicy([]byte(test.stmt)) | ||
74 | + if err != nil && test.err == nil { | ||
75 | + t.Errorf("unexpected error: got %s", err) | ||
76 | + continue | ||
77 | + } | ||
78 | + if test.err != nil && err == nil { | ||
79 | + t.Errorf("expected err: got: nil, exp: %s", test.err) | ||
80 | + continue | ||
81 | + } | ||
82 | + if db != test.db { | ||
83 | + t.Errorf("unexpected database: got: %s, exp: %s", db, test.db) | ||
84 | + } | ||
85 | + if rp != test.rp { | ||
86 | + t.Errorf("unexpected retention policy: got: %s, exp: %s", rp, test.rp) | ||
87 | + } | ||
88 | + } | ||
89 | + | ||
90 | +} |
1 | +// The influx command is a CLI client to InfluxDB. | ||
2 | +package main | ||
3 | + | ||
4 | +import ( | ||
5 | + "flag" | ||
6 | + "fmt" | ||
7 | + "os" | ||
8 | + | ||
9 | + "github.com/influxdata/influxdb/client" | ||
10 | + "github.com/influxdata/influxdb/cmd/influx/cli" | ||
11 | +) | ||
12 | + | ||
13 | +// These variables are populated via the Go linker. | ||
14 | +var ( | ||
15 | + version string | ||
16 | +) | ||
17 | + | ||
18 | +const ( | ||
19 | + // defaultFormat is the default format of the results when issuing queries | ||
20 | + defaultFormat = "column" | ||
21 | + | ||
22 | + // defaultPrecision is the default timestamp format of the results when issuing queries | ||
23 | + defaultPrecision = "ns" | ||
24 | + | ||
25 | + // defaultPPS is the default points per second that the import will throttle at | ||
26 | + // by default it's 0, which means it will not throttle | ||
27 | + defaultPPS = 0 | ||
28 | +) | ||
29 | + | ||
30 | +func init() { | ||
31 | + // If version is not set, make that clear. | ||
32 | + if version == "" { | ||
33 | + version = "unknown" | ||
34 | + } | ||
35 | +} | ||
36 | + | ||
37 | +func main() { | ||
38 | + c := cli.New(version) | ||
39 | + | ||
40 | + fs := flag.NewFlagSet("InfluxDB shell version "+version, flag.ExitOnError) | ||
41 | + fs.StringVar(&c.Host, "host", client.DefaultHost, "Influxdb host to connect to.") | ||
42 | + fs.IntVar(&c.Port, "port", client.DefaultPort, "Influxdb port to connect to.") | ||
43 | + fs.StringVar(&c.ClientConfig.UnixSocket, "socket", "", "Influxdb unix socket to connect to.") | ||
44 | + fs.StringVar(&c.ClientConfig.Username, "username", "", "Username to connect to the server.") | ||
45 | + fs.StringVar(&c.ClientConfig.Password, "password", "", `Password to connect to the server. Leaving blank will prompt for password (--password="").`) | ||
46 | + fs.StringVar(&c.Database, "database", c.Database, "Database to connect to the server.") | ||
47 | + fs.BoolVar(&c.Ssl, "ssl", false, "Use https for connecting to cluster.") | ||
48 | + fs.BoolVar(&c.ClientConfig.UnsafeSsl, "unsafeSsl", false, "Set this when connecting to the cluster using https and not use SSL verification.") | ||
49 | + fs.StringVar(&c.Format, "format", defaultFormat, "Format specifies the format of the server responses: json, csv, or column.") | ||
50 | + fs.StringVar(&c.ClientConfig.Precision, "precision", defaultPrecision, "Precision specifies the format of the timestamp: rfc3339,h,m,s,ms,u or ns.") | ||
51 | + fs.StringVar(&c.ClientConfig.WriteConsistency, "consistency", "all", "Set write consistency level: any, one, quorum, or all.") | ||
52 | + fs.BoolVar(&c.Pretty, "pretty", false, "Turns on pretty print for the json format.") | ||
53 | + fs.StringVar(&c.Execute, "execute", c.Execute, "Execute command and quit.") | ||
54 | + fs.BoolVar(&c.ShowVersion, "version", false, "Displays the InfluxDB version.") | ||
55 | + fs.BoolVar(&c.Import, "import", false, "Import a previous database.") | ||
56 | + fs.IntVar(&c.ImporterConfig.PPS, "pps", defaultPPS, "How many points per second the import will allow. By default it is zero and will not throttle importing.") | ||
57 | + fs.StringVar(&c.ImporterConfig.Path, "path", "", "path to the file to import") | ||
58 | + fs.BoolVar(&c.ImporterConfig.Compressed, "compressed", false, "set to true if the import file is compressed") | ||
59 | + | ||
60 | + // Define our own custom usage to print | ||
61 | + fs.Usage = func() { | ||
62 | + fmt.Println(`Usage of influx: | ||
63 | + -version | ||
64 | + Display the version and exit. | ||
65 | + -host 'host name' | ||
66 | + Host to connect to. | ||
67 | + -port 'port #' | ||
68 | + Port to connect to. | ||
69 | + -socket 'unix domain socket' | ||
70 | + Unix socket to connect to. | ||
71 | + -database 'database name' | ||
72 | + Database to use when connecting to the server. | ||
73 | + -password 'password' | ||
74 | + Password to connect to the server. Leaving blank will prompt for password (--password ''). | ||
75 | + -username 'username' | ||
76 | + Username to connect to the server. | ||
77 | + -ssl | ||
78 | + Use https for requests. | ||
79 | + -unsafeSsl | ||
80 | + Set this when connecting to the cluster over https without verifying the SSL certificate. | ||
81 | + -execute 'command' | ||
82 | + Execute command and quit. | ||
83 | + -format 'json|csv|column' | ||
84 | + Format specifies the format of the server responses: json, csv, or column. | ||
85 | + -precision 'rfc3339|h|m|s|ms|u|ns' | ||
86 | + Precision specifies the format of the timestamp: rfc3339, h, m, s, ms, u or ns. | ||
87 | + -consistency 'any|one|quorum|all' | ||
88 | + Set write consistency level: any, one, quorum, or all | ||
89 | + -pretty | ||
90 | + Turns on pretty print for the json format. | ||
91 | + -import | ||
92 | + Import a previous database export from file | ||
93 | + -pps | ||
94 | + How many points per second the import will allow. By default it is zero and will not throttle importing. | ||
95 | + -path | ||
96 | + Path to file to import | ||
97 | + -compressed | ||
98 | + Set to true if the import file is compressed | ||
99 | + | ||
100 | +Examples: | ||
101 | + | ||
102 | + # Use influx in a non-interactive mode to query the database "metrics" and pretty print json: | ||
103 | + $ influx -database 'metrics' -execute 'select * from cpu' -format 'json' -pretty | ||
104 | + | ||
105 | + # Connect to a specific database on startup and set database context: | ||
106 | + $ influx -database 'metrics' -host 'localhost' -port '8086' | ||
107 | +`) | ||
108 | + } | ||
109 | + fs.Parse(os.Args[1:]) | ||
110 | + | ||
111 | + if c.ShowVersion { | ||
112 | + c.Version() | ||
113 | + os.Exit(0) | ||
114 | + } | ||
115 | + | ||
116 | + if err := c.Run(); err != nil { | ||
117 | + fmt.Fprintf(os.Stderr, "%s\n", err) | ||
118 | + os.Exit(1) | ||
119 | + } | ||
120 | +} |
1 | +# `influx_inspect` | ||
2 | + | ||
3 | +## Ways to run | ||
4 | + | ||
5 | +### `influx_inspect` | ||
6 | +Will print usage for the tool. | ||
7 | + | ||
8 | +### `influx_inspect report` | ||
9 | +Displays series metadata for all shards. The default location is $HOME/.influxdb. | ||
10 | + | ||
11 | +### `influx_inspect dumptsm` | ||
12 | +Dumps low-level details about tsm1 files. | ||
13 | + | ||
14 | +#### Flags | ||
15 | + | ||
16 | +##### `-index` bool | ||
17 | +Dump raw index data. | ||
18 | + | ||
19 | +`default` = false | ||
20 | + | ||
21 | +##### `-blocks` bool | ||
22 | +Dump raw block data. | ||
23 | + | ||
24 | +`default` = false | ||
25 | + | ||
26 | +##### `-all` bool | ||
27 | +Dump all data. Caution: This may print a lot of information. | ||
28 | + | ||
29 | +`default` = false | ||
30 | + | ||
31 | +##### `-filter-key` string | ||
32 | +Only display index and block data that match this key substring. | ||
33 | + | ||
34 | +`default` = "" | ||
35 | + | ||
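+#### Sample Command
+
+For example, to dump index and block data for series whose keys contain `cpu`
+(the shard file path is illustrative):
+
+```
+influx_inspect dumptsm -filter-key cpu ~/.influxdb/data/mydb/autogen/1/000000001-000000001.tsm
+```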
36 | + | ||
37 | +### `influx_inspect export` | ||
38 | +Exports all tsm files to line protocol. This output file can be imported via the [influx](https://github.com/influxdata/influxdb/tree/master/importer#running-the-import-command) command. | ||
39 | + | ||
40 | + | ||
41 | +#### `-datadir` string | ||
42 | +Data storage path. | ||
43 | + | ||
44 | +`default` = "$HOME/.influxdb/data" | ||
45 | + | ||
46 | +#### `-waldir` string | ||
47 | +WAL storage path. | ||
48 | + | ||
49 | +`default` = "$HOME/.influxdb/wal" | ||
50 | + | ||
51 | +#### `-out` string | ||
52 | +Destination file to export to. | ||
53 | + | ||
54 | +`default` = "$HOME/.influxdb/export" | ||
55 | + | ||
56 | +#### `-database` string (optional) | ||
57 | +Database to export. | ||
58 | + | ||
59 | +`default` = "" | ||
60 | + | ||
61 | +#### `-retention` string (optional) | ||
62 | +Retention policy to export. | ||
63 | + | ||
64 | +`default` = "" | ||
65 | + | ||
66 | +#### `-start` string (optional) | ||
67 | +The time at which to start exporting. | ||
68 | + | ||
69 | +#### `-end` string (optional) | ||
70 | +The time at which to stop exporting. | ||
71 | + | ||
72 | +#### `-compress` bool (optional) | ||
73 | +Compress the output. | ||
74 | + | ||
75 | +`default` = false | ||
76 | + | ||
77 | +#### Sample Commands | ||
78 | + | ||
79 | +Export entire database and compress output: | ||
80 | +``` | ||
81 | +influx_inspect export --compress | ||
82 | +``` | ||
83 | + | ||
84 | +Export specific retention policy: | ||
85 | +``` | ||
86 | +influx_inspect export --database mydb --retention autogen | ||
87 | +``` | ||
88 | + | ||
89 | +##### Sample Data | ||
90 | +This is a sample of what the output will look like. | ||
91 | + | ||
92 | +``` | ||
93 | +# DDL | ||
94 | +CREATE DATABASE MY_DB_NAME | ||
95 | +CREATE RETENTION POLICY autogen ON MY_DB_NAME DURATION inf REPLICATION 1 | ||
96 | + | ||
97 | +# DML | ||
98 | +# CONTEXT-DATABASE:MY_DB_NAME | ||
99 | +# CONTEXT-RETENTION-POLICY:autogen | ||
100 | +randset value=97.9296104805 1439856000000000000 | ||
101 | +randset value=25.3849066842 1439856100000000000 | ||
102 | +``` | ||
103 | + | ||
104 | +## Caveats | ||
105 | + | ||
106 | +The system does not have access to the meta store when exporting TSM shards. As such, it always creates the retention policy with an infinite duration and a replication factor of 1. | ||
107 | +End users may want to change this prior to re-importing if they are importing to a cluster or want a different retention duration. | ||
1 | +// Package dumptsm inspects low-level details about tsm1 files. | ||
2 | +package dumptsm | ||
3 | + | ||
4 | +import ( | ||
5 | + "encoding/binary" | ||
6 | + "flag" | ||
7 | + "fmt" | ||
8 | + "io" | ||
9 | + "os" | ||
10 | + "strconv" | ||
11 | + "strings" | ||
12 | + "text/tabwriter" | ||
13 | + "time" | ||
14 | + | ||
15 | + "github.com/influxdata/influxdb/tsdb/engine/tsm1" | ||
16 | +) | ||
17 | + | ||
18 | +// Command represents the program execution for "influxd dumptsm". | ||
19 | +type Command struct { | ||
20 | + // Standard input/output, overridden for testing. | ||
21 | + Stderr io.Writer | ||
22 | + Stdout io.Writer | ||
23 | + | ||
24 | + dumpIndex bool | ||
25 | + dumpBlocks bool | ||
26 | + dumpAll bool | ||
27 | + filterKey string | ||
28 | + path string | ||
29 | +} | ||
30 | + | ||
31 | +// NewCommand returns a new instance of Command. | ||
32 | +func NewCommand() *Command { | ||
33 | + return &Command{ | ||
34 | + Stderr: os.Stderr, | ||
35 | + Stdout: os.Stdout, | ||
36 | + } | ||
37 | +} | ||
38 | + | ||
39 | +// Run executes the command. | ||
40 | +func (cmd *Command) Run(args ...string) error { | ||
41 | + fs := flag.NewFlagSet("file", flag.ExitOnError) | ||
42 | + fs.BoolVar(&cmd.dumpIndex, "index", false, "Dump raw index data") | ||
43 | + fs.BoolVar(&cmd.dumpBlocks, "blocks", false, "Dump raw block data") | ||
44 | + fs.BoolVar(&cmd.dumpAll, "all", false, "Dump all data. Caution: This may print a lot of information") | ||
45 | + fs.StringVar(&cmd.filterKey, "filter-key", "", "Only display index and block data match this key substring") | ||
46 | + | ||
47 | + fs.SetOutput(cmd.Stdout) | ||
48 | + fs.Usage = cmd.printUsage | ||
49 | + | ||
50 | + if err := fs.Parse(args); err != nil { | ||
51 | + return err | ||
52 | + } | ||
53 | + | ||
54 | + if fs.Arg(0) == "" { | ||
55 | + fmt.Printf("TSM file not specified\n\n") | ||
56 | + fs.Usage() | ||
57 | + return nil | ||
58 | + } | ||
59 | + cmd.path = fs.Args()[0] | ||
60 | + cmd.dumpBlocks = cmd.dumpBlocks || cmd.dumpAll || cmd.filterKey != "" | ||
61 | + cmd.dumpIndex = cmd.dumpIndex || cmd.dumpAll || cmd.filterKey != "" | ||
62 | + return cmd.dump() | ||
63 | +} | ||
64 | + | ||
65 | +func (cmd *Command) dump() error { | ||
66 | + var errors []error | ||
67 | + | ||
68 | + f, err := os.Open(cmd.path) | ||
69 | + if err != nil { | ||
70 | + return err | ||
71 | + } | ||
72 | + | ||
73 | + // Get the file size | ||
74 | + stat, err := f.Stat() | ||
75 | + if err != nil { | ||
76 | + return err | ||
77 | + } | ||
78 | + b := make([]byte, 8) | ||
79 | + | ||
80 | + r, err := tsm1.NewTSMReader(f) | ||
81 | + if err != nil { | ||
82 | + return fmt.Errorf("Error opening TSM files: %s", err.Error()) | ||
83 | + } | ||
84 | + defer r.Close() | ||
85 | + | ||
86 | + minTime, maxTime := r.TimeRange() | ||
87 | + keyCount := r.KeyCount() | ||
88 | + | ||
89 | + blockStats := &blockStats{} | ||
90 | + | ||
91 | + println("Summary:") | ||
92 | + fmt.Printf(" File: %s\n", cmd.path) | ||
93 | + fmt.Printf(" Time Range: %s - %s\n", | ||
94 | + time.Unix(0, minTime).UTC().Format(time.RFC3339Nano), | ||
95 | + time.Unix(0, maxTime).UTC().Format(time.RFC3339Nano), | ||
96 | + ) | ||
97 | + fmt.Printf(" Duration: %s ", time.Unix(0, maxTime).Sub(time.Unix(0, minTime))) | ||
98 | + fmt.Printf(" Series: %d ", keyCount) | ||
99 | + fmt.Printf(" File Size: %d\n", stat.Size()) | ||
100 | + println() | ||
101 | + | ||
102 | + tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0) | ||
103 | + | ||
104 | + if cmd.dumpIndex { | ||
105 | + println("Index:") | ||
106 | + tw.Flush() | ||
107 | + println() | ||
108 | + | ||
109 | + fmt.Fprintln(tw, " "+strings.Join([]string{"Pos", "Min Time", "Max Time", "Ofs", "Size", "Key", "Field"}, "\t")) | ||
110 | + var pos int | ||
111 | + for i := 0; i < keyCount; i++ { | ||
112 | + key, _ := r.KeyAt(i) | ||
113 | + for _, e := range r.Entries(string(key)) { | ||
114 | + pos++ | ||
115 | + split := strings.Split(string(key), "#!~#") | ||
116 | + | ||
117 | + // Possible corruption? Try to read as much as we can and point to the problem. | ||
118 | + measurement := split[0] | ||
119 | + field := split[1] | ||
120 | + | ||
121 | + if cmd.filterKey != "" && !strings.Contains(string(key), cmd.filterKey) { | ||
122 | + continue | ||
123 | + } | ||
124 | + fmt.Fprintln(tw, " "+strings.Join([]string{ | ||
125 | + strconv.FormatInt(int64(pos), 10), | ||
126 | + time.Unix(0, e.MinTime).UTC().Format(time.RFC3339Nano), | ||
127 | + time.Unix(0, e.MaxTime).UTC().Format(time.RFC3339Nano), | ||
128 | + strconv.FormatInt(int64(e.Offset), 10), | ||
129 | + strconv.FormatInt(int64(e.Size), 10), | ||
130 | + measurement, | ||
131 | + field, | ||
132 | + }, "\t")) | ||
133 | + tw.Flush() | ||
134 | + } | ||
135 | + } | ||
136 | + } | ||
137 | + | ||
138 | + tw = tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0) | ||
139 | + fmt.Fprintln(tw, " "+strings.Join([]string{"Blk", "Chk", "Ofs", "Len", "Type", "Min Time", "Points", "Enc [T/V]", "Len [T/V]"}, "\t")) | ||
140 | + | ||
141 | + // Starting at 5 because the magic number is 4 bytes + 1 byte version | ||
142 | + i := int64(5) | ||
143 | + var blockCount, pointCount, blockSize int64 | ||
144 | + indexSize := r.IndexSize() | ||
145 | + | ||
146 | + // Start at the beginning and read every block | ||
147 | + for j := 0; j < keyCount; j++ { | ||
148 | + key, _ := r.KeyAt(j) | ||
149 | + for _, e := range r.Entries(string(key)) { | ||
150 | + | ||
151 | + f.Seek(int64(e.Offset), 0) | ||
152 | + f.Read(b[:4]) | ||
153 | + | ||
154 | + chksum := binary.BigEndian.Uint32(b[:4]) | ||
155 | + | ||
156 | + buf := make([]byte, e.Size-4) | ||
157 | + f.Read(buf) | ||
158 | + | ||
159 | + blockSize += int64(e.Size) | ||
160 | + | ||
161 | + if cmd.filterKey != "" && !strings.Contains(string(key), cmd.filterKey) { | ||
162 | + i += blockSize | ||
163 | + blockCount++ | ||
164 | + continue | ||
165 | + } | ||
166 | + | ||
167 | + blockType := buf[0] | ||
168 | + | ||
169 | + encoded := buf[1:] | ||
170 | + | ||
171 | + var v []tsm1.Value | ||
172 | + v, err := tsm1.DecodeBlock(buf, v) | ||
173 | + if err != nil { | ||
174 | + return err | ||
175 | + } | ||
176 | + startTime := time.Unix(0, v[0].UnixNano()) | ||
177 | + | ||
178 | + pointCount += int64(len(v)) | ||
179 | + | ||
180 | + // Length of the timestamp block | ||
181 | + tsLen, n := binary.Uvarint(encoded) | ||
182 | + | ||
183 | + // Unpack the timestamp bytes | ||
184 | + ts := encoded[int(n) : int(n)+int(tsLen)] | ||
185 | + | ||
186 | + // Unpack the value bytes | ||
187 | + values := encoded[int(n)+int(tsLen):] | ||
188 | + | ||
189 | + tsEncoding := timeEnc[int(ts[0]>>4)] | ||
190 | + vEncoding := encDescs[int(blockType+1)][values[0]>>4] | ||
191 | + | ||
192 | + typeDesc := blockTypes[blockType] | ||
193 | + | ||
194 | + blockStats.inc(0, ts[0]>>4) | ||
195 | + blockStats.inc(int(blockType+1), values[0]>>4) | ||
196 | + blockStats.size(len(buf)) | ||
197 | + | ||
198 | + if cmd.dumpBlocks { | ||
199 | + fmt.Fprintln(tw, " "+strings.Join([]string{ | ||
200 | + strconv.FormatInt(blockCount, 10), | ||
201 | + strconv.FormatUint(uint64(chksum), 10), | ||
202 | + strconv.FormatInt(i, 10), | ||
203 | + strconv.FormatInt(int64(len(buf)), 10), | ||
204 | + typeDesc, | ||
205 | + startTime.UTC().Format(time.RFC3339Nano), | ||
206 | + strconv.FormatInt(int64(len(v)), 10), | ||
207 | + fmt.Sprintf("%s/%s", tsEncoding, vEncoding), | ||
208 | + fmt.Sprintf("%d/%d", len(ts), len(values)), | ||
209 | + }, "\t")) | ||
210 | + } | ||
211 | + | ||
212 | + i += blockSize | ||
213 | + blockCount++ | ||
214 | + } | ||
215 | + } | ||
216 | + | ||
217 | + if cmd.dumpBlocks { | ||
218 | + println("Blocks:") | ||
219 | + tw.Flush() | ||
220 | + println() | ||
221 | + } | ||
222 | + | ||
223 | + var blockSizeAvg int64 | ||
224 | + if blockCount > 0 { | ||
225 | + blockSizeAvg = blockSize / blockCount | ||
226 | + } | ||
227 | + fmt.Printf("Statistics\n") | ||
228 | + fmt.Printf(" Blocks:\n") | ||
229 | + fmt.Printf(" Total: %d Size: %d Min: %d Max: %d Avg: %d\n", | ||
230 | + blockCount, blockSize, blockStats.min, blockStats.max, blockSizeAvg) | ||
231 | + fmt.Printf(" Index:\n") | ||
232 | + fmt.Printf(" Total: %d Size: %d\n", blockCount, indexSize) | ||
233 | + fmt.Printf(" Points:\n") | ||
234 | + fmt.Printf(" Total: %d", pointCount) | ||
235 | + println() | ||
236 | + | ||
237 | + println(" Encoding:") | ||
238 | + for i, counts := range blockStats.counts { | ||
239 | + if len(counts) == 0 { | ||
240 | + continue | ||
241 | + } | ||
242 | + fmt.Printf(" %s: ", strings.Title(fieldType[i])) | ||
243 | + for j, v := range counts { | ||
244 | + fmt.Printf("\t%s: %d (%d%%) ", encDescs[i][j], v, int(float64(v)/float64(blockCount)*100)) | ||
245 | + } | ||
246 | + println() | ||
247 | + } | ||
248 | + fmt.Printf(" Compression:\n") | ||
249 | + fmt.Printf(" Per block: %0.2f bytes/point\n", float64(blockSize)/float64(pointCount)) | ||
250 | + fmt.Printf(" Total: %0.2f bytes/point\n", float64(stat.Size())/float64(pointCount)) | ||
251 | + | ||
252 | + if len(errors) > 0 { | ||
253 | + println() | ||
254 | + fmt.Printf("Errors (%d):\n", len(errors)) | ||
255 | + for _, err := range errors { | ||
256 | + fmt.Printf(" * %v\n", err) | ||
257 | + } | ||
258 | + println() | ||
259 | + return fmt.Errorf("error count %d", len(errors)) | ||
260 | + } | ||
261 | + return nil | ||
262 | +} | ||
263 | + | ||
264 | +// printUsage prints the usage message to the command's Stdout. | ||
265 | +func (cmd *Command) printUsage() { | ||
266 | + usage := `Dumps low-level details about tsm1 files. | ||
267 | + | ||
268 | +Usage: influx_inspect dumptsm [flags] <path | ||
269 | + | ||
270 | + -index | ||
271 | + Dump raw index data | ||
272 | + -blocks | ||
273 | + Dump raw block data | ||
274 | + -all | ||
275 | + Dump all data. Caution: This may print a lot of information | ||
276 | + -filter-key <name> | ||
276 | + Only display index and block data that match this key substring | ||
278 | +` | ||
279 | + | ||
280 | + fmt.Fprint(cmd.Stdout, usage) | ||
281 | +} | ||
282 | + | ||
283 | +var ( | ||
284 | + fieldType = []string{ | ||
285 | + "timestamp", "float", "int", "bool", "string", | ||
286 | + } | ||
287 | + blockTypes = []string{ | ||
288 | + "float64", "int64", "bool", "string", | ||
289 | + } | ||
290 | + timeEnc = []string{ | ||
291 | + "none", "s8b", "rle", | ||
292 | + } | ||
293 | + floatEnc = []string{ | ||
294 | + "none", "gor", | ||
295 | + } | ||
296 | + intEnc = []string{ | ||
297 | + "none", "s8b", "rle", | ||
298 | + } | ||
299 | + boolEnc = []string{ | ||
300 | + "none", "bp", | ||
301 | + } | ||
302 | + stringEnc = []string{ | ||
303 | + "none", "snpy", | ||
304 | + } | ||
305 | + encDescs = [][]string{ | ||
306 | + timeEnc, floatEnc, intEnc, boolEnc, stringEnc, | ||
307 | + } | ||
308 | +) | ||
309 | + | ||
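+// blockStats tracks minimum and maximum block sizes plus per-type, per-encoding block counts.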
310 | +type blockStats struct { | ||
311 | + min, max int | ||
312 | + counts [][]int | ||
313 | +} | ||
314 | + | ||
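+// inc increments the count for the given block type and encoding, growing the count slices as needed.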
315 | +func (b *blockStats) inc(typ int, enc byte) { | ||
316 | + for len(b.counts) <= typ { | ||
317 | + b.counts = append(b.counts, []int{}) | ||
318 | + } | ||
319 | + for len(b.counts[typ]) <= int(enc) { | ||
320 | + b.counts[typ] = append(b.counts[typ], 0) | ||
321 | + } | ||
322 | + b.counts[typ][enc]++ | ||
323 | +} | ||
324 | + | ||
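+// size folds sz into the running min/max block-size statistics.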
325 | +func (b *blockStats) size(sz int) { | ||
326 | + if b.min == 0 || sz < b.min { | ||
327 | + b.min = sz | ||
328 | + } | ||
329 | + if b.min == 0 || sz > b.max { | ||
330 | + b.max = sz | ||
331 | + } | ||
332 | +} |
1 | +package export | ||
2 | + | ||
3 | +import ( | ||
4 | + "bytes" | ||
5 | + "fmt" | ||
6 | + "io/ioutil" | ||
7 | + "math" | ||
8 | + "math/rand" | ||
9 | + "os" | ||
10 | + "sort" | ||
11 | + "strconv" | ||
12 | + "strings" | ||
13 | + "testing" | ||
14 | + | ||
15 | + "github.com/golang/snappy" | ||
16 | + "github.com/influxdata/influxdb/tsdb/engine/tsm1" | ||
17 | +) | ||
18 | + | ||
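+// corpus maps series-field keys to the values the test fixtures store for them.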
19 | +type corpus map[string][]tsm1.Value | ||
20 | + | ||
21 | +var ( | ||
22 | + basicCorpus = corpus{ | ||
23 | + tsm1.SeriesFieldKey("floats,k=f", "f"): []tsm1.Value{ | ||
24 | + tsm1.NewValue(1, float64(1.5)), | ||
25 | + tsm1.NewValue(2, float64(3)), | ||
26 | + }, | ||
27 | + tsm1.SeriesFieldKey("ints,k=i", "i"): []tsm1.Value{ | ||
28 | + tsm1.NewValue(10, int64(15)), | ||
29 | + tsm1.NewValue(20, int64(30)), | ||
30 | + }, | ||
31 | + tsm1.SeriesFieldKey("bools,k=b", "b"): []tsm1.Value{ | ||
32 | + tsm1.NewValue(100, true), | ||
33 | + tsm1.NewValue(200, false), | ||
34 | + }, | ||
35 | + tsm1.SeriesFieldKey("strings,k=s", "s"): []tsm1.Value{ | ||
36 | + tsm1.NewValue(1000, "1k"), | ||
37 | + tsm1.NewValue(2000, "2k"), | ||
38 | + }, | ||
39 | + } | ||
40 | + | ||
41 | + basicCorpusExpLines = []string{ | ||
42 | + "floats,k=f f=1.5 1", | ||
43 | + "floats,k=f f=3 2", | ||
44 | + "ints,k=i i=15i 10", | ||
45 | + "ints,k=i i=30i 20", | ||
46 | + "bools,k=b b=true 100", | ||
47 | + "bools,k=b b=false 200", | ||
48 | + `strings,k=s s="1k" 1000`, | ||
49 | + `strings,k=s s="2k" 2000`, | ||
50 | + } | ||
51 | + | ||
52 | + escapeStringCorpus = corpus{ | ||
53 | + tsm1.SeriesFieldKey("t", "s"): []tsm1.Value{ | ||
54 | + tsm1.NewValue(1, `1. "quotes"`), | ||
55 | + tsm1.NewValue(2, `2. back\slash`), | ||
56 | + tsm1.NewValue(3, `3. bs\q"`), | ||
57 | + }, | ||
58 | + } | ||
59 | + | ||
60 | + escCorpusExpLines = []string{ | ||
61 | + `t s="1. \"quotes\"" 1`, | ||
62 | + `t s="2. back\\slash" 2`, | ||
63 | + `t s="3. bs\\q\"" 3`, | ||
64 | + } | ||
65 | +) | ||
66 | + | ||
67 | +func Test_exportWALFile(t *testing.T) { | ||
68 | + for _, c := range []struct { | ||
69 | + corpus corpus | ||
70 | + lines []string | ||
71 | + }{ | ||
72 | + {corpus: basicCorpus, lines: basicCorpusExpLines}, | ||
73 | + {corpus: escapeStringCorpus, lines: escCorpusExpLines}, | ||
74 | + } { | ||
75 | + walFile := writeCorpusToWALFile(c.corpus) | ||
76 | + defer os.Remove(walFile.Name()) | ||
77 | + | ||
78 | + var out bytes.Buffer | ||
79 | + if err := newCommand().exportWALFile(walFile.Name(), &out, func() {}); err != nil { | ||
80 | + t.Fatal(err) | ||
81 | + } | ||
82 | + | ||
83 | + lines := strings.Split(out.String(), "\n") | ||
84 | + for _, exp := range c.lines { | ||
85 | + found := false | ||
86 | + for _, l := range lines { | ||
87 | + if exp == l { | ||
88 | + found = true | ||
89 | + break | ||
90 | + } | ||
91 | + } | ||
92 | + | ||
93 | + if !found { | ||
94 | + t.Fatalf("expected line %q to be in exported output:\n%s", exp, out.String()) | ||
95 | + } | ||
96 | + } | ||
97 | + } | ||
98 | +} | ||
99 | + | ||
100 | +func Test_exportTSMFile(t *testing.T) { | ||
101 | + for _, c := range []struct { | ||
102 | + corpus corpus | ||
103 | + lines []string | ||
104 | + }{ | ||
105 | + {corpus: basicCorpus, lines: basicCorpusExpLines}, | ||
106 | + {corpus: escapeStringCorpus, lines: escCorpusExpLines}, | ||
107 | + } { | ||
108 | + tsmFile := writeCorpusToTSMFile(c.corpus) | ||
109 | + defer os.Remove(tsmFile.Name()) | ||
110 | + | ||
111 | + var out bytes.Buffer | ||
112 | + if err := newCommand().exportTSMFile(tsmFile.Name(), &out); err != nil { | ||
113 | + t.Fatal(err) | ||
114 | + } | ||
115 | + | ||
116 | + lines := strings.Split(out.String(), "\n") | ||
117 | + for _, exp := range c.lines { | ||
118 | + found := false | ||
119 | + for _, l := range lines { | ||
120 | + if exp == l { | ||
121 | + found = true | ||
122 | + break | ||
123 | + } | ||
124 | + } | ||
125 | + | ||
126 | + if !found { | ||
127 | + t.Fatalf("expected line %q to be in exported output:\n%s", exp, out.String()) | ||
128 | + } | ||
129 | + } | ||
130 | + } | ||
131 | +} | ||
132 | + | ||
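+// sink keeps benchmark results reachable so the compiler cannot optimize the export calls away.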
133 | +var sink interface{} | ||
134 | + | ||
135 | +func benchmarkExportTSM(c corpus, b *testing.B) { | ||
136 | + // Garbage collection is relatively likely to happen during export, so track allocations. | ||
137 | + b.ReportAllocs() | ||
138 | + | ||
139 | + f := writeCorpusToTSMFile(c) | ||
140 | + defer os.Remove(f.Name()) | ||
141 | + | ||
142 | + cmd := newCommand() | ||
143 | + var out bytes.Buffer | ||
144 | + b.ResetTimer() | ||
145 | + b.StartTimer() | ||
146 | + for i := 0; i < b.N; i++ { | ||
147 | + if err := cmd.exportTSMFile(f.Name(), &out); err != nil { | ||
148 | + b.Fatal(err) | ||
149 | + } | ||
150 | + | ||
151 | + sink = out.Bytes() | ||
152 | + out.Reset() | ||
153 | + } | ||
154 | +} | ||
155 | + | ||
156 | +func BenchmarkExportTSMFloats_100s_250vps(b *testing.B) { | ||
157 | + benchmarkExportTSM(makeFloatsCorpus(100, 250), b) | ||
158 | +} | ||
159 | + | ||
160 | +func BenchmarkExportTSMInts_100s_250vps(b *testing.B) { | ||
161 | + benchmarkExportTSM(makeIntsCorpus(100, 250), b) | ||
162 | +} | ||
163 | + | ||
164 | +func BenchmarkExportTSMBools_100s_250vps(b *testing.B) { | ||
165 | + benchmarkExportTSM(makeBoolsCorpus(100, 250), b) | ||
166 | +} | ||
167 | + | ||
168 | +func BenchmarkExportTSMStrings_100s_250vps(b *testing.B) { | ||
169 | + benchmarkExportTSM(makeStringsCorpus(100, 250), b) | ||
170 | +} | ||
171 | + | ||
172 | +func benchmarkExportWAL(c corpus, b *testing.B) { | ||
173 | + // Garbage collection is relatively likely to happen during export, so track allocations. | ||
174 | + b.ReportAllocs() | ||
175 | + | ||
176 | + f := writeCorpusToWALFile(c) | ||
177 | + defer os.Remove(f.Name()) | ||
178 | + | ||
179 | + cmd := newCommand() | ||
180 | + var out bytes.Buffer | ||
181 | + b.ResetTimer() | ||
182 | + b.StartTimer() | ||
183 | + for i := 0; i < b.N; i++ { | ||
184 | + if err := cmd.exportWALFile(f.Name(), &out, func() {}); err != nil { | ||
185 | + b.Fatal(err) | ||
186 | + } | ||
187 | + | ||
188 | + sink = out.Bytes() | ||
189 | + out.Reset() | ||
190 | + } | ||
191 | +} | ||
192 | + | ||
193 | +func BenchmarkExportWALFloats_100s_250vps(b *testing.B) { | ||
194 | + benchmarkExportWAL(makeFloatsCorpus(100, 250), b) | ||
195 | +} | ||
196 | + | ||
197 | +func BenchmarkExportWALInts_100s_250vps(b *testing.B) { | ||
198 | + benchmarkExportWAL(makeIntsCorpus(100, 250), b) | ||
199 | +} | ||
200 | + | ||
201 | +func BenchmarkExportWALBools_100s_250vps(b *testing.B) { | ||
202 | + benchmarkExportWAL(makeBoolsCorpus(100, 250), b) | ||
203 | +} | ||
204 | + | ||
205 | +func BenchmarkExportWALStrings_100s_250vps(b *testing.B) { | ||
206 | + benchmarkExportWAL(makeStringsCorpus(100, 250), b) | ||
207 | +} | ||
208 | + | ||
209 | +// newCommand returns a command that discards its output and accepts all timestamps. | ||
210 | +func newCommand() *Command { | ||
211 | + return &Command{ | ||
212 | + Stderr: ioutil.Discard, | ||
213 | + Stdout: ioutil.Discard, | ||
214 | + startTime: math.MinInt64, | ||
215 | + endTime: math.MaxInt64, | ||
216 | + } | ||
217 | +} | ||
218 | + | ||
219 | +// makeCorpus returns a new corpus filled with values generated by fn. | ||
220 | +// The RNG passed to fn is seeded with numSeries * numValuesPerSeries, for predictable output. | ||
221 | +func makeCorpus(numSeries, numValuesPerSeries int, fn func(*rand.Rand) interface{}) corpus { | ||
222 | + rng := rand.New(rand.NewSource(int64(numSeries) * int64(numValuesPerSeries))) | ||
223 | + var unixNano int64 | ||
224 | + corpus := make(corpus, numSeries) | ||
225 | + for i := 0; i < numSeries; i++ { | ||
226 | + vals := make([]tsm1.Value, numValuesPerSeries) | ||
227 | + for j := 0; j < numValuesPerSeries; j++ { | ||
228 | + vals[j] = tsm1.NewValue(unixNano, fn(rng)) | ||
229 | + unixNano++ | ||
230 | + } | ||
231 | + | ||
232 | + k := fmt.Sprintf("m,t=%d", i) | ||
233 | + corpus[tsm1.SeriesFieldKey(k, "x")] = vals | ||
234 | + } | ||
235 | + | ||
236 | + return corpus | ||
237 | +} | ||
238 | + | ||
239 | +func makeFloatsCorpus(numSeries, numFloatsPerSeries int) corpus { | ||
240 | + return makeCorpus(numSeries, numFloatsPerSeries, func(rng *rand.Rand) interface{} { | ||
241 | + return rng.Float64() | ||
242 | + }) | ||
243 | +} | ||
244 | + | ||
245 | +func makeIntsCorpus(numSeries, numIntsPerSeries int) corpus { | ||
246 | + return makeCorpus(numSeries, numIntsPerSeries, func(rng *rand.Rand) interface{} { | ||
247 | + // This will only return positive integers. That's probably okay. | ||
248 | + return rng.Int63() | ||
249 | + }) | ||
250 | +} | ||
251 | + | ||
252 | +func makeBoolsCorpus(numSeries, numBoolsPerSeries int) corpus { | ||
253 | + return makeCorpus(numSeries, numBoolsPerSeries, func(rng *rand.Rand) interface{} { | ||
254 | + return rng.Int63n(2) == 1 | ||
255 | + }) | ||
256 | +} | ||
257 | + | ||
258 | +func makeStringsCorpus(numSeries, numStringsPerSeries int) corpus { | ||
259 | + return makeCorpus(numSeries, numStringsPerSeries, func(rng *rand.Rand) interface{} { | ||
260 | + // The string will randomly have 2-5 parts | ||
261 | + parts := make([]string, rng.Intn(4)+2) | ||
262 | + | ||
263 | + for i := range parts { | ||
264 | + // Each part is a random base36-encoded number | ||
265 | + parts[i] = strconv.FormatInt(rng.Int63(), 36) | ||
266 | + } | ||
267 | + | ||
268 | + // Join the individual parts with underscores. | ||
269 | + return strings.Join(parts, "_") | ||
270 | + }) | ||
271 | +} | ||
272 | + | ||
273 | +// writeCorpusToWALFile writes the given corpus as a WAL file, and returns a handle to that file. | ||
274 | +// It is the caller's responsibility to remove the returned temp file. | ||
275 | +// writeCorpusToWALFile will panic on any error that occurs. | ||
276 | +func writeCorpusToWALFile(c corpus) *os.File { | ||
277 | + walFile, err := ioutil.TempFile("", "export_test_corpus_wal") | ||
278 | + if err != nil { | ||
279 | + panic(err) | ||
280 | + } | ||
281 | + | ||
282 | + e := &tsm1.WriteWALEntry{Values: c} | ||
283 | + b, err := e.Encode(nil) | ||
284 | + if err != nil { | ||
285 | + panic(err) | ||
286 | + } | ||
287 | + | ||
288 | + w := tsm1.NewWALSegmentWriter(walFile) | ||
289 | + if err := w.Write(e.Type(), snappy.Encode(nil, b)); err != nil { | ||
290 | + panic(err) | ||
291 | + } | ||
292 | + | ||
293 | + // (*tsm1.WALSegmentWriter).sync isn't exported, but it only Syncs the file anyway. | ||
294 | + if err := walFile.Sync(); err != nil { | ||
295 | + panic(err) | ||
296 | + } | ||
297 | + | ||
298 | + return walFile | ||
299 | +} | ||
300 | + | ||
301 | +// writeCorpusToTSMFile writes the given corpus as a TSM file, and returns a handle to that file. | ||
302 | +// It is the caller's responsibility to remove the returned temp file. | ||
303 | +// writeCorpusToTSMFile will panic on any error that occurs. | ||
304 | +func writeCorpusToTSMFile(c corpus) *os.File { | ||
305 | + tsmFile, err := ioutil.TempFile("", "export_test_corpus_tsm") | ||
306 | + if err != nil { | ||
307 | + panic(err) | ||
308 | + } | ||
309 | + | ||
310 | + w, err := tsm1.NewTSMWriter(tsmFile) | ||
311 | + if err != nil { | ||
312 | + panic(err) | ||
313 | + } | ||
314 | + | ||
315 | + // Write the series in alphabetical order so that each test run is comparable, | ||
316 | + // given an identical corpus. | ||
317 | + keys := make([]string, 0, len(c)) | ||
318 | + for k := range c { | ||
319 | + keys = append(keys, k) | ||
320 | + } | ||
321 | + sort.Strings(keys) | ||
322 | + for _, k := range keys { | ||
323 | + if err := w.Write(k, c[k]); err != nil { | ||
324 | + panic(err) | ||
325 | + } | ||
326 | + } | ||
327 | + | ||
328 | + if err := w.WriteIndex(); err != nil { | ||
329 | + panic(err) | ||
330 | + } | ||
331 | + | ||
332 | + if err := w.Close(); err != nil { | ||
333 | + panic(err) | ||
334 | + } | ||
335 | + | ||
336 | + return tsmFile | ||
337 | +} |
1 | +// Package help contains the help for the influx_inspect command. | ||
2 | +package help | ||
3 | + | ||
4 | +import ( | ||
5 | + "fmt" | ||
6 | + "io" | ||
7 | + "os" | ||
8 | + "strings" | ||
9 | +) | ||
10 | + | ||
11 | +// Command displays help for command-line sub-commands. | ||
12 | +type Command struct { | ||
13 | + Stdout io.Writer | ||
14 | +} | ||
15 | + | ||
16 | +// NewCommand returns a new instance of Command. | ||
17 | +func NewCommand() *Command { | ||
18 | + return &Command{ | ||
19 | + Stdout: os.Stdout, | ||
20 | + } | ||
21 | +} | ||
22 | + | ||
23 | +// Run executes the command. | ||
24 | +func (cmd *Command) Run(args ...string) error { | ||
25 | + fmt.Fprintln(cmd.Stdout, strings.TrimSpace(usage)) | ||
26 | + return nil | ||
27 | +} | ||
28 | + | ||
29 | +const usage = ` | ||
30 | +Usage: influx_inspect [[command] [arguments]] | ||
31 | + | ||
32 | +The commands are: | ||
33 | + | ||
34 | + dumptsm dumps low-level details about tsm1 files | ||
35 | + export exports raw data from a shard to line protocol | ||
36 | + help display this help message | ||
37 | + report displays a shard level report | ||
38 | + verify verifies integrity of TSM files | ||
39 | + | ||
40 | +"help" is the default command. | ||
41 | + | ||
42 | +Use "influx_inspect [command] -help" for more information about a command. | ||
43 | +` |
1 | +// The influx_inspect command displays detailed information about InfluxDB data files. | ||
2 | +package main | ||
3 | + | ||
4 | +import ( | ||
5 | + "fmt" | ||
6 | + "io" | ||
7 | + "log" | ||
8 | + "os" | ||
9 | + | ||
10 | + "github.com/influxdata/influxdb/cmd" | ||
11 | + "github.com/influxdata/influxdb/cmd/influx_inspect/dumptsm" | ||
12 | + "github.com/influxdata/influxdb/cmd/influx_inspect/export" | ||
13 | + "github.com/influxdata/influxdb/cmd/influx_inspect/help" | ||
14 | + "github.com/influxdata/influxdb/cmd/influx_inspect/report" | ||
15 | + "github.com/influxdata/influxdb/cmd/influx_inspect/verify" | ||
16 | + _ "github.com/influxdata/influxdb/tsdb/engine" | ||
17 | +) | ||
18 | + | ||
19 | +func main() { | ||
20 | + m := NewMain() | ||
21 | + if err := m.Run(os.Args[1:]...); err != nil { | ||
22 | + fmt.Fprintln(os.Stderr, err) | ||
23 | + os.Exit(1) | ||
24 | + } | ||
25 | +} | ||
26 | + | ||
27 | +// Main represents the program execution. | ||
28 | +type Main struct { | ||
29 | + Logger *log.Logger | ||
30 | + | ||
31 | + Stdin io.Reader | ||
32 | + Stdout io.Writer | ||
33 | + Stderr io.Writer | ||
34 | +} | ||
35 | + | ||
36 | +// NewMain returns a new instance of Main. | ||
37 | +func NewMain() *Main { | ||
38 | + return &Main{ | ||
39 | + Logger: log.New(os.Stderr, "[influx_inspect] ", log.LstdFlags), | ||
40 | + Stdin: os.Stdin, | ||
41 | + Stdout: os.Stdout, | ||
42 | + Stderr: os.Stderr, | ||
43 | + } | ||
44 | +} | ||
45 | + | ||
46 | +// Run determines and runs the command specified by the CLI args. | ||
47 | +func (m *Main) Run(args ...string) error { | ||
48 | + name, args := cmd.ParseCommandName(args) | ||
49 | + | ||
50 | + // Dispatch to the sub-command named by the first argument. | ||
51 | + switch name { | ||
52 | + case "", "help": | ||
53 | + if err := help.NewCommand().Run(args...); err != nil { | ||
54 | + return fmt.Errorf("help: %s", err) | ||
55 | + } | ||
56 | + case "dumptsmdev": | ||
57 | + fmt.Fprintf(m.Stderr, "warning: dumptsmdev is deprecated, use dumptsm instead.\n") | ||
58 | + fallthrough | ||
59 | + case "dumptsm": | ||
60 | + c := dumptsm.NewCommand() | ||
61 | + if err := c.Run(args...); err != nil { | ||
62 | + return fmt.Errorf("dumptsm: %s", err) | ||
63 | + } | ||
64 | + case "export": | ||
65 | + c := export.NewCommand() | ||
66 | + if err := c.Run(args...); err != nil { | ||
67 | + return fmt.Errorf("export: %s", err) | ||
68 | + } | ||
69 | + case "report": | ||
70 | + c := report.NewCommand() | ||
71 | + if err := c.Run(args...); err != nil { | ||
72 | + return fmt.Errorf("report: %s", err) | ||
73 | + } | ||
74 | + case "verify": | ||
75 | + c := verify.NewCommand() | ||
76 | + if err := c.Run(args...); err != nil { | ||
77 | + return fmt.Errorf("verify: %s", err) | ||
78 | + } | ||
79 | + default: | ||
80 | + return fmt.Errorf(`unknown command "%s"`+"\n"+`Run 'influx_inspect help' for usage`+"\n\n", name) | ||
81 | + } | ||
82 | + | ||
83 | + return nil | ||
84 | +} |
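For context, `Run` takes the sub-command name first, followed by that command's own flags and arguments. A minimal sketch of invoking it directly, e.g. from a test (the shard path is illustrative):

```go
m := NewMain()
// Equivalent to: influx_inspect report -detailed /var/lib/influxdb/data/mydb/autogen/1
if err := m.Run("report", "-detailed", "/var/lib/influxdb/data/mydb/autogen/1"); err != nil {
	fmt.Fprintln(os.Stderr, err)
}
```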
1 | +// Package report reports statistics about TSM files. | ||
2 | +package report | ||
3 | + | ||
4 | +import ( | ||
5 | + "flag" | ||
6 | + "fmt" | ||
7 | + "io" | ||
8 | + "os" | ||
9 | + "path/filepath" | ||
10 | + "strconv" | ||
11 | + "strings" | ||
12 | + "text/tabwriter" | ||
13 | + "time" | ||
14 | + | ||
15 | + "github.com/influxdata/influxdb/models" | ||
16 | + "github.com/influxdata/influxdb/tsdb/engine/tsm1" | ||
17 | + "github.com/retailnext/hllpp" | ||
18 | +) | ||
19 | + | ||
20 | +// Command represents the program execution for "influx_inspect report". | ||
21 | +type Command struct { | ||
22 | + Stderr io.Writer | ||
23 | + Stdout io.Writer | ||
24 | + | ||
25 | + dir string | ||
26 | + pattern string | ||
27 | + detailed bool | ||
28 | +} | ||
29 | + | ||
30 | +// NewCommand returns a new instance of Command. | ||
31 | +func NewCommand() *Command { | ||
32 | + return &Command{ | ||
33 | + Stderr: os.Stderr, | ||
34 | + Stdout: os.Stdout, | ||
35 | + } | ||
36 | +} | ||
37 | + | ||
38 | +// Run executes the command. | ||
39 | +func (cmd *Command) Run(args ...string) error { | ||
40 | + fs := flag.NewFlagSet("report", flag.ExitOnError) | ||
41 | + fs.StringVar(&cmd.pattern, "pattern", "", "Include only files matching a pattern") | ||
42 | + fs.BoolVar(&cmd.detailed, "detailed", false, "Report detailed cardinality estimates") | ||
43 | + | ||
44 | + fs.SetOutput(cmd.Stdout) | ||
45 | + fs.Usage = cmd.printUsage | ||
46 | + | ||
47 | + if err := fs.Parse(args); err != nil { | ||
48 | + return err | ||
49 | + } | ||
50 | + cmd.dir = fs.Arg(0) | ||
51 | + | ||
52 | + start := time.Now() | ||
53 | + | ||
54 | + files, err := filepath.Glob(filepath.Join(cmd.dir, fmt.Sprintf("*.%s", tsm1.TSMFileExtension))) | ||
55 | + if err != nil { | ||
56 | + return err | ||
57 | + } | ||
58 | + | ||
59 | + var filtered []string | ||
60 | + if cmd.pattern != "" { | ||
61 | + for _, f := range files { | ||
62 | + if strings.Contains(f, cmd.pattern) { | ||
63 | + filtered = append(filtered, f) | ||
64 | + } | ||
65 | + } | ||
66 | + files = filtered | ||
67 | + } | ||
68 | + | ||
69 | + if len(files) == 0 { | ||
70 | + return fmt.Errorf("no tsm files at %v", cmd.dir) | ||
71 | + } | ||
72 | + | ||
73 | + tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0) | ||
74 | + fmt.Fprintln(tw, strings.Join([]string{"File", "Series", "Load Time"}, "\t")) | ||
75 | + | ||
76 | + totalSeries := hllpp.New() | ||
77 | + tagCardinalities := map[string]*hllpp.HLLPP{} | ||
78 | + measCardinalities := map[string]*hllpp.HLLPP{} | ||
79 | + fieldCardinalities := map[string]*hllpp.HLLPP{} | ||
80 | + | ||
81 | + for _, f := range files { | ||
82 | + file, err := os.OpenFile(f, os.O_RDONLY, 0600) | ||
83 | + if err != nil { | ||
84 | + fmt.Fprintf(cmd.Stderr, "error: %s: %v. Skipping.\n", f, err) | ||
85 | + continue | ||
86 | + } | ||
87 | + | ||
88 | + loadStart := time.Now() | ||
89 | + reader, err := tsm1.NewTSMReader(file) | ||
90 | + if err != nil { | ||
91 | + fmt.Fprintf(cmd.Stderr, "error: %s: %v. Skipping.\n", file.Name(), err) | ||
92 | + continue | ||
93 | + } | ||
94 | + loadTime := time.Since(loadStart) | ||
95 | + | ||
96 | + seriesCount := reader.KeyCount() | ||
97 | + for i := 0; i < seriesCount; i++ { | ||
98 | + key, _ := reader.KeyAt(i) | ||
99 | + totalSeries.Add([]byte(key)) | ||
100 | + | ||
101 | + if cmd.detailed { | ||
102 | + sep := strings.Index(string(key), "#!~#") | ||
103 | + seriesKey, field := key[:sep], key[sep+4:] | ||
104 | + measurement, tags, _ := models.ParseKey(seriesKey) | ||
105 | + | ||
106 | + measCount, ok := measCardinalities[measurement] | ||
107 | + if !ok { | ||
108 | + measCount = hllpp.New() | ||
109 | + measCardinalities[measurement] = measCount | ||
110 | + } | ||
111 | + measCount.Add([]byte(key)) | ||
112 | + | ||
113 | + fieldCount, ok := fieldCardinalities[measurement] | ||
114 | + if !ok { | ||
115 | + fieldCount = hllpp.New() | ||
116 | + fieldCardinalities[measurement] = fieldCount | ||
117 | + } | ||
118 | + fieldCount.Add([]byte(field)) | ||
119 | + | ||
120 | + for _, t := range tags { | ||
121 | + tagCount, ok := tagCardinalities[string(t.Key)] | ||
122 | + if !ok { | ||
123 | + tagCount = hllpp.New() | ||
124 | + tagCardinalities[string(t.Key)] = tagCount | ||
125 | + } | ||
126 | + tagCount.Add(t.Value) | ||
127 | + } | ||
128 | + } | ||
129 | + } | ||
130 | + reader.Close() | ||
131 | + | ||
132 | + fmt.Fprintln(tw, strings.Join([]string{ | ||
133 | + filepath.Base(file.Name()), | ||
134 | + strconv.FormatInt(int64(seriesCount), 10), | ||
135 | + loadTime.String(), | ||
136 | + }, "\t")) | ||
137 | + tw.Flush() | ||
138 | + } | ||
139 | + | ||
140 | + tw.Flush() | ||
141 | + fmt.Fprintln(cmd.Stdout) | ||
142 | + fmt.Fprintf(cmd.Stdout, "Statistics\n") | ||
143 | + fmt.Fprintf(cmd.Stdout, " Series:\n") | ||
144 | + fmt.Fprintf(cmd.Stdout, " Total (est): %d\n", totalSeries.Count()) | ||
145 | + if cmd.detailed { | ||
146 | + fmt.Fprintf(cmd.Stdout, " Measurements (est):\n") | ||
147 | + for t, card := range measCardinalities { | ||
148 | + fmt.Fprintf(cmd.Stdout, " %v: %d (%d%%)\n", t, card.Count(), int((float64(card.Count())/float64(totalSeries.Count()))*100)) | ||
149 | + } | ||
150 | + | ||
151 | + fmt.Fprintf(cmd.Stdout, " Fields (est):\n") | ||
152 | + for t, card := range fieldCardinalities { | ||
153 | + fmt.Fprintf(cmd.Stdout, " %v: %d\n", t, card.Count()) | ||
154 | + } | ||
155 | + | ||
156 | + fmt.Fprintf(cmd.Stdout, " Tags (est):\n") | ||
157 | + for t, card := range tagCardinalities { | ||
158 | + fmt.Fprintf(cmd.Stdout, " %v: %d\n", t, card.Count()) | ||
159 | + } | ||
160 | + } | ||
161 | + | ||
162 | + fmt.Fprintf(cmd.Stdout, "Completed in %s\n", time.Since(start)) | ||
163 | + return nil | ||
164 | +} | ||
165 | + | ||
166 | +// printUsage prints the usage message to STDOUT. | ||
167 | +func (cmd *Command) printUsage() { | ||
168 | + usage := `Displays a shard-level report. | ||
169 | + | ||
170 | +Usage: influx_inspect report [flags] | ||
171 | + | ||
172 | + -pattern <pattern> | ||
173 | + Include only files matching a pattern. | ||
174 | + -detailed | ||
175 | + Report detailed cardinality estimates. | ||
176 | + Defaults to "false". | ||
177 | +` | ||
178 | + | ||
179 | + fmt.Fprint(cmd.Stdout, usage) | ||
180 | +} |
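Putting the flags together: `report` takes the shard directory as its positional argument, so a typical invocation (path illustrative) looks like:

```
$ influx_inspect report -detailed /var/lib/influxdb/data/mydb/autogen/1
```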
1 | +// Package verify verifies integrity of TSM files. | ||
2 | +package verify | ||
3 | + | ||
4 | +import ( | ||
5 | + "flag" | ||
6 | + "fmt" | ||
7 | + "hash/crc32" | ||
8 | + "io" | ||
9 | + "os" | ||
10 | + "path/filepath" | ||
11 | + "text/tabwriter" | ||
12 | + "time" | ||
13 | + | ||
14 | + "github.com/influxdata/influxdb/tsdb/engine/tsm1" | ||
15 | +) | ||
16 | + | ||
17 | +// Command represents the program execution for "influx_inspect verify". | ||
18 | +type Command struct { | ||
19 | + Stderr io.Writer | ||
20 | + Stdout io.Writer | ||
21 | +} | ||
22 | + | ||
23 | +// NewCommand returns a new instance of Command. | ||
24 | +func NewCommand() *Command { | ||
25 | + return &Command{ | ||
26 | + Stderr: os.Stderr, | ||
27 | + Stdout: os.Stdout, | ||
28 | + } | ||
29 | +} | ||
30 | + | ||
31 | +// Run executes the command. | ||
32 | +func (cmd *Command) Run(args ...string) error { | ||
33 | + var path string | ||
34 | + fs := flag.NewFlagSet("verify", flag.ExitOnError) | ||
35 | + fs.StringVar(&path, "dir", os.Getenv("HOME")+"/.influxdb", "Root storage path. [$HOME/.influxdb]") | ||
36 | + | ||
37 | + fs.SetOutput(cmd.Stdout) | ||
38 | + fs.Usage = cmd.printUsage | ||
39 | + | ||
40 | + if err := fs.Parse(args); err != nil { | ||
41 | + return err | ||
42 | + } | ||
43 | + | ||
44 | + start := time.Now() | ||
45 | + dataPath := filepath.Join(path, "data") | ||
46 | + | ||
47 | + brokenBlocks := 0 | ||
48 | + totalBlocks := 0 | ||
49 | + | ||
50 | + // Build the TSM file extension once, rather than inside the walk below | ||
51 | + ext := fmt.Sprintf(".%s", tsm1.TSMFileExtension) | ||
52 | + | ||
53 | + // Get all TSM files by walking through the data dir | ||
54 | + files := []string{} | ||
55 | + err := filepath.Walk(dataPath, func(path string, f os.FileInfo, err error) error { | ||
56 | + if err != nil { | ||
57 | + return err | ||
58 | + } | ||
59 | + if filepath.Ext(path) == ext { | ||
60 | + files = append(files, path) | ||
61 | + } | ||
62 | + return nil | ||
63 | + }) | ||
64 | + if err != nil { | ||
65 | + return err | ||
66 | + } | ||
67 | + | ||
68 | + tw := tabwriter.NewWriter(cmd.Stdout, 16, 8, 0, '\t', 0) | ||
69 | + | ||
70 | + // Verify the checksums of every block in every file | ||
71 | + for _, f := range files { | ||
72 | + file, err := os.OpenFile(f, os.O_RDONLY, 0600) | ||
73 | + if err != nil { | ||
74 | + return err | ||
75 | + } | ||
76 | + | ||
77 | + reader, err := tsm1.NewTSMReader(file) | ||
78 | + if err != nil { | ||
79 | + return err | ||
80 | + } | ||
81 | + | ||
82 | + blockItr := reader.BlockIterator() | ||
83 | + brokenFileBlocks := 0 | ||
84 | + count := 0 | ||
85 | + for blockItr.Next() { | ||
86 | + totalBlocks++ | ||
87 | + key, _, _, _, checksum, buf, err := blockItr.Read() | ||
88 | + if err != nil { | ||
89 | + brokenBlocks++ | ||
90 | + brokenFileBlocks++ | ||
91 | + fmt.Fprintf(tw, "%s: could not get checksum for key %v block %d due to error: %q\n", f, key, count, err) | ||
92 | + } else if expected := crc32.ChecksumIEEE(buf); checksum != expected { | ||
93 | + brokenBlocks++ | ||
94 | + brokenFileBlocks++ | ||
95 | + fmt.Fprintf(tw, "%s: got %d but expected %d for key %v, block %d\n", f, checksum, expected, key, count) | ||
96 | + } | ||
95 | + count++ | ||
96 | + } | ||
97 | + if brokenFileBlocks == 0 { | ||
98 | + fmt.Fprintf(tw, "%s: healthy\n", f) | ||
99 | + } | ||
100 | + reader.Close() | ||
101 | + } | ||
102 | + | ||
103 | + fmt.Fprintf(tw, "Broken Blocks: %d / %d, in %vs\n", brokenBlocks, totalBlocks, time.Since(start).Seconds()) | ||
104 | + tw.Flush() | ||
105 | + return nil | ||
106 | +} | ||
107 | + | ||
108 | +// printUsage prints the usage message to STDOUT. | ||
109 | +func (cmd *Command) printUsage() { | ||
110 | + usage := fmt.Sprintf(`Verifies the integrity of TSM files. | ||
111 | + | ||
112 | +Usage: influx_inspect verify [flags] | ||
113 | + | ||
114 | + -dir <path> | ||
115 | + Root storage path | ||
116 | + Defaults to "%[1]s/.influxdb". | ||
117 | + `, os.Getenv("HOME")) | ||
118 | + | ||
119 | + fmt.Fprint(cmd.Stdout, usage) | ||
120 | +} |
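Note that `-dir` expects the root storage path; the command appends `data` itself before walking the tree for `.tsm` files. A typical run (path illustrative):

```
$ influx_inspect verify -dir /var/lib/influxdb
```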
1 | +# `influx_stress` | ||
2 | + | ||
3 | +If you run into any issues with this tool please mention @jackzampolin when you create an issue. | ||
4 | + | ||
5 | +## Ways to run | ||
6 | + | ||
7 | +### `influx_stress` | ||
8 | +This runs a basic stress test with the [default config](https://github.com/influxdata/influxdb/blob/master/stress/stress.toml). For more information on the configuration file format, see that default config. | ||
9 | + | ||
10 | +### `influx_stress -config someConfig.toml` | ||
11 | +This runs the stress test with a valid configuration file located at `someConfig.toml`. | ||
12 | + | ||
13 | +### `influx_stress -v2 -config someConfig.iql` | ||
14 | +This runs the stress test with a valid `v2` configuration file. For more information about the `v2` stress test see the [v2 stress README](https://github.com/influxdata/influxdb/blob/master/stress/v2/README.md). | ||
15 | + | ||
16 | +## Flags | ||
17 | + | ||
18 | +If flags are set, they override the corresponding settings from any config file passed in (see the example below). | ||
19 | + | ||
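For example (values illustrative), the following overrides the target address and database from `someConfig.toml`:

```
$ influx_stress -config someConfig.toml -database stress_results -addr http://localhost:8086
```
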
20 | +### `-addr` string | ||
21 | +Address of the database where write and query response times will be persisted (e.g., http://localhost:8086) | ||
22 | + | ||
23 | +`default` = "http://localhost:8086" | ||
24 | + | ||
25 | +### `-config` string | ||
26 | +The relative path to the stress test configuration file. | ||
27 | + | ||
28 | +`default` = [config](https://github.com/influxdata/influxdb/blob/master/stress/stress.toml) | ||
29 | + | ||
30 | +### `-cpuprofile` filename | ||
31 | +Writes Go's CPU profile to `filename` | ||
32 | + | ||
33 | +`default` = no profiling | ||
34 | + | ||
35 | +### `-database` string | ||
36 | +Name of the database on `-addr` to which `influx_stress` will persist write and query response times | ||
37 | + | ||
38 | +`default` = "stress" | ||
39 | + | ||
40 | +### `-tags` value | ||
41 | +A comma-separated list of tags to add to write and query response times. | ||
42 | + | ||
43 | +`default` = "" |
1 | +# This section can be removed | ||
2 | +[provision] | ||
3 | + # The basic provisioner simply deletes and re-creates the database. | ||
4 | + # If `reset_database` is false, it will not attempt to delete the database. | ||
5 | + [provision.basic] | ||
6 | + # If enabled the provisioner will actually run | ||
7 | + enabled = true | ||
8 | + # Address of the instance that is to be provisioned | ||
9 | + address = "localhost:8086" | ||
10 | + # Database that will be created/deleted | ||
11 | + database = "stress" | ||
12 | + # Attempt to delete database | ||
13 | + reset_database = true | ||
14 | + | ||
15 | +# This section cannot be commented out | ||
16 | +# To prevent writes set `enabled=false` | ||
17 | +# in [write.influx_client.basic] | ||
18 | +[write] | ||
19 | + [write.point_generator] | ||
20 | + # The basic point generator will generate points of the form | ||
21 | + # `cpu,host=server-%v,location=us-west value=234 123456` | ||
22 | + [write.point_generator.basic] | ||
23 | + # number of points that will be written for each of the series | ||
24 | + point_count = 100 | ||
25 | + # number of series | ||
26 | + series_count = 100000 | ||
27 | + # How much time between each timestamp | ||
28 | + tick = "10s" | ||
29 | + # Randomize timestamp a bit (not functional) | ||
30 | + jitter = true | ||
31 | + # Precision of points that are being written | ||
32 | + precision = "s" | ||
33 | + # name of the measurement that will be written | ||
34 | + measurement = "cpu" | ||
35 | + # The date for the first point that is written into influx | ||
36 | + start_date = "2006-Jan-02" | ||
37 | + # Defines a tag for a series | ||
38 | + [[write.point_generator.basic.tag]] | ||
39 | + key = "host" | ||
40 | + value = "server" | ||
41 | + [[write.point_generator.basic.tag]] | ||
42 | + key = "location" | ||
43 | + value = "us-west" | ||
44 | + # Defines a field for a series | ||
45 | + [[write.point_generator.basic.field]] | ||
46 | + key = "value" | ||
47 | + value = "float64" # supported types: float64, int, bool | ||
48 | + | ||
49 | + | ||
50 | + [write.influx_client] | ||
51 | + [write.influx_client.basic] | ||
52 | + # If enabled the writer will actually write | ||
53 | + enabled = true | ||
54 | + # Addresses is an array of the InfluxDB instances | ||
55 | + addresses = ["localhost:8086"] # stress_test_server runs on port 1234 | ||
56 | + # Database that is being written to | ||
57 | + database = "stress" | ||
58 | + # Precision of points that are being written | ||
59 | + precision = "s" | ||
60 | + # Size of batches that are sent to db | ||
61 | + batch_size = 10000 | ||
62 | + # Interval between each batch | ||
63 | + batch_interval = "0s" | ||
64 | + # How many concurrent writers to the db | ||
65 | + concurrency = 10 | ||
66 | + # ssl enabled? | ||
67 | + ssl = false | ||
68 | + # format of points that are written to influxdb | ||
69 | + format = "line_http" # line_udp (not supported yet), graphite_tcp (not supported yet), graphite_udp (not supported yet) | ||
70 | + | ||
71 | +# This section can be removed | ||
72 | +[read] | ||
73 | + [read.query_generator] | ||
74 | + [read.query_generator.basic] | ||
75 | + # Template of the query that will be run against the instance | ||
76 | + template = "SELECT count(value) FROM cpu where host='server-%v'" | ||
77 | + # How many times the templated query will be run | ||
78 | + query_count = 250 | ||
79 | + | ||
80 | + [read.query_client] | ||
81 | + [read.query_client.basic] | ||
82 | + # if enabled the reader will actually read | ||
83 | + enabled = true | ||
84 | + # Address of the instance that will be queried | ||
85 | + addresses = ["localhost:8086"] | ||
86 | + # Database that will be queried | ||
87 | + database = "stress" | ||
88 | + # Interval between queries | ||
89 | + query_interval = "100ms" | ||
90 | + # Number of concurrent queriers | ||
91 | + concurrency = 1 | ||
92 | + |
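Given the generator settings above, emitted points follow the template shown in the comments. Two illustrative consecutive points for one series (field value per the template; second-precision timestamps are made up, 10s apart per `tick`):

```
cpu,host=server-1,location=us-west value=234 1136181634
cpu,host=server-1,location=us-west value=234 1136181644
```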
1 | +// Command influx_stress is deprecated; use github.com/influxdata/influx-stress instead. | ||
2 | +package main | ||
3 | + | ||
4 | +import ( | ||
5 | + "flag" | ||
6 | + "fmt" | ||
7 | + "log" | ||
8 | + "os" | ||
9 | + "runtime/pprof" | ||
10 | + | ||
11 | + "github.com/influxdata/influxdb/stress" | ||
12 | + v2 "github.com/influxdata/influxdb/stress/v2" | ||
13 | +) | ||
14 | + | ||
15 | +var ( | ||
16 | + useV2 = flag.Bool("v2", false, "Use version 2 of stress tool") | ||
17 | + config = flag.String("config", "", "The stress test file") | ||
18 | + cpuprofile = flag.String("cpuprofile", "", "Write the cpu profile to `filename`") | ||
19 | + db = flag.String("db", "", "target database within test system for write and query load") | ||
20 | +) | ||
21 | + | ||
22 | +func main() { | ||
23 | + o := stress.NewOutputConfig() | ||
24 | + flag.Parse() | ||
25 | + | ||
26 | + if *cpuprofile != "" { | ||
27 | + f, err := os.Create(*cpuprofile) | ||
28 | + if err != nil { | ||
29 | + fmt.Println(err) | ||
30 | + return | ||
31 | + } | ||
32 | + if err := pprof.StartCPUProfile(f); err != nil { | ||
33 | + fmt.Println(err) | ||
34 | + return | ||
35 | + } | ||
33 | + defer pprof.StopCPUProfile() | ||
34 | + } | ||
35 | + | ||
36 | + if *useV2 { | ||
37 | + if *config != "" { | ||
38 | + v2.RunStress(*config) | ||
39 | + } else { | ||
40 | + v2.RunStress("stress/v2/iql/file.iql") | ||
41 | + } | ||
42 | + } else { | ||
43 | + | ||
44 | + c, err := stress.NewConfig(*config) | ||
45 | + if err != nil { | ||
46 | + log.Fatal(err) | ||
48 | + } | ||
49 | + | ||
50 | + if *db != "" { | ||
51 | + c.Provision.Basic.Database = *db | ||
52 | + c.Write.InfluxClients.Basic.Database = *db | ||
53 | + c.Read.QueryClients.Basic.Database = *db | ||
54 | + } | ||
55 | + | ||
56 | + w := stress.NewWriter(c.Write.PointGenerators.Basic, &c.Write.InfluxClients.Basic) | ||
57 | + r := stress.NewQuerier(&c.Read.QueryGenerators.Basic, &c.Read.QueryClients.Basic) | ||
58 | + s := stress.NewStressTest(&c.Provision.Basic, w, r) | ||
59 | + | ||
60 | + bw := stress.NewBroadcastChannel() | ||
61 | + bw.Register(c.Write.InfluxClients.Basic.BasicWriteHandler) | ||
62 | + bw.Register(o.HTTPHandler("write")) | ||
63 | + | ||
64 | + br := stress.NewBroadcastChannel() | ||
65 | + br.Register(c.Read.QueryClients.Basic.BasicReadHandler) | ||
66 | + br.Register(o.HTTPHandler("read")) | ||
67 | + | ||
68 | + s.Start(bw.Handle, br.Handle) | ||
69 | + | ||
70 | + } | ||
71 | +} |
1 | +# Converting b1 and bz1 shards to tsm1 | ||
2 | + | ||
3 | +`influx_tsm` is a tool for converting b1 and bz1 shards to tsm1 | ||
4 | +format. Converting shards to tsm1 format results in a very significant | ||
5 | +reduction in disk usage, and significantly improved write-throughput, | ||
6 | +when writing data into those shards. | ||
7 | + | ||
8 | +Conversion can be controlled on a database-by-database basis. By | ||
9 | +default a database is backed up before it is converted, allowing you | ||
10 | +to roll back any changes. Because of the backup process, ensure the | ||
11 | +host system has at least as much free disk space as the disk space | ||
12 | +consumed by the _data_ directory of your InfluxDB system. | ||
13 | + | ||
14 | +The tool automatically ignores tsm1 shards, and can be run | ||
15 | +idempotently on any database. | ||
16 | + | ||
17 | +Conversion is an offline process, and the InfluxDB system must be | ||
18 | +stopped during conversion. However the conversion process reads and | ||
19 | +writes shards directly on disk and should be fast. | ||
20 | + | ||
21 | +## Steps | ||
22 | + | ||
23 | +Follow these steps to perform a conversion. | ||
24 | + | ||
25 | +* Identify the databases you wish to convert. You can convert one or more databases at a time. By default all databases are converted. | ||
26 | +* Decide on parallel operation. By default the conversion performs each operation serially. This minimizes load on the host system performing the conversion, but also takes the most time. If you wish to minimize the time conversion takes, enable parallel mode. Conversion will then perform as many operations as possible in parallel, but the process may place significant load on the host system (CPU, disk, and RAM usage will all increase). | ||
27 | +* Stop all write-traffic to your InfluxDB system. | ||
28 | +* Restart the InfluxDB service and wait until all WAL data is flushed to disk -- this has completed when the system responds to queries. This is to ensure all data is present in shards. | ||
29 | +* Stop the InfluxDB service. It should not be restarted until conversion is complete. | ||
30 | +* Run conversion tool. Depending on the size of the data directory, this might be a lengthy operation. Consider running the conversion tool under a "screen" session to avoid any interruptions. | ||
31 | +* If you did not run the conversion tool as the same user that runs InfluxDB, you may need to set the correct read-and-write permissions on the new tsm1 directories. | ||
32 | +* Restart node and ensure data looks correct. | ||
33 | +* If everything looks OK, you may then wish to remove or archive the backed-up databases. | ||
34 | +* Restart write traffic. | ||
35 | + | ||
36 | +## Example session | ||
37 | + | ||
38 | +Below is an example session, showing a database being converted. | ||
39 | + | ||
40 | +``` | ||
41 | +$ # Create a backup location that the `influxdb` user has full access to | ||
42 | +$ mkdir -m 0777 /path/to/influxdb_backup | ||
43 | +$ sudo -u influxdb influx_tsm -backup /path/to/influxdb_backup -parallel /var/lib/influxdb/data | ||
44 | + | ||
45 | +b1 and bz1 shard conversion. | ||
46 | +----------------------------------- | ||
47 | +Data directory is: /var/lib/influxdb/data | ||
48 | +Backup directory is: /path/to/influxdb_backup | ||
49 | +Databases specified: all | ||
50 | +Database backups enabled: yes | ||
51 | +Parallel mode enabled (GOMAXPROCS): yes (8) | ||
52 | + | ||
53 | + | ||
54 | +Found 1 shards that will be converted. | ||
55 | + | ||
56 | +Database Retention Path Engine Size | ||
57 | +_internal monitor /var/lib/influxdb/data/_internal/monitor/1 bz1 65536 | ||
58 | + | ||
59 | +These shards will be converted. Proceed? y/N: y | ||
60 | +Conversion starting.... | ||
61 | +Backing up 1 databases... | ||
62 | +2016/01/28 12:23:43.699266 Backup of database '_internal' started | ||
63 | +2016/01/28 12:23:43.699883 Backing up file /var/lib/influxdb/data/_internal/monitor/1 | ||
64 | +2016/01/28 12:23:43.700052 Database _internal backed up (851.776µs) | ||
65 | +2016/01/28 12:23:43.700320 Starting conversion of shard: /var/lib/influxdb/data/_internal/monitor/1 | ||
66 | +2016/01/28 12:23:43.706276 Conversion of /var/lib/influxdb/data/_internal/monitor/1 successful (6.040148ms) | ||
67 | + | ||
68 | +Summary statistics | ||
69 | +======================================== | ||
70 | +Databases converted: 1 | ||
71 | +Shards converted: 1 | ||
72 | +TSM files created: 1 | ||
73 | +Points read: 369 | ||
74 | +Points written: 369 | ||
75 | +NaN filtered: 0 | ||
76 | +Inf filtered: 0 | ||
77 | +Points without fields filtered: 0 | ||
78 | +Disk usage pre-conversion (bytes): 65536 | ||
79 | +Disk usage post-conversion (bytes): 11000 | ||
80 | +Reduction factor: 83% | ||
81 | +Bytes per TSM point: 29.81 | ||
82 | +Total conversion time: 7.330443ms | ||
83 | + | ||
84 | +$ # restart node, verify data | ||
85 | +$ sudo rm -r /path/to/influxdb_backup | ||
86 | +``` | ||
87 | + | ||
88 | +Note that the tool first lists the shards that will be converted, | ||
89 | +before asking for confirmation. You can abort the conversion process | ||
90 | +at this step if you just wish to see what would be converted, or if | ||
91 | +the list of shards does not look correct. | ||
92 | + | ||
93 | +__WARNING:__ If you run the `influx_tsm` tool as a user other than the | ||
94 | +`influxdb` user (or the user that the InfluxDB process runs under), | ||
95 | +please make sure to verify the shard permissions are correct prior to | ||
96 | +starting InfluxDB. If needed, shard permissions can be corrected with | ||
97 | +the `chown` command. For example: | ||
98 | + | ||
99 | +``` | ||
100 | +sudo chown -R influxdb:influxdb /var/lib/influxdb | ||
101 | +``` | ||
102 | + | ||
103 | +## Rolling back a conversion | ||
104 | + | ||
105 | +After a successful backup (the message `Database XYZ backed up` was | ||
106 | +logged), you have a duplicate of that database in the _backup_ | ||
107 | +directory you provided on the command line. If, when checking your | ||
108 | +data after a successful conversion, you notice things missing or | ||
109 | +something just isn't right, you can "undo" the conversion: | ||
110 | + | ||
111 | +- Shut down your node (this is very important) | ||
112 | +- Remove the database's directory from the influxdb `data` directory (default: `~/.influxdb/data/XYZ` for binary installations or `/var/lib/influxdb/data/XYZ` for packaged installations) | ||
113 | +- Copy (to really make sure the shard is preserved) the database's directory from the backup directory you created into the `data` directory. | ||
114 | + | ||
115 | +Using the same directories as above, and assuming a database named `stats`: | ||
116 | + | ||
117 | +``` | ||
118 | +$ sudo rm -r /var/lib/influxdb/data/stats | ||
119 | +$ sudo cp -r /path/to/influxdb_backup/stats /var/lib/influxdb/data/ | ||
120 | +$ # restart influxd node | ||
121 | +``` | ||
122 | + | ||
123 | +#### How to avoid downtime when upgrading shards | ||
124 | + | ||
125 | +*Identify non-`tsm1` shards* | ||
126 | + | ||
127 | +Non-`tsm1` shards are files of the form: `data/<database>/<retention_policy>/<shard_id>`. | ||
128 | + | ||
129 | +`tsm1` shards are files of the form: `data/<database>/<retention_policy>/<shard_id>/<file>.tsm`. | ||
130 | + | ||
131 | +*Determine which `b1`/`bz1` shards are cold for writes* | ||
132 | + | ||
133 | +Run the `SHOW SHARDS` query to see the start and end dates for shards. | ||
134 | +If the date range for a shard does not span the current time then the shard is said to be cold for writes. | ||
135 | +This means that no new points are expected to be added to the shard. | ||
136 | +A shard whose date range spans the current time is said to be hot for writes. | ||
137 | +You can only safely convert cold shards without stopping the InfluxDB process. | ||
138 | + | ||
139 | +*Convert cold shards* | ||
140 | + | ||
141 | +1. Copy each of the cold shards you'd like to convert to a new directory with the structure `/tmp/data/<database>/<retention_policy>/<shard_id>`. | ||
142 | +2. Run the `influx_tsm` tool on the copied files: | ||
143 | +``` | ||
144 | +influx_tsm -parallel /tmp/data/ | ||
145 | +``` | ||
146 | +3. Remove the existing cold `b1`/`bz1` shards from the production data directory. | ||
147 | +4. Move the new `tsm1` shards into the original directory, overwriting the existing `b1`/`bz1` shards of the same name. Do steps 3 and 4 back-to-back to avoid any query errors (see the sketch after the note below). | ||
148 | +5. Wait an hour, a day, or a week (depending on your retention period) for any hot `b1`/`bz1` shards to become cold and repeat steps 1 through 4 on the newly cold shards. | ||
149 | + | ||
150 | +> **Note:** Any points written to the cold shards after making a copy will be lost when the `tsm1` shard overwrites the existing cold shard. | ||
151 | +Nothing in InfluxDB prevents writes to cold shards; such writes are merely unexpected, not impossible. | ||
152 | +It is your responsibility to prevent writes to cold shards to prevent data loss. |
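As referenced in step 4 above, a minimal sketch of the swap for one shard (database, retention policy, and shard id are illustrative; adapt paths to your layout):

```
$ # steps 3 and 4, back-to-back
$ sudo rm -r /var/lib/influxdb/data/mydb/autogen/7
$ sudo mv /tmp/data/mydb/autogen/7 /var/lib/influxdb/data/mydb/autogen/
```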
1 | +// Package b1 reads data from b1 shards. | ||
2 | +package b1 // import "github.com/influxdata/influxdb/cmd/influx_tsm/b1" | ||
3 | + | ||
4 | +import ( | ||
5 | + "encoding/binary" | ||
6 | + "math" | ||
7 | + "sort" | ||
8 | + "time" | ||
9 | + | ||
10 | + "github.com/boltdb/bolt" | ||
11 | + "github.com/influxdata/influxdb/cmd/influx_tsm/stats" | ||
12 | + "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb" | ||
13 | + "github.com/influxdata/influxdb/tsdb/engine/tsm1" | ||
14 | +) | ||
15 | + | ||
16 | +// DefaultChunkSize is the size of chunks read from the b1 shard | ||
17 | +const DefaultChunkSize int = 1000 | ||
18 | + | ||
19 | +var excludedBuckets = map[string]bool{ | ||
20 | + "fields": true, | ||
21 | + "meta": true, | ||
22 | + "series": true, | ||
23 | + "wal": true, | ||
24 | +} | ||
25 | + | ||
26 | +// Reader is used to read all data from a b1 shard. | ||
27 | +type Reader struct { | ||
28 | + path string | ||
29 | + db *bolt.DB | ||
30 | + tx *bolt.Tx | ||
31 | + | ||
32 | + cursors []*cursor | ||
33 | + currCursor int | ||
34 | + | ||
35 | + keyBuf string | ||
36 | + values []tsm1.Value | ||
37 | + valuePos int | ||
38 | + | ||
39 | + fields map[string]*tsdb.MeasurementFields | ||
40 | + codecs map[string]*tsdb.FieldCodec | ||
41 | + | ||
42 | + stats *stats.Stats | ||
43 | +} | ||
44 | + | ||
45 | +// NewReader returns a reader for the b1 shard at path. | ||
46 | +func NewReader(path string, stats *stats.Stats, chunkSize int) *Reader { | ||
47 | + r := &Reader{ | ||
48 | + path: path, | ||
49 | + fields: make(map[string]*tsdb.MeasurementFields), | ||
50 | + codecs: make(map[string]*tsdb.FieldCodec), | ||
51 | + stats: stats, | ||
52 | + } | ||
53 | + | ||
54 | + if chunkSize <= 0 { | ||
55 | + chunkSize = DefaultChunkSize | ||
56 | + } | ||
57 | + | ||
58 | + r.values = make([]tsm1.Value, chunkSize) | ||
59 | + | ||
60 | + return r | ||
61 | +} | ||
62 | + | ||
63 | +// Open opens the reader. | ||
64 | +func (r *Reader) Open() error { | ||
65 | + // Open underlying storage. | ||
66 | + db, err := bolt.Open(r.path, 0666, &bolt.Options{Timeout: 1 * time.Second}) | ||
67 | + if err != nil { | ||
68 | + return err | ||
69 | + } | ||
70 | + r.db = db | ||
71 | + | ||
72 | + // Load fields. | ||
73 | + if err := r.db.View(func(tx *bolt.Tx) error { | ||
74 | + meta := tx.Bucket([]byte("fields")) | ||
75 | + c := meta.Cursor() | ||
76 | + | ||
77 | + for k, v := c.First(); k != nil; k, v = c.Next() { | ||
78 | + mf := &tsdb.MeasurementFields{} | ||
79 | + if err := mf.UnmarshalBinary(v); err != nil { | ||
80 | + return err | ||
81 | + } | ||
82 | + r.fields[string(k)] = mf | ||
83 | + r.codecs[string(k)] = tsdb.NewFieldCodec(mf.Fields) | ||
84 | + } | ||
85 | + return nil | ||
86 | + }); err != nil { | ||
87 | + return err | ||
88 | + } | ||
89 | + | ||
90 | + seriesSet := make(map[string]bool) | ||
91 | + | ||
92 | + // Ignore the series index buckets and find all series in this shard. | ||
93 | + if err := r.db.View(func(tx *bolt.Tx) error { | ||
94 | + return tx.ForEach(func(name []byte, _ *bolt.Bucket) error { | ||
95 | + key := string(name) | ||
96 | + if !excludedBuckets[key] { | ||
97 | + seriesSet[key] = true | ||
98 | + } | ||
99 | + return nil | ||
100 | + }) | ||
102 | + }); err != nil { | ||
103 | + return err | ||
104 | + } | ||
105 | + | ||
106 | + r.tx, err = r.db.Begin(false) | ||
107 | + if err != nil { | ||
108 | + return err | ||
109 | + } | ||
110 | + | ||
111 | + // Create cursor for each field of each series. | ||
112 | + for s := range seriesSet { | ||
113 | + measurement := tsdb.MeasurementFromSeriesKey(s) | ||
114 | + fields := r.fields[measurement] | ||
115 | + if fields == nil { | ||
116 | + r.stats.IncrFiltered() | ||
117 | + continue | ||
118 | + } | ||
119 | + for _, f := range fields.Fields { | ||
120 | + c := newCursor(r.tx, s, f.Name, r.codecs[measurement]) | ||
121 | + c.SeekTo(0) | ||
122 | + r.cursors = append(r.cursors, c) | ||
123 | + } | ||
124 | + } | ||
125 | + sort.Sort(cursors(r.cursors)) | ||
126 | + | ||
127 | + return nil | ||
128 | +} | ||
129 | + | ||
130 | +// Next returns whether any data remains to be read. It must be called before | ||
131 | +// the next call to Read(). | ||
132 | +func (r *Reader) Next() bool { | ||
133 | + r.valuePos = 0 | ||
134 | +OUTER: | ||
135 | + for { | ||
136 | + if r.currCursor >= len(r.cursors) { | ||
137 | + // All cursors drained. No more data remains. | ||
138 | + return false | ||
139 | + } | ||
140 | + | ||
141 | + cc := r.cursors[r.currCursor] | ||
142 | + r.keyBuf = tsm1.SeriesFieldKey(cc.series, cc.field) | ||
143 | + | ||
144 | + for { | ||
145 | + k, v := cc.Next() | ||
146 | + if k == -1 { | ||
147 | + // Go to next cursor and try again. | ||
148 | + r.currCursor++ | ||
149 | + if r.valuePos == 0 { | ||
150 | + // The previous cursor had no data. Instead of returning | ||
151 | + // just go immediately to the next cursor. | ||
152 | + continue OUTER | ||
153 | + } | ||
154 | + // There is some data available. Indicate that it should be read. | ||
155 | + return true | ||
156 | + } | ||
157 | + | ||
158 | + if f, ok := v.(float64); ok { | ||
159 | + if math.IsInf(f, 0) { | ||
160 | + r.stats.AddPointsRead(1) | ||
161 | + r.stats.IncrInf() | ||
162 | + continue | ||
163 | + } | ||
164 | + | ||
165 | + if math.IsNaN(f) { | ||
166 | + r.stats.AddPointsRead(1) | ||
167 | + r.stats.IncrNaN() | ||
168 | + continue | ||
169 | + } | ||
170 | + } | ||
171 | + | ||
172 | + r.values[r.valuePos] = tsm1.NewValue(k, v) | ||
173 | + r.valuePos++ | ||
174 | + | ||
175 | + if r.valuePos >= len(r.values) { | ||
176 | + return true | ||
177 | + } | ||
178 | + } | ||
179 | + } | ||
180 | +} | ||
181 | + | ||
182 | +// Read returns the next chunk of data in the shard, converted to tsm1 values. Data is | ||
183 | +// emitted completely for every field, in every series, before the next field is processed. | ||
184 | +// Data from Read() adheres to the requirements for writing to tsm1 shards | ||
185 | +func (r *Reader) Read() (string, []tsm1.Value, error) { | ||
186 | + return r.keyBuf, r.values[:r.valuePos], nil | ||
187 | +} | ||
188 | + | ||
189 | +// Close closes the reader. | ||
190 | +func (r *Reader) Close() error { | ||
191 | + r.tx.Rollback() | ||
192 | + return r.db.Close() | ||
193 | +} | ||
194 | + | ||
195 | +// cursor provides ordered iteration across a series. | ||
196 | +type cursor struct { | ||
197 | + // Bolt cursor and readahead buffer. | ||
198 | + cursor *bolt.Cursor | ||
199 | + keyBuf int64 | ||
200 | + valBuf interface{} | ||
201 | + | ||
202 | + series string | ||
203 | + field string | ||
204 | + dec *tsdb.FieldCodec | ||
205 | +} | ||
206 | + | ||
207 | +// newCursor returns an iterator over a single field of a series. | ||
208 | +func newCursor(tx *bolt.Tx, series string, field string, dec *tsdb.FieldCodec) *cursor { | ||
209 | + cur := &cursor{ | ||
210 | + keyBuf: -2, | ||
211 | + series: series, | ||
212 | + field: field, | ||
213 | + dec: dec, | ||
214 | + } | ||
215 | + | ||
216 | + // Retrieve series bucket. | ||
217 | + b := tx.Bucket([]byte(series)) | ||
218 | + if b != nil { | ||
219 | + cur.cursor = b.Cursor() | ||
220 | + } | ||
221 | + | ||
222 | + return cur | ||
223 | +} | ||
224 | + | ||
225 | +// SeekTo moves the cursor to the given position. | ||
226 | +func (c *cursor) SeekTo(seek int64) { | ||
227 | + var seekBytes [8]byte | ||
228 | + binary.BigEndian.PutUint64(seekBytes[:], uint64(seek)) | ||
229 | + k, v := c.cursor.Seek(seekBytes[:]) | ||
230 | + c.keyBuf, c.valBuf = tsdb.DecodeKeyValue(c.field, c.dec, k, v) | ||
231 | +} | ||
232 | + | ||
233 | +// Next returns the next key/value pair from the cursor. | ||
234 | +func (c *cursor) Next() (key int64, value interface{}) { | ||
235 | + for { | ||
236 | + k, v := func() (int64, interface{}) { | ||
237 | + if c.keyBuf != -2 { | ||
238 | + k, v := c.keyBuf, c.valBuf | ||
239 | + c.keyBuf = -2 | ||
240 | + return k, v | ||
241 | + } | ||
242 | + | ||
243 | + k, v := c.cursor.Next() | ||
244 | + if k == nil { | ||
245 | + return -1, nil | ||
246 | + } | ||
247 | + return tsdb.DecodeKeyValue(c.field, c.dec, k, v) | ||
248 | + }() | ||
249 | + | ||
250 | + if k != -1 && v == nil { | ||
251 | + // There is a point in the series at the next timestamp, | ||
252 | + // but not for this cursor's field. Go to the next point. | ||
253 | + continue | ||
254 | + } | ||
255 | + return k, v | ||
256 | + } | ||
257 | +} | ||
258 | + | ||
259 | +// Sort b1 cursors in correct order for writing to TSM files. | ||
260 | + | ||
261 | +type cursors []*cursor | ||
262 | + | ||
263 | +func (a cursors) Len() int { return len(a) } | ||
264 | +func (a cursors) Swap(i, j int) { a[i], a[j] = a[j], a[i] } | ||
265 | +func (a cursors) Less(i, j int) bool { | ||
266 | + if a[i].series == a[j].series { | ||
267 | + return a[i].field < a[j].field | ||
268 | + } | ||
269 | + return a[i].series < a[j].series | ||
270 | +} |
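A sketch of how conversion code drains this reader (error handling abbreviated; the shard path is illustrative, and a non-positive chunk size falls back to `DefaultChunkSize`):

```go
st := &stats.Stats{}
r := b1.NewReader("/var/lib/influxdb/data/mydb/autogen/1", st, 0)
if err := r.Open(); err != nil {
	log.Fatal(err)
}
defer r.Close()
for r.Next() {
	key, values, _ := r.Read() // Read does not fail for this reader
	fmt.Printf("%s: %d values\n", key, len(values))
}
```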
1 | +// Package bz1 reads data from bz1 shards. | ||
2 | +package bz1 // import "github.com/influxdata/influxdb/cmd/influx_tsm/bz1" | ||
3 | + | ||
4 | +import ( | ||
5 | + "bytes" | ||
6 | + "encoding/binary" | ||
7 | + "encoding/json" | ||
8 | + "fmt" | ||
9 | + "math" | ||
10 | + "sort" | ||
11 | + "time" | ||
12 | + | ||
13 | + "github.com/boltdb/bolt" | ||
14 | + "github.com/golang/snappy" | ||
15 | + "github.com/influxdata/influxdb/cmd/influx_tsm/stats" | ||
16 | + "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb" | ||
17 | + "github.com/influxdata/influxdb/tsdb/engine/tsm1" | ||
18 | +) | ||
19 | + | ||
20 | +// DefaultChunkSize is the size of chunks read from the bz1 shard | ||
21 | +const DefaultChunkSize = 1000 | ||
22 | + | ||
23 | +// Reader is used to read all data from a bz1 shard. | ||
24 | +type Reader struct { | ||
25 | + path string | ||
26 | + db *bolt.DB | ||
27 | + tx *bolt.Tx | ||
28 | + | ||
29 | + cursors []*cursor | ||
30 | + currCursor int | ||
31 | + | ||
32 | + keyBuf string | ||
33 | + values []tsm1.Value | ||
34 | + valuePos int | ||
35 | + | ||
36 | + fields map[string]*tsdb.MeasurementFields | ||
37 | + codecs map[string]*tsdb.FieldCodec | ||
38 | + | ||
39 | + stats *stats.Stats | ||
40 | +} | ||
41 | + | ||
42 | +// NewReader returns a reader for the bz1 shard at path. | ||
43 | +func NewReader(path string, stats *stats.Stats, chunkSize int) *Reader { | ||
44 | + r := &Reader{ | ||
45 | + path: path, | ||
46 | + fields: make(map[string]*tsdb.MeasurementFields), | ||
47 | + codecs: make(map[string]*tsdb.FieldCodec), | ||
48 | + stats: stats, | ||
49 | + } | ||
50 | + | ||
51 | + if chunkSize <= 0 { | ||
52 | + chunkSize = DefaultChunkSize | ||
53 | + } | ||
54 | + | ||
55 | + r.values = make([]tsm1.Value, chunkSize) | ||
56 | + | ||
57 | + return r | ||
58 | +} | ||
59 | + | ||
60 | +// Open opens the reader. | ||
61 | +func (r *Reader) Open() error { | ||
62 | + // Open underlying storage. | ||
63 | + db, err := bolt.Open(r.path, 0666, &bolt.Options{Timeout: 1 * time.Second}) | ||
64 | + if err != nil { | ||
65 | + return err | ||
66 | + } | ||
67 | + r.db = db | ||
68 | + | ||
69 | + seriesSet := make(map[string]bool) | ||
70 | + | ||
71 | + if err := r.db.View(func(tx *bolt.Tx) error { | ||
72 | + var data []byte | ||
73 | + | ||
74 | + meta := tx.Bucket([]byte("meta")) | ||
75 | + if meta == nil { | ||
76 | + // No data in this shard. | ||
77 | + return nil | ||
78 | + } | ||
79 | + | ||
80 | + pointsBucket := tx.Bucket([]byte("points")) | ||
81 | + if pointsBucket == nil { | ||
82 | + return nil | ||
83 | + } | ||
84 | + | ||
85 | + if err := pointsBucket.ForEach(func(key, _ []byte) error { | ||
86 | + seriesSet[string(key)] = true | ||
87 | + return nil | ||
88 | + }); err != nil { | ||
89 | + return err | ||
90 | + } | ||
91 | + | ||
92 | + buf := meta.Get([]byte("fields")) | ||
93 | + if buf == nil { | ||
94 | + // No data in this shard. | ||
95 | + return nil | ||
96 | + } | ||
97 | + | ||
98 | + data, err = snappy.Decode(nil, buf) | ||
99 | + if err != nil { | ||
100 | + return err | ||
101 | + } | ||
102 | + if err := json.Unmarshal(data, &r.fields); err != nil { | ||
103 | + return err | ||
104 | + } | ||
105 | + return nil | ||
106 | + }); err != nil { | ||
107 | + return err | ||
108 | + } | ||
109 | + | ||
110 | + // Build the codec for each measurement. | ||
111 | + for k, v := range r.fields { | ||
112 | + r.codecs[k] = tsdb.NewFieldCodec(v.Fields) | ||
113 | + } | ||
114 | + | ||
115 | + r.tx, err = r.db.Begin(false) | ||
116 | + if err != nil { | ||
117 | + return err | ||
118 | + } | ||
119 | + | ||
120 | + // Create cursor for each field of each series. | ||
121 | + for s := range seriesSet { | ||
122 | + measurement := tsdb.MeasurementFromSeriesKey(s) | ||
123 | + fields := r.fields[measurement] | ||
124 | + if fields == nil { | ||
125 | + r.stats.IncrFiltered() | ||
126 | + continue | ||
127 | + } | ||
128 | + for _, f := range fields.Fields { | ||
129 | + c := newCursor(r.tx, s, f.Name, r.codecs[measurement]) | ||
130 | + if c == nil { | ||
131 | + continue | ||
132 | + } | ||
133 | + c.SeekTo(0) | ||
134 | + r.cursors = append(r.cursors, c) | ||
135 | + } | ||
136 | + } | ||
137 | + sort.Sort(cursors(r.cursors)) | ||
138 | + | ||
139 | + return nil | ||
140 | +} | ||
141 | + | ||
142 | +// Next returns whether there is any more data to be read. | ||
143 | +func (r *Reader) Next() bool { | ||
144 | + r.valuePos = 0 | ||
145 | +OUTER: | ||
146 | + for { | ||
147 | + if r.currCursor >= len(r.cursors) { | ||
148 | + // All cursors drained. No more data remains. | ||
149 | + return false | ||
150 | + } | ||
151 | + | ||
152 | + cc := r.cursors[r.currCursor] | ||
153 | + r.keyBuf = tsm1.SeriesFieldKey(cc.series, cc.field) | ||
154 | + | ||
155 | + for { | ||
156 | + k, v := cc.Next() | ||
157 | + if k == -1 { | ||
158 | + // Go to next cursor and try again. | ||
159 | + r.currCursor++ | ||
160 | + if r.valuePos == 0 { | ||
161 | + // The previous cursor had no data. Instead of returning | ||
162 | + // just go immediately to the next cursor. | ||
163 | + continue OUTER | ||
164 | + } | ||
165 | + // There is some data available. Indicate that it should be read. | ||
166 | + return true | ||
167 | + } | ||
168 | + | ||
169 | + if f, ok := v.(float64); ok { | ||
170 | + if math.IsInf(f, 0) { | ||
171 | + r.stats.AddPointsRead(1) | ||
172 | + r.stats.IncrInf() | ||
173 | + continue | ||
174 | + } | ||
175 | + | ||
176 | + if math.IsNaN(f) { | ||
177 | + r.stats.AddPointsRead(1) | ||
178 | + r.stats.IncrNaN() | ||
179 | + continue | ||
180 | + } | ||
181 | + } | ||
182 | + | ||
183 | + r.values[r.valuePos] = tsm1.NewValue(k, v) | ||
184 | + r.valuePos++ | ||
185 | + | ||
186 | + if r.valuePos >= len(r.values) { | ||
187 | + return true | ||
188 | + } | ||
189 | + } | ||
190 | + } | ||
191 | +} | ||
192 | + | ||
193 | +// Read returns the next chunk of data in the shard, converted to tsm1 values. Data is | ||
194 | +// emitted completely for every field, in every series, before the next field is processed. | ||
195 | +// Data from Read() adheres to the requirements for writing to tsm1 shards | ||
196 | +func (r *Reader) Read() (string, []tsm1.Value, error) { | ||
197 | + return r.keyBuf, r.values[:r.valuePos], nil | ||
198 | +} | ||
199 | + | ||
200 | +// Close closes the reader. | ||
201 | +func (r *Reader) Close() error { | ||
202 | + r.tx.Rollback() | ||
203 | + return r.db.Close() | ||
204 | +} | ||
205 | + | ||
206 | +// cursor provides ordered iteration across a series. | ||
207 | +type cursor struct { | ||
208 | + cursor *bolt.Cursor | ||
209 | + buf []byte // uncompressed buffer | ||
210 | + off int // buffer offset | ||
211 | + fieldIndices []int | ||
212 | + index int | ||
213 | + | ||
214 | + series string | ||
215 | + field string | ||
216 | + dec *tsdb.FieldCodec | ||
217 | + | ||
218 | + keyBuf int64 | ||
219 | + valBuf interface{} | ||
220 | +} | ||
221 | + | ||
222 | +// newCursor returns an instance of a bz1 cursor. | ||
223 | +func newCursor(tx *bolt.Tx, series string, field string, dec *tsdb.FieldCodec) *cursor { | ||
224 | + // Retrieve points bucket. Ignore if there is no bucket. | ||
225 | + b := tx.Bucket([]byte("points")).Bucket([]byte(series)) | ||
226 | + if b == nil { | ||
227 | + return nil | ||
228 | + } | ||
229 | + | ||
230 | + return &cursor{ | ||
231 | + cursor: b.Cursor(), | ||
232 | + series: series, | ||
233 | + field: field, | ||
234 | + dec: dec, | ||
235 | + keyBuf: -2, | ||
236 | + } | ||
237 | +} | ||
238 | + | ||
239 | +// SeekTo moves the cursor to the given position. | ||
240 | +func (c *cursor) SeekTo(seek int64) { | ||
241 | + var seekBytes [8]byte | ||
242 | + binary.BigEndian.PutUint64(seekBytes[:], uint64(seek)) | ||
243 | + | ||
244 | + // Move cursor to appropriate block and set to buffer. | ||
245 | + k, v := c.cursor.Seek(seekBytes[:]) | ||
246 | + if v == nil { // get the last block, it might have this time | ||
247 | + _, v = c.cursor.Last() | ||
248 | + } else if seek < int64(binary.BigEndian.Uint64(k)) { // the seek key is less than this block, go back one and check | ||
249 | + _, v = c.cursor.Prev() | ||
250 | + | ||
251 | + // if the previous block max time is less than the seek value, reset to where we were originally | ||
252 | + if v == nil || seek > int64(binary.BigEndian.Uint64(v[0:8])) { | ||
253 | + _, v = c.cursor.Seek(seekBytes[:]) | ||
254 | + } | ||
255 | + } | ||
256 | + c.setBuf(v) | ||
257 | + | ||
258 | + // Read current block up to seek position. | ||
259 | + c.seekBuf(seekBytes[:]) | ||
260 | + | ||
261 | + // Return current entry. | ||
262 | + c.keyBuf, c.valBuf = c.read() | ||
263 | +} | ||
264 | + | ||
265 | +// seekBuf moves the cursor to a position within the current buffer. | ||
266 | +func (c *cursor) seekBuf(seek []byte) (key, value []byte) { | ||
267 | + for { | ||
268 | + // Slice off the current entry. | ||
269 | + buf := c.buf[c.off:] | ||
270 | + | ||
271 | + // Exit if current entry's timestamp is on or after the seek. | ||
272 | + if len(buf) == 0 { | ||
273 | + return | ||
274 | + } | ||
275 | + | ||
276 | + if bytes.Compare(buf[0:8], seek) != -1 { | ||
277 | + return | ||
278 | + } | ||
279 | + | ||
280 | + c.off += entryHeaderSize + entryDataSize(buf) | ||
281 | + } | ||
282 | +} | ||
283 | + | ||
284 | +// Next returns the next key/value pair from the cursor. If there are no values | ||
285 | +// remaining, -1 is returned. | ||
286 | +func (c *cursor) Next() (int64, interface{}) { | ||
287 | + for { | ||
288 | + k, v := func() (int64, interface{}) { | ||
289 | + if c.keyBuf != -2 { | ||
290 | + k, v := c.keyBuf, c.valBuf | ||
291 | + c.keyBuf = -2 | ||
292 | + return k, v | ||
293 | + } | ||
294 | + | ||
295 | + // Ignore if there is no buffer. | ||
296 | + if len(c.buf) == 0 { | ||
297 | + return -1, nil | ||
298 | + } | ||
299 | + | ||
300 | + // Move forward to next entry. | ||
301 | + c.off += entryHeaderSize + entryDataSize(c.buf[c.off:]) | ||
302 | + | ||
303 | + // If no items left then read first item from next block. | ||
304 | + if c.off >= len(c.buf) { | ||
305 | + _, v := c.cursor.Next() | ||
306 | + c.setBuf(v) | ||
307 | + } | ||
308 | + | ||
309 | + return c.read() | ||
310 | + }() | ||
311 | + | ||
312 | + if k != -1 && v == nil { | ||
313 | + // There is a point in the series at the next timestamp, | ||
314 | + // but not for this cursor's field. Go to the next point. | ||
315 | + continue | ||
316 | + } | ||
317 | + return k, v | ||
318 | + } | ||
319 | +} | ||
320 | + | ||
321 | +// setBuf saves a compressed block to the buffer. | ||
322 | +func (c *cursor) setBuf(block []byte) { | ||
323 | + // Clear if the block is empty. | ||
324 | + if len(block) == 0 { | ||
325 | + c.buf, c.off, c.fieldIndices, c.index = c.buf[0:0], 0, c.fieldIndices[0:0], 0 | ||
326 | + return | ||
327 | + } | ||
328 | + | ||
329 | + // Otherwise decode block into buffer. | ||
330 | + // Skip over the first 8 bytes since they are the max timestamp. | ||
331 | + buf, err := snappy.Decode(nil, block[8:]) | ||
332 | + if err != nil { | ||
333 | + c.buf = c.buf[0:0] | ||
334 | + fmt.Printf("block decode error: %s\n", err) | ||
335 | + } | ||
336 | + | ||
337 | + c.buf, c.off = buf, 0 | ||
338 | +} | ||
339 | + | ||
340 | +// read reads the current key and value from the current block. | ||
341 | +func (c *cursor) read() (key int64, value interface{}) { | ||
342 | + // Return nil if the offset is at the end of the buffer. | ||
343 | + if c.off >= len(c.buf) { | ||
344 | + return -1, nil | ||
345 | + } | ||
346 | + | ||
347 | + // Otherwise read the current entry. | ||
348 | + buf := c.buf[c.off:] | ||
349 | + dataSize := entryDataSize(buf) | ||
350 | + | ||
351 | + return tsdb.DecodeKeyValue(c.field, c.dec, buf[0:8], buf[entryHeaderSize:entryHeaderSize+dataSize]) | ||
352 | +} | ||
353 | + | ||
354 | +// Sort bz1 cursors in correct order for writing to TSM files. | ||
355 | + | ||
356 | +type cursors []*cursor | ||
357 | + | ||
358 | +func (a cursors) Len() int { return len(a) } | ||
359 | +func (a cursors) Swap(i, j int) { a[i], a[j] = a[j], a[i] } | ||
360 | +func (a cursors) Less(i, j int) bool { | ||
361 | + if a[i].series == a[j].series { | ||
362 | + return a[i].field < a[j].field | ||
363 | + } | ||
364 | + return a[i].series < a[j].series | ||
365 | +} | ||
366 | + | ||
367 | +// entryHeaderSize is the number of bytes required for the header. | ||
368 | +const entryHeaderSize = 8 + 4 | ||
369 | + | ||
370 | +// entryDataSize returns the size of an entry's data field, in bytes. | ||
371 | +func entryDataSize(v []byte) int { return int(binary.BigEndian.Uint32(v[8:12])) } |
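The two declarations above pin down the entry layout inside a decompressed bz1 block: an 8-byte big-endian timestamp, a 4-byte big-endian data length, then the data itself. A sketch of walking a decoded buffer with that layout, mirroring what `seekBuf` and `read` do above:

```go
// walkEntries visits each (timestamp, data) entry in a decoded bz1 block buffer.
func walkEntries(buf []byte, visit func(ts int64, data []byte)) {
	for off := 0; off < len(buf); {
		ts := int64(binary.BigEndian.Uint64(buf[off : off+8]))
		n := entryDataSize(buf[off:])
		visit(ts, buf[off+entryHeaderSize:off+entryHeaderSize+n])
		off += entryHeaderSize + n
	}
}
```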
1 | +package main | ||
2 | + | ||
3 | +import ( | ||
4 | + "fmt" | ||
5 | + "os" | ||
6 | + "path/filepath" | ||
7 | + | ||
8 | + "github.com/influxdata/influxdb/cmd/influx_tsm/stats" | ||
9 | + "github.com/influxdata/influxdb/tsdb/engine/tsm1" | ||
10 | +) | ||
11 | + | ||
12 | +const ( | ||
13 | + maxBlocksPerKey = 65535 | ||
14 | +) | ||
15 | + | ||
16 | +// KeyIterator is used to iterate over b* keys for conversion to tsm keys | ||
17 | +type KeyIterator interface { | ||
18 | + Next() bool | ||
19 | + Read() (string, []tsm1.Value, error) | ||
20 | +} | ||
21 | + | ||
22 | +// Converter encapsulates the logic for converting b*1 shards to tsm1 shards. | ||
23 | +type Converter struct { | ||
24 | + path string | ||
25 | + maxTSMFileSize uint32 | ||
26 | + sequence int | ||
27 | + stats *stats.Stats | ||
28 | +} | ||
29 | + | ||
30 | +// NewConverter returns a new instance of the Converter. | ||
31 | +func NewConverter(path string, sz uint32, stats *stats.Stats) *Converter { | ||
32 | + return &Converter{ | ||
33 | + path: path, | ||
34 | + maxTSMFileSize: sz, | ||
35 | + stats: stats, | ||
36 | + } | ||
37 | +} | ||
38 | + | ||
39 | +// Process writes the data provided by iter to a tsm1 shard. | ||
40 | +func (c *Converter) Process(iter KeyIterator) error { | ||
41 | + // Ensure the tsm1 directory exists. | ||
42 | + if err := os.MkdirAll(c.path, 0777); err != nil { | ||
43 | + return err | ||
44 | + } | ||
45 | + | ||
46 | + // Iterate until no more data remains. | ||
47 | + var w tsm1.TSMWriter | ||
48 | + var keyCount map[string]int | ||
49 | + | ||
50 | + for iter.Next() { | ||
51 | + k, v, err := iter.Read() | ||
52 | + if err != nil { | ||
53 | + return err | ||
54 | + } | ||
55 | + | ||
56 | + if w == nil { | ||
57 | + w, err = c.nextTSMWriter() | ||
58 | + if err != nil { | ||
59 | + return err | ||
60 | + } | ||
61 | + keyCount = map[string]int{} | ||
62 | + } | ||
63 | + if err := w.Write(k, v); err != nil { | ||
64 | + return err | ||
65 | + } | ||
66 | + keyCount[k]++ | ||
67 | + | ||
68 | + c.stats.AddPointsRead(len(v)) | ||
69 | + c.stats.AddPointsWritten(len(v)) | ||
70 | + | ||
71 | + // If the file exceeds the max size, or this key has reached the max blocks per key, start a new TSM file. | ||
72 | + if w.Size() > c.maxTSMFileSize || keyCount[k] == maxBlocksPerKey { | ||
73 | + if err := w.WriteIndex(); err != nil && err != tsm1.ErrNoValues { | ||
74 | + return err | ||
75 | + } | ||
76 | + | ||
77 | + c.stats.AddTSMBytes(w.Size()) | ||
78 | + | ||
79 | + if err := w.Close(); err != nil { | ||
80 | + return err | ||
81 | + } | ||
82 | + w = nil | ||
83 | + } | ||
84 | + } | ||
85 | + | ||
86 | + if w != nil { | ||
87 | + if err := w.WriteIndex(); err != nil && err != tsm1.ErrNoValues { | ||
88 | + return err | ||
89 | + } | ||
90 | + c.stats.AddTSMBytes(w.Size()) | ||
91 | + | ||
92 | + if err := w.Close(); err != nil { | ||
93 | + return err | ||
94 | + } | ||
95 | + } | ||
96 | + | ||
97 | + return nil | ||
98 | +} | ||
99 | + | ||
100 | +// nextTSMWriter returns the next TSMWriter for the Converter. | ||
101 | +func (c *Converter) nextTSMWriter() (tsm1.TSMWriter, error) { | ||
102 | + c.sequence++ | ||
103 | + fileName := filepath.Join(c.path, fmt.Sprintf("%09d-%09d.%s", 1, c.sequence, tsm1.TSMFileExtension)) | ||
104 | + | ||
105 | + fd, err := os.OpenFile(fileName, os.O_CREATE|os.O_RDWR, 0666) | ||
106 | + if err != nil { | ||
107 | + return nil, err | ||
108 | + } | ||
109 | + | ||
110 | + // Create the writer for the new TSM file. | ||
111 | + w, err := tsm1.NewTSMWriter(fd) | ||
112 | + if err != nil { | ||
113 | + return nil, err | ||
114 | + } | ||
115 | + | ||
116 | + c.stats.IncrTSMFileCount() | ||
117 | + return w, nil | ||
118 | +} |
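A sketch of wiring a shard reader into the converter; both the b1 and bz1 `Reader` types satisfy `KeyIterator`, paths are illustrative, and the 2 GB size cap is an illustrative choice rather than a documented default:

```go
st := &stats.Stats{}
r := bz1.NewReader("/var/lib/influxdb/data/mydb/autogen/1", st, 0)
if err := r.Open(); err != nil {
	log.Fatal(err)
}
defer r.Close()

conv := NewConverter("/var/lib/influxdb/data/mydb/autogen/1.tsm1", 2*1024*1024*1024, st)
if err := conv.Process(r); err != nil {
	log.Fatal(err)
}
```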
1 | +// Package stats contains statistics for converting non-TSM shards to TSM. | ||
2 | +package stats | ||
3 | + | ||
4 | +import ( | ||
5 | + "sync/atomic" | ||
6 | + "time" | ||
7 | +) | ||
8 | + | ||
9 | +// Stats are the statistics captured while converting non-TSM shards to TSM | ||
10 | +type Stats struct { | ||
11 | + NanFiltered uint64 | ||
12 | + InfFiltered uint64 | ||
13 | + FieldsFiltered uint64 | ||
14 | + PointsWritten uint64 | ||
15 | + PointsRead uint64 | ||
16 | + TsmFilesCreated uint64 | ||
17 | + TsmBytesWritten uint64 | ||
18 | + CompletedShards uint64 | ||
19 | + TotalTime time.Duration | ||
20 | +} | ||
21 | + | ||
22 | +// AddPointsRead increments the number of read points. | ||
23 | +func (s *Stats) AddPointsRead(n int) { | ||
24 | + atomic.AddUint64(&s.PointsRead, uint64(n)) | ||
25 | +} | ||
26 | + | ||
27 | +// AddPointsWritten increments the number of written points. | ||
28 | +func (s *Stats) AddPointsWritten(n int) { | ||
29 | + atomic.AddUint64(&s.PointsWritten, uint64(n)) | ||
30 | +} | ||
31 | + | ||
32 | +// AddTSMBytes increments the number of TSM Bytes. | ||
33 | +func (s *Stats) AddTSMBytes(n uint32) { | ||
34 | + atomic.AddUint64(&s.TsmBytesWritten, uint64(n)) | ||
35 | +} | ||
36 | + | ||
37 | +// IncrTSMFileCount increments the number of TSM files created. | ||
38 | +func (s *Stats) IncrTSMFileCount() { | ||
39 | + atomic.AddUint64(&s.TsmFilesCreated, 1) | ||
40 | +} | ||
41 | + | ||
42 | +// IncrNaN increments the number of NaNs filtered. | ||
43 | +func (s *Stats) IncrNaN() { | ||
44 | + atomic.AddUint64(&s.NanFiltered, 1) | ||
45 | +} | ||
46 | + | ||
47 | +// IncrInf increments the number of Infs filtered. | ||
48 | +func (s *Stats) IncrInf() { | ||
49 | + atomic.AddUint64(&s.InfFiltered, 1) | ||
50 | +} | ||
51 | + | ||
52 | +// IncrFiltered increments the number of fields filtered. | ||
53 | +func (s *Stats) IncrFiltered() { | ||
54 | + atomic.AddUint64(&s.FieldsFiltered, 1) | ||
55 | +} |
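A minimal usage sketch for this Stats type, assuming the import path shown in the diff: every mutation goes through sync/atomic, so counters can be bumped from many goroutines and read back with atomic.LoadUint64, which is how the tracker below consumes them.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"

	"github.com/influxdata/influxdb/cmd/influx_tsm/stats"
)

func main() {
	var s stats.Stats
	var wg sync.WaitGroup

	// Two workers updating the same counters concurrently.
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			s.AddPointsRead(100)
			s.AddPointsWritten(95)
		}()
	}
	wg.Wait()

	// Atomic reads pair with the atomic writes inside the methods.
	fmt.Println("read:", atomic.LoadUint64(&s.PointsRead))       // 200
	fmt.Println("written:", atomic.LoadUint64(&s.PointsWritten)) // 190
}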
1 | +package main | ||
2 | + | ||
3 | +import ( | ||
4 | + "fmt" | ||
5 | + "log" | ||
6 | + "runtime" | ||
7 | + "sync" | ||
8 | + "sync/atomic" | ||
9 | + "time" | ||
10 | + | ||
11 | + "github.com/influxdata/influxdb/cmd/influx_tsm/stats" | ||
12 | + "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb" | ||
13 | +) | ||
14 | + | ||
15 | +// tracker orchestrates and tracks the conversion of non-TSM shards to TSM. | ||
16 | +type tracker struct { | ||
17 | + Stats stats.Stats | ||
18 | + | ||
19 | + shards tsdb.ShardInfos | ||
20 | + opts options | ||
21 | + | ||
22 | + pg ParallelGroup | ||
23 | + wg sync.WaitGroup | ||
24 | +} | ||
25 | + | ||
26 | +// newTracker sets up and returns a clean tracker instance. | ||
27 | +func newTracker(shards tsdb.ShardInfos, opts options) *tracker { | ||
28 | + t := &tracker{ | ||
29 | + shards: shards, | ||
30 | + opts: opts, | ||
31 | + pg: NewParallelGroup(runtime.GOMAXPROCS(0)), | ||
32 | + } | ||
33 | + | ||
34 | + return t | ||
35 | +} | ||
36 | + | ||
37 | +func (t *tracker) Run() error { | ||
38 | + conversionStart := time.Now() | ||
39 | + | ||
40 | + // Backup each directory. | ||
41 | + if !opts.SkipBackup { | ||
42 | + databases := t.shards.Databases() | ||
43 | + fmt.Printf("Backing up %d databases...\n", len(databases)) | ||
44 | + t.wg.Add(len(databases)) | ||
45 | + for i := range databases { | ||
46 | + db := databases[i] | ||
47 | + go t.pg.Do(func() { | ||
48 | + defer t.wg.Done() | ||
49 | + | ||
50 | + start := time.Now() | ||
51 | + log.Printf("Backup of database '%v' started", db) | ||
52 | + err := backupDatabase(db) | ||
53 | + if err != nil { | ||
54 | + log.Fatalf("Backup of database %v failed: %v\n", db, err) | ||
55 | + } | ||
56 | + log.Printf("Database %v backed up (%v)\n", db, time.Since(start)) | ||
57 | + }) | ||
58 | + } | ||
59 | + t.wg.Wait() | ||
60 | + } else { | ||
61 | + fmt.Println("Database backup disabled.") | ||
62 | + } | ||
63 | + | ||
64 | + t.wg.Add(len(t.shards)) | ||
65 | + for i := range t.shards { | ||
66 | + si := t.shards[i] | ||
67 | + go t.pg.Do(func() { | ||
68 | + defer func() { | ||
69 | + atomic.AddUint64(&t.Stats.CompletedShards, 1) | ||
70 | + t.wg.Done() | ||
71 | + }() | ||
72 | + | ||
73 | + start := time.Now() | ||
74 | + log.Printf("Starting conversion of shard: %v", si.FullPath(opts.DataPath)) | ||
75 | + if err := convertShard(si, t); err != nil { | ||
76 | + log.Fatalf("Failed to convert %v: %v\n", si.FullPath(opts.DataPath), err) | ||
77 | + } | ||
78 | + log.Printf("Conversion of %v successful (%v)\n", si.FullPath(opts.DataPath), time.Since(start)) | ||
79 | + }) | ||
80 | + } | ||
81 | + | ||
82 | + done := make(chan struct{}) | ||
83 | + go func() { | ||
84 | + t.wg.Wait() | ||
85 | + close(done) | ||
86 | + }() | ||
87 | + | ||
88 | +WAIT_LOOP: | ||
89 | + for { | ||
90 | + select { | ||
91 | + case <-done: | ||
92 | + break WAIT_LOOP | ||
93 | + case <-time.After(opts.UpdateInterval): | ||
94 | + t.StatusUpdate() | ||
95 | + } | ||
96 | + } | ||
97 | + | ||
98 | + t.Stats.TotalTime = time.Since(conversionStart) | ||
99 | + | ||
100 | + return nil | ||
101 | +} | ||
102 | + | ||
103 | +func (t *tracker) StatusUpdate() { | ||
104 | + shardCount := atomic.LoadUint64(&t.Stats.CompletedShards) | ||
105 | + pointsRead := atomic.LoadUint64(&t.Stats.PointsRead) | ||
106 | + pointsWritten := atomic.LoadUint64(&t.Stats.PointsWritten) | ||
107 | + | ||
108 | + log.Printf("Still Working: Completed Shards: %d/%d Points read/written: %d/%d", shardCount, len(t.shards), pointsRead, pointsWritten) | ||
109 | +} | ||
110 | + | ||
111 | +func (t *tracker) PrintStats() { | ||
112 | + preSize := t.shards.Size() | ||
113 | + postSize := int64(t.Stats.TsmBytesWritten) | ||
114 | + | ||
115 | + fmt.Printf("\nSummary statistics\n========================================\n") | ||
116 | + fmt.Printf("Databases converted: %d\n", len(t.shards.Databases())) | ||
117 | + fmt.Printf("Shards converted: %d\n", len(t.shards)) | ||
118 | + fmt.Printf("TSM files created: %d\n", t.Stats.TsmFilesCreated) | ||
119 | + fmt.Printf("Points read: %d\n", t.Stats.PointsRead) | ||
120 | + fmt.Printf("Points written: %d\n", t.Stats.PointsWritten) | ||
121 | + fmt.Printf("NaN filtered: %d\n", t.Stats.NanFiltered) | ||
122 | + fmt.Printf("Inf filtered: %d\n", t.Stats.InfFiltered) | ||
123 | + fmt.Printf("Points without fields filtered: %d\n", t.Stats.FieldsFiltered) | ||
124 | + fmt.Printf("Disk usage pre-conversion (bytes): %d\n", preSize) | ||
125 | + fmt.Printf("Disk usage post-conversion (bytes): %d\n", postSize) | ||
126 | + fmt.Printf("Reduction factor: %d%%\n", 100*(preSize-postSize)/preSize) | ||
127 | + fmt.Printf("Bytes per TSM point: %.2f\n", float64(postSize)/float64(t.Stats.PointsWritten)) | ||
128 | + fmt.Printf("Total conversion time: %v\n", t.Stats.TotalTime) | ||
129 | + fmt.Println() | ||
130 | +} |
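The tail of Run uses a common Go progress-reporting pattern: a goroutine closes a done channel once the WaitGroup drains, and a select loop alternates between that channel and time.After. A self-contained sketch of just that pattern (the durations below are placeholders):

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		time.Sleep(250 * time.Millisecond) // stand-in for shard conversions
	}()

	// Close `done` when all work finishes, as tracker.Run does.
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	// Print a status line every interval until the work is done.
	for {
		select {
		case <-done:
			fmt.Println("all work complete")
			return
		case <-time.After(100 * time.Millisecond):
			fmt.Println("still working...")
		}
	}
}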
1 | +package tsdb | ||
2 | + | ||
3 | +import ( | ||
4 | + "encoding/binary" | ||
5 | + "errors" | ||
6 | + "fmt" | ||
7 | + "math" | ||
8 | +) | ||
9 | + | ||
10 | +const ( | ||
11 | + fieldFloat = 1 | ||
12 | + fieldInteger = 2 | ||
13 | + fieldBoolean = 3 | ||
14 | + fieldString = 4 | ||
15 | +) | ||
16 | + | ||
17 | +var ( | ||
18 | + // ErrFieldNotFound is returned when a field cannot be found. | ||
19 | + ErrFieldNotFound = errors.New("field not found") | ||
20 | + | ||
21 | +// ErrFieldUnmappedID is returned when the system is presented, during decode, with a field ID | ||
22 | +// for which there is no mapping. | ||
23 | + ErrFieldUnmappedID = errors.New("field ID not mapped") | ||
24 | +) | ||
25 | + | ||
26 | +// FieldCodec provides encoding and decoding functionality for the fields of a given | ||
27 | +// Measurement. | ||
28 | +type FieldCodec struct { | ||
29 | + fieldsByID map[uint8]*Field | ||
30 | + fieldsByName map[string]*Field | ||
31 | +} | ||
32 | + | ||
33 | +// NewFieldCodec returns a FieldCodec for the given Measurement. Must be called with | ||
34 | +// an RLock that protects the Measurement. | ||
35 | +func NewFieldCodec(fields map[string]*Field) *FieldCodec { | ||
36 | + fieldsByID := make(map[uint8]*Field, len(fields)) | ||
37 | + fieldsByName := make(map[string]*Field, len(fields)) | ||
38 | + for _, f := range fields { | ||
39 | + fieldsByID[f.ID] = f | ||
40 | + fieldsByName[f.Name] = f | ||
41 | + } | ||
42 | + return &FieldCodec{fieldsByID: fieldsByID, fieldsByName: fieldsByName} | ||
43 | +} | ||
44 | + | ||
45 | +// FieldIDByName returns the ID for the given field. | ||
46 | +func (f *FieldCodec) FieldIDByName(s string) (uint8, error) { | ||
47 | + fi := f.fieldsByName[s] | ||
48 | + if fi == nil { | ||
49 | + return 0, ErrFieldNotFound | ||
50 | + } | ||
51 | + return fi.ID, nil | ||
52 | +} | ||
53 | + | ||
54 | +// DecodeByID scans a byte slice for a field with the given ID, converts it to its | ||
55 | +// expected type, and returns that value. | ||
56 | +func (f *FieldCodec) DecodeByID(targetID uint8, b []byte) (interface{}, error) { | ||
57 | + var value interface{} | ||
58 | + for { | ||
59 | + if len(b) == 0 { | ||
60 | + // No more bytes. | ||
61 | + return nil, ErrFieldNotFound | ||
62 | + } | ||
63 | + | ||
64 | + field := f.fieldsByID[b[0]] | ||
65 | + if field == nil { | ||
66 | +// This can happen, though it is very unlikely. If this node receives encoded data, to be written | ||
67 | + // to disk, and is queried for that data before its metastore is updated, there will be no field | ||
68 | + // mapping for the data during decode. All this can happen because data is encoded by the node | ||
69 | + // that first received the write request, not the node that actually writes the data to disk. | ||
70 | + // So if this happens, the read must be aborted. | ||
71 | + return nil, ErrFieldUnmappedID | ||
72 | + } | ||
73 | + | ||
74 | + switch field.Type { | ||
75 | + case fieldFloat: | ||
76 | + if field.ID == targetID { | ||
77 | + value = math.Float64frombits(binary.BigEndian.Uint64(b[1:9])) | ||
78 | + } | ||
79 | + b = b[9:] | ||
80 | + case fieldInteger: | ||
81 | + if field.ID == targetID { | ||
82 | + value = int64(binary.BigEndian.Uint64(b[1:9])) | ||
83 | + } | ||
84 | + b = b[9:] | ||
85 | + case fieldBoolean: | ||
86 | + if field.ID == targetID { | ||
87 | + value = b[1] == 1 | ||
88 | + } | ||
89 | + b = b[2:] | ||
90 | + case fieldString: | ||
91 | + length := binary.BigEndian.Uint16(b[1:3]) | ||
92 | + if field.ID == targetID { | ||
93 | + value = string(b[3 : 3+length]) | ||
94 | + } | ||
95 | + b = b[3+length:] | ||
96 | + default: | ||
97 | + panic(fmt.Sprintf("unsupported value type during decode by id: %v", field.Type)) | ||
98 | + } | ||
99 | + | ||
100 | + if value != nil { | ||
101 | + return value, nil | ||
102 | + } | ||
103 | + } | ||
104 | +} | ||
105 | + | ||
106 | +// DecodeByName scans a byte slice for a field with the given name, converts it to its | ||
107 | +// expected type, and returns that value. | ||
108 | +func (f *FieldCodec) DecodeByName(name string, b []byte) (interface{}, error) { | ||
109 | + fi := f.FieldByName(name) | ||
110 | + if fi == nil { | ||
111 | + return nil, ErrFieldNotFound | ||
112 | + } | ||
113 | + return f.DecodeByID(fi.ID, b) | ||
114 | +} | ||
115 | + | ||
116 | +// FieldByName returns the field by its name. It returns nil if the field is not found. | ||
117 | +func (f *FieldCodec) FieldByName(name string) *Field { | ||
118 | + return f.fieldsByName[name] | ||
119 | +} |
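DecodeByID walks a [ID byte][payload] layout, where a float payload is eight big-endian bytes of IEEE-754 bits. The sketch below hand-builds one such float entry and recovers the value the same way the decoder does; it only reproduces the wire layout and does not use FieldCodec itself.

package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

func main() {
	// One float field entry: ID byte, then 8 big-endian bytes of bits.
	b := make([]byte, 9)
	b[0] = 1 // field ID
	binary.BigEndian.PutUint64(b[1:9], math.Float64bits(42.5))

	// What DecodeByID(1, b) would recover for this entry.
	v := math.Float64frombits(binary.BigEndian.Uint64(b[1:9]))
	fmt.Println(v) // 42.5
}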
1 | +// Package tsdb abstracts the various shard types supported by the influx_tsm command. | ||
2 | +package tsdb // import "github.com/influxdata/influxdb/cmd/influx_tsm/tsdb" | ||
3 | + | ||
4 | +import ( | ||
5 | + "fmt" | ||
6 | + "os" | ||
7 | + "path" | ||
8 | + "path/filepath" | ||
9 | + "sort" | ||
10 | + "time" | ||
11 | + | ||
12 | + "github.com/boltdb/bolt" | ||
13 | + "github.com/influxdata/influxdb/pkg/slices" | ||
14 | +) | ||
15 | + | ||
16 | +// Flags for differentiating between engines | ||
17 | +const ( | ||
18 | + B1 = iota | ||
19 | + BZ1 | ||
20 | + TSM1 | ||
21 | +) | ||
22 | + | ||
23 | +// EngineFormat identifies a shard's storage engine. | ||
24 | +type EngineFormat int | ||
25 | + | ||
26 | +// String returns the string format of the engine. | ||
27 | +func (e EngineFormat) String() string { | ||
28 | + switch e { | ||
29 | + case TSM1: | ||
30 | + return "tsm1" | ||
31 | + case B1: | ||
32 | + return "b1" | ||
33 | + case BZ1: | ||
34 | + return "bz1" | ||
35 | + default: | ||
36 | + panic("unrecognized shard engine format") | ||
37 | + } | ||
38 | +} | ||
39 | + | ||
40 | +// ShardInfo is the description of a shard on disk. | ||
41 | +type ShardInfo struct { | ||
42 | + Database string | ||
43 | + RetentionPolicy string | ||
44 | + Path string | ||
45 | + Format EngineFormat | ||
46 | + Size int64 | ||
47 | +} | ||
48 | + | ||
49 | +// FormatAsString returns the format of the shard as a string. | ||
50 | +func (s *ShardInfo) FormatAsString() string { | ||
51 | + return s.Format.String() | ||
52 | +} | ||
53 | + | ||
54 | +// FullPath returns the full path to the shard, given the data directory root. | ||
55 | +func (s *ShardInfo) FullPath(dataPath string) string { | ||
56 | + return filepath.Join(dataPath, s.Database, s.RetentionPolicy, s.Path) | ||
57 | +} | ||
58 | + | ||
59 | +// ShardInfos is a slice of ShardInfo. | ||
60 | +type ShardInfos []*ShardInfo | ||
61 | + | ||
62 | +func (s ShardInfos) Len() int { return len(s) } | ||
63 | +func (s ShardInfos) Swap(i, j int) { s[i], s[j] = s[j], s[i] } | ||
64 | +func (s ShardInfos) Less(i, j int) bool { | ||
65 | + if s[i].Database == s[j].Database { | ||
66 | + if s[i].RetentionPolicy == s[j].RetentionPolicy { | ||
67 | + return s[i].Path < s[j].Path | ||
68 | + } | ||
69 | + | ||
70 | + return s[i].RetentionPolicy < s[j].RetentionPolicy | ||
71 | + } | ||
72 | + | ||
73 | + return s[i].Database < s[j].Database | ||
74 | +} | ||
75 | + | ||
76 | +// Databases returns the sorted unique set of databases for the shards. | ||
77 | +func (s ShardInfos) Databases() []string { | ||
78 | + dbm := make(map[string]bool) | ||
79 | + for _, ss := range s { | ||
80 | + dbm[ss.Database] = true | ||
81 | + } | ||
82 | + | ||
83 | + var dbs []string | ||
84 | + for k := range dbm { | ||
85 | + dbs = append(dbs, k) | ||
86 | + } | ||
87 | + sort.Strings(dbs) | ||
88 | + return dbs | ||
89 | +} | ||
90 | + | ||
91 | +// FilterFormat returns a copy of the ShardInfos, with shards of the given | ||
92 | +// format removed. | ||
93 | +func (s ShardInfos) FilterFormat(fmt EngineFormat) ShardInfos { | ||
94 | + var a ShardInfos | ||
95 | + for _, si := range s { | ||
96 | + if si.Format != fmt { | ||
97 | + a = append(a, si) | ||
98 | + } | ||
99 | + } | ||
100 | + return a | ||
101 | +} | ||
102 | + | ||
103 | +// Size returns the space on disk consumed by the shards. | ||
104 | +func (s ShardInfos) Size() int64 { | ||
105 | + var sz int64 | ||
106 | + for _, si := range s { | ||
107 | + sz += si.Size | ||
108 | + } | ||
109 | + return sz | ||
110 | +} | ||
111 | + | ||
112 | +// ExclusiveDatabases returns a copy of the ShardInfos, keeping only shards | ||
113 | +// associated with the given databases. If the given set is empty, all shards | ||
114 | +// are returned. | ||
115 | +func (s ShardInfos) ExclusiveDatabases(exc []string) ShardInfos { | ||
116 | + var a ShardInfos | ||
117 | + | ||
118 | + // Empty set? Return everything. | ||
119 | + if len(exc) == 0 { | ||
120 | + a = make(ShardInfos, len(s)) | ||
121 | + copy(a, s) | ||
122 | + return a | ||
123 | + } | ||
124 | + | ||
125 | + for _, si := range s { | ||
126 | + if slices.Exists(exc, si.Database) { | ||
127 | + a = append(a, si) | ||
128 | + } | ||
129 | + } | ||
130 | + return a | ||
131 | +} | ||
132 | + | ||
133 | +// Database represents an entire database on disk. | ||
134 | +type Database struct { | ||
135 | + path string | ||
136 | +} | ||
137 | + | ||
138 | +// NewDatabase creates a database instance using data at path. | ||
139 | +func NewDatabase(path string) *Database { | ||
140 | + return &Database{path: path} | ||
141 | +} | ||
142 | + | ||
143 | +// Name returns the name of the database. | ||
144 | +func (d *Database) Name() string { | ||
145 | + return path.Base(d.path) | ||
146 | +} | ||
147 | + | ||
148 | +// Path returns the path to the database. | ||
149 | +func (d *Database) Path() string { | ||
150 | + return d.path | ||
151 | +} | ||
152 | + | ||
153 | +// Shards returns information for every shard in the database. | ||
154 | +func (d *Database) Shards() ([]*ShardInfo, error) { | ||
155 | + fd, err := os.Open(d.path) | ||
156 | + if err != nil { | ||
157 | + return nil, err | ||
158 | + } | ||
159 | + // Get each retention policy, then release the directory handle. | ||
160 | + rps, err := fd.Readdirnames(-1) | ||
161 | + fd.Close() | ||
162 | + if err != nil { | ||
163 | + return nil, err | ||
164 | + } | ||
165 | + | ||
166 | + // Process each retention policy. | ||
167 | + var shardInfos []*ShardInfo | ||
168 | + for _, rp := range rps { | ||
169 | + rpfd, err := os.Open(filepath.Join(d.path, rp)) | ||
170 | + if err != nil { | ||
171 | + return nil, err | ||
172 | + } | ||
173 | + // Process each shard, closing the handle once the names are read. | ||
174 | + shards, err := rpfd.Readdirnames(-1) | ||
175 | + rpfd.Close() | ||
176 | + if err != nil { | ||
177 | + return nil, err | ||
178 | + } | ||
179 | + | ||
180 | + for _, sh := range shards { | ||
181 | + fmt, sz, err := shardFormat(filepath.Join(d.path, rp, sh)) | ||
182 | + if err != nil { | ||
183 | + return nil, err | ||
184 | + } | ||
185 | + | ||
186 | + si := &ShardInfo{ | ||
187 | + Database: d.Name(), | ||
188 | + RetentionPolicy: path.Base(rp), | ||
189 | + Path: sh, | ||
190 | + Format: fmt, | ||
191 | + Size: sz, | ||
192 | + } | ||
193 | + shardInfos = append(shardInfos, si) | ||
194 | + } | ||
195 | + } | ||
196 | + | ||
197 | + sort.Sort(ShardInfos(shardInfos)) | ||
198 | + return shardInfos, nil | ||
199 | +} | ||
200 | + | ||
201 | +// shardFormat returns the format and size on disk of the shard at path. | ||
202 | +func shardFormat(path string) (EngineFormat, int64, error) { | ||
203 | + // If it's a directory then it's a tsm1 engine | ||
204 | + fi, err := os.Stat(path) | ||
205 | + if err != nil { | ||
206 | + return 0, 0, err | ||
207 | + } | ||
208 | + if fi.Mode().IsDir() { | ||
209 | + return TSM1, fi.Size(), nil | ||
210 | + } | ||
211 | + | ||
212 | + // It must be a BoltDB-based engine. | ||
213 | + db, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 1 * time.Second}) | ||
214 | + if err != nil { | ||
215 | + return 0, 0, err | ||
216 | + } | ||
217 | + defer db.Close() | ||
218 | + | ||
219 | + var format EngineFormat | ||
220 | + err = db.View(func(tx *bolt.Tx) error { | ||
221 | + // Retrieve the meta bucket. | ||
222 | + b := tx.Bucket([]byte("meta")) | ||
223 | + | ||
224 | + // If no format is specified then it must be an original b1 database. | ||
225 | + if b == nil { | ||
226 | + format = B1 | ||
227 | + return nil | ||
228 | + } | ||
229 | + | ||
230 | + // There is an actual format indicator. | ||
231 | + switch f := string(b.Get([]byte("format"))); f { | ||
232 | + case "b1", "v1": | ||
233 | + format = B1 | ||
234 | + case "bz1": | ||
235 | + format = BZ1 | ||
236 | + default: | ||
237 | + return fmt.Errorf("unrecognized engine format: %s", f) | ||
238 | + } | ||
239 | + | ||
240 | + return nil | ||
241 | + }) | ||
242 | + | ||
243 | + return format, fi.Size(), err | ||
244 | +} |
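Putting the pieces of this package together, a caller might enumerate a database's shards and keep only the ones that still need converting. A sketch under the assumption that the data path below is a placeholder:

package main

import (
	"fmt"
	"log"

	"github.com/influxdata/influxdb/cmd/influx_tsm/tsdb"
)

func main() {
	// Placeholder path; point this at a real database directory.
	db := tsdb.NewDatabase("/var/lib/influxdb/data/mydb")
	shards, err := db.Shards()
	if err != nil {
		log.Fatal(err)
	}

	// Drop shards already in tsm1 format; only b1/bz1 need converting.
	toConvert := tsdb.ShardInfos(shards).FilterFormat(tsdb.TSM1)
	for _, si := range toConvert {
		fmt.Printf("%s/%s/%s (%s, %d bytes)\n",
			si.Database, si.RetentionPolicy, si.Path, si.FormatAsString(), si.Size)
	}
}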
vendor/github.com/influxdata/influxdb/services/admin/assets/css/dropdowns-enhancement.css
0 → 100644
vendor/github.com/influxdata/influxdb/services/admin/assets/fonts/glyphicons-halflings-regular.eot
0 → 100644
vendor/github.com/influxdata/influxdb/services/admin/assets/fonts/glyphicons-halflings-regular.svg
0 → 100644
vendor/github.com/influxdata/influxdb/services/admin/assets/fonts/glyphicons-halflings-regular.ttf
0 → 100644
vendor/github.com/influxdata/influxdb/services/admin/assets/fonts/glyphicons-halflings-regular.woff
0 → 100644
vendor/github.com/influxdata/influxdb/services/admin/assets/fonts/glyphicons-halflings-regular.woff2
0 → 100644