Iker Narvaez

output variable test

Showing 117 changed files with 1799 additions and 1506 deletions
@@ -4,4 +4,4 @@ memo = "efe4a26b5775ea537c0383b685d50fa64ee8fa6eec77406c5326d5f54744423f"
 branch = "master"
 name = "github.com/influxdata/influxdb"
 packages = ["client/v2","models","pkg/escape"]
-revision = "31db9d6f468239346a1fe7464b5cf9c85580488f"
+revision = "a6c543039763c0f08253d71a43aefe3b570ecf37"
......
@@ -2,6 +2,7 @@ package influx_client
 
 import(
     influx "github.com/influxdata/influxdb/client/v2"
+    "fmt"
     "time"
 )
 
@@ -32,6 +33,7 @@ func (i *InfluxClient) CreatePoint(tableName string, tags map[string]string, fie
         Precision: "s",
     })
 
+    fmt.Println(i.Db)
     pt, err := influx.NewPoint(tableName, tags, fields, timestamp)
     if err != nil {
         panic(err)
......
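For context, the `influx_client` wrapper above builds on the upstream `client/v2` package. A minimal, self-contained sketch of the same write path follows; the address, database name, measurement, and sample values are assumptions for illustration, not part of the commit (the `fmt.Println(i.Db)` added above looks like a temporary debug trace of the target database):

```go
package main

import (
	"log"
	"time"

	influx "github.com/influxdata/influxdb/client/v2"
)

func main() {
	// Connect to a local InfluxDB instance (address is an assumption).
	c, err := influx.NewHTTPClient(influx.HTTPConfig{Addr: "http://localhost:8086"})
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Batch points for one database, using the same "s" precision as the
	// wrapper's CreatePoint.
	bp, err := influx.NewBatchPoints(influx.BatchPointsConfig{
		Database:  "mydb",
		Precision: "s",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Equivalent of the wrapper's tableName/tags/fields/timestamp arguments.
	pt, err := influx.NewPoint(
		"cpu",
		map[string]string{"host": "server01"},
		map[string]interface{}{"value": 0.64},
		time.Now(),
	)
	if err != nil {
		log.Fatal(err)
	}
	bp.AddPoint(pt)

	if err := c.Write(bp); err != nil {
		log.Fatal(err)
	}
}
```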
@@ -25,17 +25,19 @@ __Additional info:__ [Include gist of relevant config, logs, etc.]
 Also, if this is an issue of for performance, locking, etc the following commands are useful to create debug information for the team.
 
 ```
-curl -o block.txt "http://localhost:8086/debug/pprof/block?debug=1"
-curl -o goroutine.txt "http://localhost:8086/debug/pprof/goroutine?debug=1"
-curl -o heap.txt "http://localhost:8086/debug/pprof/heap?debug=1"
+curl -o profiles.tar.gz "http://localhost:8086/debug/pprof/all?cpu=true"
+
 curl -o vars.txt "http://localhost:8086/debug/vars"
 iostat -xd 1 30 > iostat.txt
-influx -execute "show shards" > shards.txt
-influx -execute "show stats" > stats.txt
-influx -execute "show diagnostics" > diagnostics.txt
 ```
 
-Please run those if possible and link them from a [gist](http://gist.github.com).
+**Please note** It will take at least 30 seconds for the first cURL command above to return a response.
+This is because it will run a CPU profile as part of its information gathering, which takes 30 seconds to collect.
+Ideally you should run these commands when you're experiencing problems, so we can capture the state of the system at that time.
+
+If you're concerned about running a CPU profile (which only has a small, temporary impact on performance), then you can set `?cpu=false` or omit `?cpu=true` altogether.
+
+Please run those if possible and link them from a [gist](http://gist.github.com) or simply attach them as a comment to the issue.
 
 *Please note, the quickest way to fix a bug is to open a Pull Request.*
 
......
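The first cURL command above can also be scripted; a sketch in Go under the same assumptions as the template (a local instance on port 8086, and a client timeout sized to outlast the 30-second CPU profile):

```go
package main

import (
	"io"
	"log"
	"net/http"
	"os"
	"time"
)

func main() {
	// The CPU profile alone takes ~30 seconds to collect, so the client
	// timeout must comfortably exceed that.
	client := &http.Client{Timeout: 2 * time.Minute}

	resp, err := client.Get("http://localhost:8086/debug/pprof/all?cpu=true")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Save the archive exactly as the curl invocation would.
	out, err := os.Create("profiles.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	if _, err := io.Copy(out, resp.Body); err != nil {
		log.Fatal(err)
	}
}
```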
@@ -1,5 +1,14 @@
 ## v1.3.0 [unreleased]
 
+### Removals
+
+The admin UI is removed and unusable in this release. The `[admin]` configuration section will be ignored.
+
+### Configuration Changes
+
+* The top-level config `bind-address` now defaults to `localhost:8088`.
+  The previous default was just `:8088`, causing the backup and restore port to be bound on all available interfaces (i.e. including interfaces on the public internet).
+
 ### Features
 
 - [#8143](https://github.com/influxdata/influxdb/pull/8143): Add WAL sync delay
@@ -15,6 +24,16 @@
 - [#8194](https://github.com/influxdata/influxdb/pull/8194): Add "integral" function to InfluxQL.
 - [#7393](https://github.com/influxdata/influxdb/issues/7393): Add "non_negative_difference" function to InfluxQL.
 - [#8042](https://github.com/influxdata/influxdb/issues/8042): Add bitwise AND, OR and XOR operators to the query language.
+- [#8302](https://github.com/influxdata/influxdb/pull/8302): Write throughput/concurrency improvements
+- [#8273](https://github.com/influxdata/influxdb/issues/8273): Remove the admin UI.
+- [#8327](https://github.com/influxdata/influxdb/pull/8327): Update to go1.8.1
+- [#8348](https://github.com/influxdata/influxdb/pull/8348): Add max concurrent compaction limits
+- [#8366](https://github.com/influxdata/influxdb/pull/8366): Add TSI support tooling.
+- [#8350](https://github.com/influxdata/influxdb/pull/8350): Track HTTP client requests for /write and /query with /debug/requests.
+- [#8384](https://github.com/influxdata/influxdb/pull/8384): Write and compaction stability
+- [#7862](https://github.com/influxdata/influxdb/pull/7861): Add new profile endpoint for gathering all debug profiles and queries in single archive.
+- [#8390](https://github.com/influxdata/influxdb/issues/8390): Add nanosecond duration literal support.
+- [#8394](https://github.com/influxdata/influxdb/pull/8394): Optimize top() and bottom() using an incremental aggregator.
 
 ### Bugfixes
 
@@ -27,6 +46,7 @@
 - [#8064](https://github.com/influxdata/influxdb/issues/8064): Forbid wildcards in binary expressions.
 - [#8148](https://github.com/influxdata/influxdb/issues/8148): Fix fill(linear) when multiple series exist and there are null values.
 - [#7995](https://github.com/influxdata/influxdb/issues/7995): Update liner dependency to handle docker exec.
+- [#7835](https://github.com/influxdata/influxdb/pull/7835): Bind backup and restore port to localhost by default
 - [#7811](https://github.com/influxdata/influxdb/issues/7811): Kill query not killing query
 - [#7457](https://github.com/influxdata/influxdb/issues/7457): KILL QUERY should work during all phases of a query
 - [#8155](https://github.com/influxdata/influxdb/pull/8155): Simplify admin user check.
@@ -42,15 +62,29 @@
 - [#8058](https://github.com/influxdata/influxdb/pull/8058): Enabled golint for admin, httpd, subscriber, udp. @karlding
 - [#8252](https://github.com/influxdata/influxdb/issues/8252): Implicitly cast null to false in binary expressions with a boolean.
 - [#8067](https://github.com/influxdata/influxdb/issues/8067): Restrict fill(none) and fill(linear) to be usable only with aggregate queries.
+- [#8065](https://github.com/influxdata/influxdb/issues/8065): Restrict top() and bottom() selectors to be used with no other functions.
+- [#8266](https://github.com/influxdata/influxdb/issues/8266): top() and bottom() now return the time for every point.
+- [#8315](https://github.com/influxdata/influxdb/issues/8315): Remove default upper time bound on DELETE queries.
+- [#8066](https://github.com/influxdata/influxdb/issues/8066): Fix LIMIT and OFFSET for certain aggregate queries.
+- [#8045](https://github.com/influxdata/influxdb/issues/8045): Refactor the subquery code and fix outer condition queries.
+- [#7425](https://github.com/influxdata/influxdb/issues/7425): Fix compaction aborted log messages
+- [#8123](https://github.com/influxdata/influxdb/issues/8123): TSM compaction does not remove .tmp on error
+- [#8343](https://github.com/influxdata/influxdb/issues/8343): Set the CSV output to an empty string for null values.
+- [#8368](https://github.com/influxdata/influxdb/issues/8368): Compaction exhausting disk resources in InfluxDB
+- [#8358](https://github.com/influxdata/influxdb/issues/8358): Small edits to the etc/config.sample.toml file.
+- [#8392](https://github.com/influxdata/influxdb/issues/8393): Points beyond retention policy scope are dropped silently
 
 ## v1.2.3 [unreleased]
 
 ### Bugfixes
 
+- [#8190](https://github.com/influxdata/influxdb/issues/8190): History file should redact passwords before saving to history.
+- [#8187](https://github.com/influxdata/influxdb/pull/8187): Several statements were missing the DefaultDatabase method
 - [#8022](https://github.com/influxdata/influxdb/issues/8022): Segment violation in models.Tags.Get
 - [#8155](https://github.com/influxdata/influxdb/pull/8155): Simplify admin user check.
 - [#8167](https://github.com/influxdata/influxdb/issues/8167): Fix a regression when math was used with selectors.
 - [#8175](https://github.com/influxdata/influxdb/issues/8175): Ensure the input for certain functions in the query engine are ordered.
+- [#8254](https://github.com/influxdata/influxdb/pull/8254): Fix delete time fields creating unparseable points
 
 ## v1.2.2 [2017-03-14]
 
@@ -134,7 +168,6 @@ The stress tool `influx_stress` will be removed in a subsequent release. We reco
 
 ### Bugfixes
 
-
 - [#7845](https://github.com/influxdata/influxdb/issues/7845): Fix race in storage engine.
 - [#7838](https://github.com/influxdata/influxdb/issues/7838): Ensure Subscriber service can be disabled.
 - [#7822](https://github.com/influxdata/influxdb/issues/7822): Drop database will delete /influxdb/data directory
@@ -156,6 +189,7 @@ The stress tool `influx_stress` will be removed in a subsequent release. We reco
 - [#7563](https://github.com/influxdata/influxdb/issues/7563): RP should not allow `INF` or `0` as a shard duration.
 - [#7396](https://github.com/influxdata/influxdb/issues/7396): CLI should use spaces for alignment, not tabs.
 - [#6527](https://github.com/influxdata/influxdb/issues/6527): 0.12.2 Influx CLI client PRECISION returns "Unknown precision....
+
 ## v1.1.4 [2017-02-27]
 
 ### Bugfixes
......
@@ -32,7 +32,7 @@ curl -G http://localhost:8086/query --data-urlencode 'db=mydb' --data-urlencode
 ```
 **If you don't include a clear test case like this, your issue may not be investigated, and may even be closed**. If writing the data is too difficult, please zip up your data directory and include a link to it in your bug report.
 
-Please note that issues are *not the place to file general questions* such as "how do I use collectd with InfluxDB?" Questions of this nature should be sent to the [Google Group](https://groups.google.com/forum/#!forum/influxdb), not filed as issues. Issues like this will be closed.
+Please note that issues are *not the place to file general questions* such as "how do I use collectd with InfluxDB?" Questions of this nature should be sent to the [InfluxData Community](https://community.influxdata.com/), not filed as issues. Issues like this will be closed.
 
 Feature requests
 ---------------
@@ -69,7 +69,7 @@ second to sign our CLA, which can be found
 
 Installing Go
 -------------
-InfluxDB requires Go 1.7.4.
+InfluxDB requires Go 1.8.1.
 
 At InfluxDB we find gvm, a Go version manager, useful for installing Go. For instructions
 on how to install it see [the gvm page on github](https://github.com/moovweb/gvm).
@@ -77,8 +77,8 @@ on how to install it see [the gvm page on github](https://github.com/moovweb/gvm
 After installing gvm you can install and set the default go version by
 running the following:
 
-    gvm install go1.7.4
-    gvm use go1.7.4 --default
+    gvm install go1.8.1
+    gvm use go1.8.1 --default
 
 Installing GDM
 -------------
......
-# Docker Setup
-========================
-
-This document describes how to build and run a minimal InfluxDB container under Docker. Currently, it has only been tested for local development and assumes that you have a working docker environment.
-
-## Building Image
-
-To build a docker image for InfluxDB from your current checkout, run the following:
-
-```
-$ ./build-docker.sh
-```
-
-This script uses the `golang:1.7.4` image to build a fully static binary of `influxd` and then adds it to a minimal `scratch` image.
-
-To build the image using a different version of go:
-
-```
-$ GO_VER=1.7.4 ./build-docker.sh
-```
-
-Available version can be found [here](https://hub.docker.com/_/golang/).
-
-## Single Node Container
-
-This will start an interactive, single-node, that publishes the containers port `8086` and `8088` to the hosts ports `8086` and `8088` respectively. This is identical to starting `influxd` manually.
-
-```
-$ docker run -it -p 8086:8086 -p 8088:8088 influxdb
-```
......
-FROM busybox:ubuntu-14.04
-
-MAINTAINER Jason Wilder "<jason@influxdb.com>"
-
-# admin, http, udp, cluster, graphite, opentsdb, collectd
-EXPOSE 8083 8086 8086/udp 8088 2003 4242 25826
-
-WORKDIR /app
-
-# copy binary into image
-COPY influxd /app/
-
-# Add influxd to the PATH
-ENV PATH=/app:$PATH
-
-# Generate a default config
-RUN influxd config > /etc/influxdb.toml
-
-# Use /data for all disk storage
-RUN sed -i 's/dir = "\/.*influxdb/dir = "\/data/' /etc/influxdb.toml
-
-VOLUME ["/data"]
-
-ENTRYPOINT ["influxd", "--config", "/etc/influxdb.toml"]
@@ -18,7 +18,7 @@ RUN gem install fpm
 
 # Install go
 ENV GOPATH /root/go
-ENV GO_VERSION 1.7.4
+ENV GO_VERSION 1.8.1
 ENV GO_ARCH 386
 RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \
    tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \
......
@@ -21,7 +21,7 @@ RUN gem install fpm
 
 # Install go
 ENV GOPATH /root/go
-ENV GO_VERSION 1.7.4
+ENV GO_VERSION 1.8.1
 ENV GO_ARCH amd64
 RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \
    tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \
......
@@ -26,7 +26,7 @@ VOLUME $PROJECT_DIR
 
 
 # Install go
-ENV GO_VERSION 1.7.4
+ENV GO_VERSION 1.8.1
 ENV GO_ARCH amd64
 RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \
    tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \
......
@@ -3,7 +3,6 @@ github.com/BurntSushi/toml 99064174e013895bbd9b025c31100bd1d9b590ca
 github.com/bmizerany/pat c068ca2f0aacee5ac3681d68e4d0a003b7d1fd2c
 github.com/boltdb/bolt 4b1ebc1869ad66568b313d0dc410e2be72670dda
 github.com/cespare/xxhash 4a94f899c20bc44d4f5f807cb14529e72aca99d6
-github.com/clarkduvall/hyperloglog 2d38f733946d0a1f2e810513c71b834cbeba1480
 github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
 github.com/dgrijalva/jwt-go 24c63f56522a87ec5339cc3567883f1039378fdb
 github.com/dgryski/go-bits 2ad8d707cc05b1815ce6ff2543bb5e8d8f9298ef
@@ -14,7 +13,6 @@ github.com/influxdata/usage-client 6d3895376368aa52a3a81d2a16e90f0f52371967
 github.com/jwilder/encoding 27894731927e49b0a9023f00312be26733744815
 github.com/paulbellamy/ratecounter 5a11f585a31379765c190c033b6ad39956584447
 github.com/peterh/liner 88609521dc4b6c858fd4c98b628147da928ce4ac
-github.com/rakyll/statik e383bbf6b2ec1a2fb8492dfd152d945fb88919b6
 github.com/retailnext/hllpp 38a7bb71b483e855d35010808143beaf05b67f9d
 github.com/uber-go/atomic 74ca5ec650841aee9f289dce76e928313a37cbc6
 github.com/uber-go/zap fbae0281ffd546fa6d1959fec6075ac5da7fb577
......
-#!/bin/bash
-
-set -e -x
-
-GO_VER=${GO_VER:-1.7.4}
-
-docker run -it -v "${GOPATH}":/gopath -v "$(pwd)":/app -e "GOPATH=/gopath" -w /app golang:$GO_VER sh -c 'CGO_ENABLED=0 go build -a --installsuffix cgo --ldflags="-s" -o influxd ./cmd/influxd'
-
-docker build -t influxdb .
......
@@ -156,22 +156,6 @@ def package_man_files(build_root):
         for f in files:
             run("gzip -9n {}".format(os.path.join(path, f)))
 
-def run_generate():
-    """Run 'go generate' to rebuild any static assets.
-    """
-    logging.info("Running 'go generate'...")
-    if not check_path_for("statik"):
-        run("go install github.com/rakyll/statik")
-    orig_path = None
-    if os.path.join(os.environ.get("GOPATH"), "bin") not in os.environ["PATH"].split(os.pathsep):
-        orig_path = os.environ["PATH"].split(os.pathsep)
-        os.environ["PATH"] = os.environ["PATH"].split(os.pathsep).append(os.path.join(os.environ.get("GOPATH"), "bin"))
-    run("rm -f ./services/admin/statik/statik.go")
-    run("go generate ./services/admin")
-    if orig_path is not None:
-        os.environ["PATH"] = orig_path
-    return True
-
 def go_get(branch, update=False, no_uncommitted=False):
     """Retrieve build dependencies or restore pinned dependencies.
     """
@@ -803,10 +787,6 @@ def main(args):
     if not go_get(args.branch, update=args.update, no_uncommitted=args.no_uncommitted):
         return 1
 
-    if args.generate:
-        if not run_generate():
-            return 1
-
     if args.test:
         if not run_tests(args.race, args.parallel, args.timeout, args.no_vet, args.junit_report):
             return 1
@@ -977,9 +957,6 @@ if __name__ == '__main__':
                         type=str,
                         default=DEFAULT_BUCKET,
                         help='Destination bucket for uploads')
-    parser.add_argument('--generate',
-                        action='store_true',
-                        help='Run "go generate" before building')
     parser.add_argument('--build-tags',
                         metavar='<tags>',
                         help='Optional build tags to use for compilation')
......
@@ -2,7 +2,7 @@ machine:
   services:
     - docker
   environment:
-    GODIST: "go1.7.4.linux-amd64.tar.gz"
+    GODIST: "go1.8.1.linux-amd64.tar.gz"
   post:
     - mkdir -p download
     - test -e download/$GODIST || curl -o download/$GODIST https://storage.googleapis.com/golang/$GODIST
......
@@ -324,7 +324,7 @@ func (p *Point) PrecisionString(precison string) string {
 
 // Name returns the measurement name of the point.
 func (p *Point) Name() string {
-    return p.pt.Name()
+    return string(p.pt.Name())
 }
 
 // Tags returns the tags associated with the point.
......
@@ -284,9 +284,9 @@ func (cmd *Command) exportTSMFile(tsmFilePath string, w io.Writer) error {
             continue
         }
         measurement, field := tsm1.SeriesAndFieldFromCompositeKey(key)
-        field = escape.String(field)
+        field = escape.Bytes(field)
 
-        if err := cmd.writeValues(w, measurement, field, values); err != nil {
+        if err := cmd.writeValues(w, measurement, string(field), values); err != nil {
             // An error from writeValues indicates an IO error, which should be returned.
             return err
         }
@@ -348,9 +348,9 @@ func (cmd *Command) exportWALFile(walFilePath string, w io.Writer, warnDelete fu
         for key, values := range t.Values {
             measurement, field := tsm1.SeriesAndFieldFromCompositeKey([]byte(key))
             // measurements are stored escaped, field names are not
-            field = escape.String(field)
+            field = escape.Bytes(field)
 
-            if err := cmd.writeValues(w, measurement, field, values); err != nil {
+            if err := cmd.writeValues(w, measurement, string(field), values); err != nil {
                 // An error from writeValues indicates an IO error, which should be returned.
                 return err
             }
......
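The change from `escape.String` to `escape.Bytes` keeps the field key as a `[]byte` end to end rather than round-tripping it through a `string`; only the final `writeValues` call converts. A small sketch of the escaping step, assuming `pkg/escape` escapes the line-protocol special characters (commas, spaces, equals signs):

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/pkg/escape"
)

func main() {
	// Field keys come out of the composite key unescaped, so special
	// characters must be escaped before writing line protocol.
	field := []byte("usage idle") // contains a space

	// escape.Bytes works on the []byte directly, avoiding the
	// []byte -> string -> []byte conversions that escape.String implies.
	escaped := escape.Bytes(field)

	fmt.Printf("%s\n", escaped) // expected: usage\ idle
}
```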
@@ -290,6 +290,9 @@ func writeCorpusToWALFile(c corpus) *os.File {
         panic(err)
     }
 
+    if err := w.Flush(); err != nil {
+        panic(err)
+    }
     // (*tsm1.WALSegmentWriter).sync isn't exported, but it only Syncs the file anyway.
     if err := walFile.Sync(); err != nil {
         panic(err)
......
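The test fix above matters because the WAL segment writer buffers its output: `(*os.File).Sync` only persists bytes the operating system has already received, so a buffered writer must be flushed before the file is synced. A standalone sketch of the pattern (the file path is hypothetical):

```go
package main

import (
	"bufio"
	"log"
	"os"
)

func main() {
	f, err := os.Create("/tmp/example.wal") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	w := bufio.NewWriter(f)
	if _, err := w.WriteString("segment data"); err != nil {
		log.Fatal(err)
	}

	// Without Flush, the bytes may still sit in the userspace buffer and
	// Sync would persist an incomplete (possibly empty) file.
	if err := w.Flush(); err != nil {
		log.Fatal(err)
	}

	// Sync forces the OS to write the flushed bytes to stable storage.
	if err := f.Sync(); err != nil {
		log.Fatal(err)
	}
}
```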
@@ -8,6 +8,7 @@ import (
     "os"
 
     "github.com/influxdata/influxdb/cmd"
+    "github.com/influxdata/influxdb/cmd/influx_inspect/dumptsi"
     "github.com/influxdata/influxdb/cmd/influx_inspect/dumptsm"
     "github.com/influxdata/influxdb/cmd/influx_inspect/export"
     "github.com/influxdata/influxdb/cmd/influx_inspect/help"
@@ -53,6 +54,11 @@ func (m *Main) Run(args ...string) error {
         if err := help.NewCommand().Run(args...); err != nil {
             return fmt.Errorf("help: %s", err)
         }
+    case "dumptsi":
+        name := dumptsi.NewCommand()
+        if err := name.Run(args...); err != nil {
+            return fmt.Errorf("dumptsi: %s", err)
+        }
     case "dumptsmdev":
         fmt.Fprintf(m.Stderr, "warning: dumptsmdev is deprecated, use dumptsm instead.\n")
         fallthrough
......
@@ -7,6 +7,7 @@ import (
     "io"
     "os"
     "path/filepath"
+    "sort"
     "strconv"
     "strings"
     "text/tabwriter"
@@ -67,14 +68,14 @@ func (cmd *Command) Run(args ...string) error {
     }
 
     if len(files) == 0 {
-        return fmt.Errorf("no tsm files at %v\n", cmd.dir)
+        return fmt.Errorf("no tsm files at %v", cmd.dir)
     }
 
     tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0)
     fmt.Fprintln(tw, strings.Join([]string{"File", "Series", "Load Time"}, "\t"))
 
     totalSeries := hllpp.New()
-    tagCardialities := map[string]*hllpp.HLLPP{}
+    tagCardinalities := map[string]*hllpp.HLLPP{}
     measCardinalities := map[string]*hllpp.HLLPP{}
     fieldCardinalities := map[string]*hllpp.HLLPP{}
@@ -118,10 +119,10 @@ func (cmd *Command) Run(args ...string) error {
             fieldCount.Add([]byte(field))
 
             for _, t := range tags {
-                tagCount, ok := tagCardialities[string(t.Key)]
+                tagCount, ok := tagCardinalities[string(t.Key)]
                 if !ok {
                     tagCount = hllpp.New()
-                    tagCardialities[string(t.Key)] = tagCount
+                    tagCardinalities[string(t.Key)] = tagCount
                 }
                 tagCount.Add(t.Value)
             }
@@ -140,22 +141,23 @@ func (cmd *Command) Run(args ...string) error {
     tw.Flush()
     println()
     fmt.Printf("Statistics\n")
-    fmt.Printf(" Series:\n")
-    fmt.Printf("   Total (est): %d\n", totalSeries.Count())
+    fmt.Printf("\tSeries:\n")
+    fmt.Printf("\t\tTotal (est): %d\n", totalSeries.Count())
+
     if cmd.detailed {
-        fmt.Printf(" Measurements (est):\n")
-        for t, card := range measCardinalities {
-            fmt.Printf("   %v: %d (%d%%)\n", t, card.Count(), int((float64(card.Count())/float64(totalSeries.Count()))*100))
+        fmt.Printf("\tMeasurements (est):\n")
+        for _, t := range sortKeys(measCardinalities) {
+            fmt.Printf("\t\t%v: %d (%d%%)\n", t, measCardinalities[t].Count(), int((float64(measCardinalities[t].Count())/float64(totalSeries.Count()))*100))
         }
 
-        fmt.Printf(" Fields (est):\n")
-        for t, card := range fieldCardinalities {
-            fmt.Printf("   %v: %d\n", t, card.Count())
+        fmt.Printf("\tFields (est):\n")
+        for _, t := range sortKeys(fieldCardinalities) {
+            fmt.Printf("\t\t%v: %d\n", t, fieldCardinalities[t].Count())
         }
 
-        fmt.Printf(" Tags (est):\n")
-        for t, card := range tagCardialities {
-            fmt.Printf("   %v: %d\n", t, card.Count())
+        fmt.Printf("\tTags (est):\n")
+        for _, t := range sortKeys(tagCardinalities) {
+            fmt.Printf("\t\t%v: %d\n", t, tagCardinalities[t].Count())
         }
     }
@@ -163,6 +165,16 @@ func (cmd *Command) Run(args ...string) error {
     return nil
 }
 
+// sortKeys is a quick helper to return the sorted set of a map's keys
+func sortKeys(vals map[string]*hllpp.HLLPP) (keys []string) {
+    for k := range vals {
+        keys = append(keys, k)
+    }
+    sort.Strings(keys)
+
+    return keys
+}
+
 // printUsage prints the usage message to STDERR.
 func (cmd *Command) printUsage() {
     usage := `Displays shard level report.
......
@@ -18,7 +18,6 @@ import (
     "github.com/influxdata/influxdb/coordinator"
     "github.com/influxdata/influxdb/monitor"
     "github.com/influxdata/influxdb/monitor/diagnostics"
-    "github.com/influxdata/influxdb/services/admin"
     "github.com/influxdata/influxdb/services/collectd"
     "github.com/influxdata/influxdb/services/continuous_querier"
     "github.com/influxdata/influxdb/services/graphite"
@@ -34,7 +33,7 @@ import (
 
 const (
     // DefaultBindAddress is the default address for various RPC services.
-    DefaultBindAddress = ":8088"
+    DefaultBindAddress = "127.0.0.1:8088"
 )
 
 // Config represents the configuration format for the influxd binary.
@@ -45,7 +44,6 @@ type Config struct {
     Retention  retention.Config  `toml:"retention"`
     Precreator precreator.Config `toml:"shard-precreation"`
 
-    Admin      admin.Config      `toml:"admin"`
     Monitor    monitor.Config    `toml:"monitor"`
     Subscriber subscriber.Config `toml:"subscriber"`
     HTTPD      httpd.Config      `toml:"http"`
@@ -71,7 +69,6 @@ func NewConfig() *Config {
     c.Coordinator = coordinator.NewConfig()
     c.Precreator = precreator.NewConfig()
 
-    c.Admin = admin.NewConfig()
     c.Monitor = monitor.NewConfig()
     c.Subscriber = subscriber.NewConfig()
     c.HTTPD = httpd.NewConfig()
......
@@ -22,9 +22,6 @@ dir = "/tmp/data"
 
 [coordinator]
 
-[admin]
-bind-address = ":8083"
-
 [http]
 bind-address = ":8087"
 
@@ -69,8 +66,6 @@ enabled = true
         t.Fatalf("unexpected meta dir: %s", c.Meta.Dir)
     } else if c.Data.Dir != "/tmp/data" {
         t.Fatalf("unexpected data dir: %s", c.Data.Dir)
-    } else if c.Admin.BindAddress != ":8083" {
-        t.Fatalf("unexpected admin bind address: %s", c.Admin.BindAddress)
     } else if c.HTTPD.BindAddress != ":8087" {
         t.Fatalf("unexpected api bind address: %s", c.HTTPD.BindAddress)
     } else if len(c.GraphiteInputs) != 2 {
......
@@ -16,7 +16,6 @@ import (
     "github.com/influxdata/influxdb/influxql"
     "github.com/influxdata/influxdb/models"
     "github.com/influxdata/influxdb/monitor"
-    "github.com/influxdata/influxdb/services/admin"
    "github.com/influxdata/influxdb/services/collectd"
     "github.com/influxdata/influxdb/services/continuous_querier"
     "github.com/influxdata/influxdb/services/graphite"
@@ -252,15 +251,6 @@ func (s *Server) appendRetentionPolicyService(c retention.Config) {
     s.Services = append(s.Services, srv)
 }
 
-func (s *Server) appendAdminService(c admin.Config) {
-    if !c.Enabled {
-        return
-    }
-    c.Version = s.buildInfo.Version
-    srv := admin.NewService(c)
-    s.Services = append(s.Services, srv)
-}
-
 func (s *Server) appendHTTPDService(c httpd.Config) {
     if !c.Enabled {
         return
@@ -374,7 +364,6 @@ func (s *Server) Open() error {
     s.appendMonitorService()
     s.appendPrecreatorService(s.config.Precreator)
     s.appendSnapshotterService()
-    s.appendAdminService(s.config.Admin)
     s.appendContinuousQueryService(s.config.ContinuousQuery)
     s.appendHTTPDService(s.config.HTTPD)
     s.appendRetentionPolicyService(s.config.Retention)
......
@@ -98,13 +98,16 @@ func NewPointsWriter() *PointsWriter {
 
 // ShardMapping contains a mapping of shards to points.
 type ShardMapping struct {
+    n       int
     Points  map[uint64][]models.Point  // The points associated with a shard ID
     Shards  map[uint64]*meta.ShardInfo // The shards that have been mapped, keyed by shard ID
+    Dropped []models.Point             // Points that were dropped
 }
 
 // NewShardMapping creates an empty ShardMapping.
-func NewShardMapping() *ShardMapping {
+func NewShardMapping(n int) *ShardMapping {
     return &ShardMapping{
+        n:      n,
         Points: map[uint64][]models.Point{},
         Shards: map[uint64]*meta.ShardInfo{},
     }
@@ -112,6 +115,9 @@ func NewShardMapping() *ShardMapping {
 
 // MapPoint adds the point to the ShardMapping, associated with the given shardInfo.
 func (s *ShardMapping) MapPoint(shardInfo *meta.ShardInfo, p models.Point) {
+    if cap(s.Points[shardInfo.ID]) < s.n {
+        s.Points[shardInfo.ID] = make([]models.Point, 0, s.n)
+    }
     s.Points[shardInfo.ID] = append(s.Points[shardInfo.ID], p)
     s.Shards[shardInfo.ID] = shardInfo
 }
@@ -218,12 +224,13 @@ func (w *PointsWriter) MapShards(wp *WritePointsRequest) (*ShardMapping, error)
         list = list.Append(*sg)
     }
 
-    mapping := NewShardMapping()
+    mapping := NewShardMapping(len(wp.Points))
     for _, p := range wp.Points {
         sg := list.ShardGroupAt(p.Time())
         if sg == nil {
             // We didn't create a shard group because the point was outside the
             // scope of the RP.
+            mapping.Dropped = append(mapping.Dropped, p)
             atomic.AddInt64(&w.stats.WriteDropped, 1)
             continue
         }
@@ -319,6 +326,10 @@ func (w *PointsWriter) WritePoints(database, retentionPolicy string, consistency
         atomic.AddInt64(&w.stats.SubWriteDrop, 1)
     }
 
+    if err == nil && len(shardMappings.Dropped) > 0 {
+        err = tsdb.PartialWriteError{Reason: "points beyond retention policy", Dropped: len(shardMappings.Dropped)}
+    }
+
     timeout := time.NewTimer(w.WriteTimeout)
     defer timeout.Stop()
     for range shardMappings.Points {
@@ -335,7 +346,7 @@ func (w *PointsWriter) WritePoints(database, retentionPolicy string, consistency
             }
         }
     }
-    return nil
+    return err
 }
 
 // writeToShards writes points to a shard.
......
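With this change, `WritePoints` no longer returns `nil` when points are dropped for falling outside the retention policy; it surfaces a `tsdb.PartialWriteError` carrying the reason and the drop count. A sketch of how a caller might distinguish partial success from a hard failure (the helper function is illustrative, not part of the commit):

```go
package example

import (
	"log"

	"github.com/influxdata/influxdb/coordinator"
	"github.com/influxdata/influxdb/models"
	"github.com/influxdata/influxdb/tsdb"
)

// writeAndReport distinguishes the new partial-write error from a hard failure.
func writeAndReport(w *coordinator.PointsWriter, db, rp string, points []models.Point) {
	err := w.WritePoints(db, rp, models.ConsistencyLevelOne, points)
	if err == nil {
		return
	}
	if pErr, ok := err.(tsdb.PartialWriteError); ok {
		// Points beyond the retention policy were dropped, but the
		// remaining points were written.
		log.Printf("partial write: %s (%d points dropped)", pErr.Reason, pErr.Dropped)
		return
	}
	// Hard failure: nothing can be assumed about what was written.
	log.Printf("write failed: %v", err)
}
```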
@@ -12,6 +12,7 @@ import (
     "github.com/influxdata/influxdb/coordinator"
     "github.com/influxdata/influxdb/models"
     "github.com/influxdata/influxdb/services/meta"
+    "github.com/influxdata/influxdb/tsdb"
 )
 
 // TODO(benbjohnson): Rewrite tests to use cluster_test.MetaClient.
@@ -234,8 +235,12 @@ func TestPointsWriter_MapShards_Invalid(t *testing.T) {
         t.Fatalf("unexpected an error: %v", err)
     }
 
-    if exp := 0; len(shardMappings.Points) != exp {
-        t.Errorf("MapShards() len mismatch. got %v, exp %v", len(shardMappings.Points), exp)
+    if got, exp := len(shardMappings.Points), 0; got != exp {
+        t.Errorf("MapShards() len mismatch. got %v, exp %v", got, exp)
+    }
+
+    if got, exp := len(shardMappings.Dropped), 1; got != exp {
+        t.Fatalf("MapShard() dropped mismatch: got %v, exp %v", got, exp)
     }
 }
 
@@ -286,7 +291,7 @@ func TestPointsWriter_WritePoints(t *testing.T) {
 
             // copy to prevent data race
             theTest := test
-            sm := coordinator.NewShardMapping()
+            sm := coordinator.NewShardMapping(16)
             sm.MapPoint(
                 &meta.ShardInfo{ID: uint64(1), Owners: []meta.ShardOwner{
                     {NodeID: 1},
@@ -365,6 +370,64 @@ func TestPointsWriter_WritePoints(t *testing.T) {
     }
 }
 
+func TestPointsWriter_WritePoints_Dropped(t *testing.T) {
+    pr := &coordinator.WritePointsRequest{
+        Database:        "mydb",
+        RetentionPolicy: "myrp",
+    }
+
+    // Ensure that the test shard groups are created before the points
+    // are created.
+    ms := NewPointsWriterMetaClient()
+
+    // Three points that range over the shardGroup duration (1h) and should map to two
+    // distinct shards
+    pr.AddPoint("cpu", 1.0, time.Now().Add(-24*time.Hour), nil)
+
+    // copy to prevent data race
+    sm := coordinator.NewShardMapping(16)
+
+    // ShardMapper dropped this point
+    sm.Dropped = append(sm.Dropped, pr.Points[0])
+
+    // Local coordinator.Node ShardWriter
+    // lock on the write increment since these functions get called in parallel
+    var mu sync.Mutex
+
+    store := &fakeStore{
+        WriteFn: func(shardID uint64, points []models.Point) error {
+            mu.Lock()
+            defer mu.Unlock()
+            return nil
+        },
+    }
+
+    ms.DatabaseFn = func(database string) *meta.DatabaseInfo {
+        return nil
+    }
+    ms.NodeIDFn = func() uint64 { return 1 }
+
+    subPoints := make(chan *coordinator.WritePointsRequest, 1)
+    sub := Subscriber{}
+    sub.PointsFn = func() chan<- *coordinator.WritePointsRequest {
+        return subPoints
+    }
+
+    c := coordinator.NewPointsWriter()
+    c.MetaClient = ms
+    c.TSDBStore = store
+    c.Subscriber = sub
+    c.Node = &influxdb.Node{ID: 1}
+
+    c.Open()
+    defer c.Close()
+
+    err := c.WritePoints(pr.Database, pr.RetentionPolicy, models.ConsistencyLevelOne, pr.Points)
+    if _, ok := err.(tsdb.PartialWriteError); !ok {
+        t.Errorf("PointsWriter.WritePoints(): got %v, exp %v", err, tsdb.PartialWriteError{})
+    }
+}
+
 type fakePointsWriter struct {
     WritePointsIntoFn func(*coordinator.IntoWriteRequest) error
 }
......
@@ -2,7 +2,7 @@
 
 # The values in this file override the default values used by the system if
 # a config option is not specified. The commented out lines are the configuration
-# field and the default value used. Uncommentting a line and changing the value
+# field and the default value used. Uncommenting a line and changing the value
 # will change the value used at runtime when the process is restarted.
 
 # Once every 24 hours InfluxDB will report usage data to usage.influxdata.com
@@ -11,10 +11,8 @@
 # Change this option to true to disable reporting.
 # reporting-disabled = false
 
-# we'll try to get the hostname automatically, but if it the os returns something
-# that isn't resolvable by other servers in the cluster, use this option to
-# manually set the hostname
-# hostname = "localhost"
+# Bind address to use for the RPC service for backup and restore.
+# bind-address = "127.0.0.1:8088"
 
 ###
 ### [meta]
@@ -89,6 +87,11 @@
   # write or delete
   # compact-full-write-cold-duration = "4h"
 
+  # The maximum number of concurrent full and level compactions that can run at one time. A
+  # value of 0 results in runtime.GOMAXPROCS(0) used at runtime. This setting does not apply
+  # to cache snapshotting.
+  # max-concurrent-compactions = 0
+
   # The maximum series allowed per database before writes are dropped. This limit can prevent
   # high cardinality issues at the database level. This limit can be disabled by setting it to
   # 0.
@@ -118,7 +121,7 @@
   # can help prevent run away queries. Setting the value to 0 disables the limit.
   # query-timeout = "0s"
 
-  # The the time threshold when a query will be logged as a slow query. This limit can be set to help
+  # The time threshold when a query will be logged as a slow query. This limit can be set to help
   # discover slow or resource intensive queries. Setting the value to 0 disables the slow query logging.
   # log-queries-after = "0s"
 
@@ -128,12 +131,9 @@
 
   # The maximum number of series a SELECT can run. A value of 0 will make the maximum series
   # count unlimited.
-
-  # The maximum number of series a SELECT can run. A value of zero will make the maximum series
-  # count unlimited.
   # max-select-series = 0
 
-  # The maxium number of group by time bucket a SELECt can create. A value of zero will max the maximum
+  # The maximum number of group by time buckets a SELECT can create. A value of zero will make the maximum
   # number of buckets unlimited.
   # max-select-buckets = 0
 
@@ -189,27 +189,6 @@
   # store-interval = "10s"
 
 ###
-### [admin]
-###
-### Controls the availability of the built-in, web-based admin interface. If HTTPS is
-### enabled for the admin interface, HTTPS must also be enabled on the [http] service.
-###
-### NOTE: This interface is deprecated as of 1.1.0 and will be removed in a future release.
-
-[admin]
-  # Determines whether the admin service is enabled.
-  # enabled = false
-
-  # The default bind address used by the admin service.
-  # bind-address = ":8083"
-
-  # Whether the admin service should use HTTPS.
-  # https-enabled = false
-
-  # The SSL certificate used when HTTPS is enabled.
-  # https-certificate = "/etc/ssl/influxdb.pem"
-
-###
 ### [http]
 ###
 ### Controls how the HTTP endpoints are configured. These are the primary
@@ -223,13 +202,13 @@
   # The bind address used by the HTTP service.
   # bind-address = ":8086"
 
-  # Determines whether HTTP authentication is enabled.
+  # Determines whether user authentication is enabled over HTTP/HTTPS.
   # auth-enabled = false
 
   # The default realm sent back when issuing a basic auth challenge.
   # realm = "InfluxDB"
 
-  # Determines whether HTTP request logging is enable.d
+  # Determines whether HTTP request logging is enabled.
   # log-enabled = true
 
   # Determines whether detailed write logging is enabled.
......
@@ -1761,6 +1761,10 @@ func (s *SelectStatement) validate(tr targetRequirement) error {
         return err
     }
 
+    if err := s.validateTopBottom(); err != nil {
+        return err
+    }
+
     if err := s.validateAggregates(tr); err != nil {
         return err
     }
@@ -2247,7 +2251,7 @@ func (s *SelectStatement) validateDistinct() error {
     }
 
     if len(s.Fields) > 1 {
-        return fmt.Errorf("aggregate function distinct() can not be combined with other functions or fields")
+        return fmt.Errorf("aggregate function distinct() cannot be combined with other functions or fields")
     }
 
     switch c := s.Fields[0].Expr.(type) {
@@ -2263,6 +2267,19 @@ func (s *SelectStatement) validateDistinct() error {
     return nil
 }
 
+func (s *SelectStatement) validateTopBottom() error {
+    // Ensure there are not multiple calls if top/bottom is present.
+    info := newSelectInfo(s)
+    if len(info.calls) > 1 {
+        for call := range info.calls {
+            if call.Name == "top" || call.Name == "bottom" {
+                return fmt.Errorf("selector function %s() cannot be combined with other functions", call.Name)
+            }
+        }
+    }
+    return nil
+}
+
 // GroupByInterval extracts the time interval, if specified.
 func (s *SelectStatement) GroupByInterval() (time.Duration, error) {
     // return if we've already pulled it out
@@ -4084,7 +4101,7 @@ func TimeRange(expr Expr) (min, max time.Time, err error) {
 
 // TimeRangeAsEpochNano returns the minimum and maximum times, as epoch nano, specified by
 // an expression. If there is no lower bound, the minimum time is returned
-// for minimum. If there is no higher bound, now is returned for maximum.
+// for minimum. If there is no higher bound, the maximum time is returned.
 func TimeRangeAsEpochNano(expr Expr) (min, max int64, err error) {
     tmin, tmax, err := TimeRange(expr)
     if err != nil {
@@ -4097,7 +4114,7 @@ func TimeRangeAsEpochNano(expr Expr) (min, max int64, err error) {
         min = tmin.UnixNano()
     }
     if tmax.IsZero() {
-        max = time.Now().UnixNano()
+        max = time.Unix(0, MaxTime).UnixNano()
     } else {
         max = tmax.UnixNano()
     }
@@ -4283,7 +4300,15 @@ func Rewrite(r Rewriter, node Node) Node {
         n.Fields = Rewrite(r, n.Fields).(Fields)
         n.Dimensions = Rewrite(r, n.Dimensions).(Dimensions)
         n.Sources = Rewrite(r, n.Sources).(Sources)
-        n.Condition = Rewrite(r, n.Condition).(Expr)
+
+        // Rewrite may return nil. Nil does not satisfy the Expr
+        // interface. We only assert the rewritten result to be an
+        // Expr if it is not nil:
+        if cond := Rewrite(r, n.Condition); cond != nil {
+            n.Condition = cond.(Expr)
+        } else {
+            n.Condition = nil
+        }
 
     case *SubQuery:
         n.Statement = Rewrite(r, n.Statement).(*SelectStatement)
......
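Side note on the `Rewrite` change above: a single-value type assertion on a nil interface value panics at runtime, which is why the result is checked before asserting. A minimal, self-contained sketch of the guard, using simplified stand-ins for influxql's `Node` and `Expr` interfaces (not the real types):

```
package main

import "fmt"

type Node interface{ node() }

type Expr interface {
	Node
	expr()
}

// rewrite stands in for influxql.Rewrite; it may legitimately return nil
// when a rewriter removes the condition entirely.
func rewrite(n Node) Node { return nil }

func main() {
	var cond Expr
	// Asserting a nil result with .(Expr) would panic, so only assert
	// when the rewritten node is non-nil.
	if n := rewrite(cond); n != nil {
		cond = n.(Expr)
	} else {
		cond = nil
	}
	fmt.Println(cond) // <nil>
}
```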
1 package influxql 1 package influxql
2 2
3 import ( 3 import (
4 - "bytes"
5 - "container/heap"
6 "fmt" 4 "fmt"
7 "math" 5 "math"
8 "sort" 6 "sort"
...@@ -783,19 +781,17 @@ func IntegerSpreadReduceSlice(a []IntegerPoint) []IntegerPoint { ...@@ -783,19 +781,17 @@ func IntegerSpreadReduceSlice(a []IntegerPoint) []IntegerPoint {
783 return []IntegerPoint{{Time: ZeroTime, Value: max - min}} 781 return []IntegerPoint{{Time: ZeroTime, Value: max - min}}
784 } 782 }
785 783
786 -func newTopIterator(input Iterator, opt IteratorOptions, n *IntegerLiteral, tags []int) (Iterator, error) { 784 +func newTopIterator(input Iterator, opt IteratorOptions, n int) (Iterator, error) {
787 switch input := input.(type) { 785 switch input := input.(type) {
788 case FloatIterator: 786 case FloatIterator:
789 - aggregateFn := NewFloatTopReduceSliceFunc(int(n.Val), tags, opt.Interval)
790 createFn := func() (FloatPointAggregator, FloatPointEmitter) { 787 createFn := func() (FloatPointAggregator, FloatPointEmitter) {
791 - fn := NewFloatSliceFuncReducer(aggregateFn) 788 + fn := NewFloatTopReducer(n)
792 return fn, fn 789 return fn, fn
793 } 790 }
794 return newFloatReduceFloatIterator(input, opt, createFn), nil 791 return newFloatReduceFloatIterator(input, opt, createFn), nil
795 case IntegerIterator: 792 case IntegerIterator:
796 - aggregateFn := NewIntegerTopReduceSliceFunc(int(n.Val), tags, opt.Interval)
797 createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { 793 createFn := func() (IntegerPointAggregator, IntegerPointEmitter) {
798 - fn := NewIntegerSliceFuncReducer(aggregateFn) 794 + fn := NewIntegerTopReducer(n)
799 return fn, fn 795 return fn, fn
800 } 796 }
801 return newIntegerReduceIntegerIterator(input, opt, createFn), nil 797 return newIntegerReduceIntegerIterator(input, opt, createFn), nil
...@@ -804,111 +800,17 @@ func newTopIterator(input Iterator, opt IteratorOptions, n *IntegerLiteral, tags ...@@ -804,111 +800,17 @@ func newTopIterator(input Iterator, opt IteratorOptions, n *IntegerLiteral, tags
804 } 800 }
805 } 801 }
806 802
807 -// NewFloatTopReduceSliceFunc returns the top values within a window. 803 +func newBottomIterator(input Iterator, opt IteratorOptions, n int) (Iterator, error) {
808 -func NewFloatTopReduceSliceFunc(n int, tags []int, interval Interval) FloatReduceSliceFunc {
809 - return func(a []FloatPoint) []FloatPoint {
810 - // Filter by tags if they exist.
811 - if len(tags) > 0 {
812 - a = filterFloatByUniqueTags(a, tags, func(cur, p *FloatPoint) bool {
813 - return p.Value > cur.Value || (p.Value == cur.Value && p.Time < cur.Time)
814 - })
815 - }
816 -
817 - // If we ask for more elements than exist, restrict n to be the length of the array.
818 - size := n
819 - if size > len(a) {
820 - size = len(a)
821 - }
822 -
823 - // Construct a heap preferring higher values and breaking ties
824 - // based on the earliest time for a point.
825 - h := floatPointsSortBy(a, func(a, b *FloatPoint) bool {
826 - if a.Value != b.Value {
827 - return a.Value > b.Value
828 - }
829 - return a.Time < b.Time
830 - })
831 - heap.Init(h)
832 -
833 - // Pop the first n elements and then sort by time.
834 - points := make([]FloatPoint, 0, size)
835 - for i := 0; i < size; i++ {
836 - p := heap.Pop(h).(FloatPoint)
837 - points = append(points, p)
838 - }
839 -
840 - // Either zero out all values or sort the points by time
841 - // depending on if a time interval was given or not.
842 - if !interval.IsZero() {
843 - for i := range points {
844 - points[i].Time = ZeroTime
845 - }
846 - } else {
847 - sort.Stable(floatPointsByTime(points))
848 - }
849 - return points
850 - }
851 -}
852 -
853 -// NewIntegerTopReduceSliceFunc returns the top values within a window.
854 -func NewIntegerTopReduceSliceFunc(n int, tags []int, interval Interval) IntegerReduceSliceFunc {
855 - return func(a []IntegerPoint) []IntegerPoint {
856 - // Filter by tags if they exist.
857 - if len(tags) > 0 {
858 - a = filterIntegerByUniqueTags(a, tags, func(cur, p *IntegerPoint) bool {
859 - return p.Value > cur.Value || (p.Value == cur.Value && p.Time < cur.Time)
860 - })
861 - }
862 -
863 - // If we ask for more elements than exist, restrict n to be the length of the array.
864 - size := n
865 - if size > len(a) {
866 - size = len(a)
867 - }
868 -
869 - // Construct a heap preferring higher values and breaking ties
870 - // based on the earliest time for a point.
871 - h := integerPointsSortBy(a, func(a, b *IntegerPoint) bool {
872 - if a.Value != b.Value {
873 - return a.Value > b.Value
874 - }
875 - return a.Time < b.Time
876 - })
877 - heap.Init(h)
878 -
879 - // Pop the first n elements and then sort by time.
880 - points := make([]IntegerPoint, 0, size)
881 - for i := 0; i < size; i++ {
882 - p := heap.Pop(h).(IntegerPoint)
883 - points = append(points, p)
884 - }
885 -
886 - // Either zero out all values or sort the points by time
887 - // depending on if a time interval was given or not.
888 - if !interval.IsZero() {
889 - for i := range points {
890 - points[i].Time = ZeroTime
891 - }
892 - } else {
893 - sort.Stable(integerPointsByTime(points))
894 - }
895 - return points
896 - }
897 -}
898 -
899 -func newBottomIterator(input Iterator, opt IteratorOptions, n *IntegerLiteral, tags []int) (Iterator, error) {
900 switch input := input.(type) { 804 switch input := input.(type) {
901 case FloatIterator: 805 case FloatIterator:
902 - aggregateFn := NewFloatBottomReduceSliceFunc(int(n.Val), tags, opt.Interval)
903 createFn := func() (FloatPointAggregator, FloatPointEmitter) { 806 createFn := func() (FloatPointAggregator, FloatPointEmitter) {
904 - fn := NewFloatSliceFuncReducer(aggregateFn) 807 + fn := NewFloatBottomReducer(n)
905 return fn, fn 808 return fn, fn
906 } 809 }
907 return newFloatReduceFloatIterator(input, opt, createFn), nil 810 return newFloatReduceFloatIterator(input, opt, createFn), nil
908 case IntegerIterator: 811 case IntegerIterator:
909 - aggregateFn := NewIntegerBottomReduceSliceFunc(int(n.Val), tags, opt.Interval)
910 createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { 812 createFn := func() (IntegerPointAggregator, IntegerPointEmitter) {
911 - fn := NewIntegerSliceFuncReducer(aggregateFn) 813 + fn := NewIntegerBottomReducer(n)
912 return fn, fn 814 return fn, fn
913 } 815 }
914 return newIntegerReduceIntegerIterator(input, opt, createFn), nil 816 return newIntegerReduceIntegerIterator(input, opt, createFn), nil
...@@ -917,158 +819,6 @@ func newBottomIterator(input Iterator, opt IteratorOptions, n *IntegerLiteral, t ...@@ -917,158 +819,6 @@ func newBottomIterator(input Iterator, opt IteratorOptions, n *IntegerLiteral, t
917 } 819 }
918 } 820 }
919 821
920 -// NewFloatBottomReduceSliceFunc returns the bottom values within a window.
921 -func NewFloatBottomReduceSliceFunc(n int, tags []int, interval Interval) FloatReduceSliceFunc {
922 - return func(a []FloatPoint) []FloatPoint {
923 - // Filter by tags if they exist.
924 - if len(tags) > 0 {
925 - a = filterFloatByUniqueTags(a, tags, func(cur, p *FloatPoint) bool {
926 - return p.Value < cur.Value || (p.Value == cur.Value && p.Time < cur.Time)
927 - })
928 - }
929 -
930 - // If we ask for more elements than exist, restrict n to be the length of the array.
931 - size := n
932 - if size > len(a) {
933 - size = len(a)
934 - }
935 -
936 - // Construct a heap preferring lower values and breaking ties
937 - // based on the earliest time for a point.
938 - h := floatPointsSortBy(a, func(a, b *FloatPoint) bool {
939 - if a.Value != b.Value {
940 - return a.Value < b.Value
941 - }
942 - return a.Time < b.Time
943 - })
944 - heap.Init(h)
945 -
946 - // Pop the first n elements and then sort by time.
947 - points := make([]FloatPoint, 0, size)
948 - for i := 0; i < size; i++ {
949 - p := heap.Pop(h).(FloatPoint)
950 - points = append(points, p)
951 - }
952 -
953 - // Either zero out all values or sort the points by time
954 - // depending on if a time interval was given or not.
955 - if !interval.IsZero() {
956 - for i := range points {
957 - points[i].Time = ZeroTime
958 - }
959 - } else {
960 - sort.Stable(floatPointsByTime(points))
961 - }
962 - return points
963 - }
964 -}
965 -
966 -// NewIntegerBottomReduceSliceFunc returns the bottom values within a window.
967 -func NewIntegerBottomReduceSliceFunc(n int, tags []int, interval Interval) IntegerReduceSliceFunc {
968 - return func(a []IntegerPoint) []IntegerPoint {
969 - // Filter by tags if they exist.
970 - if len(tags) > 0 {
971 - a = filterIntegerByUniqueTags(a, tags, func(cur, p *IntegerPoint) bool {
972 - return p.Value < cur.Value || (p.Value == cur.Value && p.Time < cur.Time)
973 - })
974 - }
975 -
976 - // If we ask for more elements than exist, restrict n to be the length of the array.
977 - size := n
978 - if size > len(a) {
979 - size = len(a)
980 - }
981 -
982 - // Construct a heap preferring lower values and breaking ties
983 - // based on the earliest time for a point.
984 - h := integerPointsSortBy(a, func(a, b *IntegerPoint) bool {
985 - if a.Value != b.Value {
986 - return a.Value < b.Value
987 - }
988 - return a.Time < b.Time
989 - })
990 - heap.Init(h)
991 -
992 - // Pop the first n elements and then sort by time.
993 - points := make([]IntegerPoint, 0, size)
994 - for i := 0; i < size; i++ {
995 - p := heap.Pop(h).(IntegerPoint)
996 - points = append(points, p)
997 - }
998 -
999 - // Either zero out all values or sort the points by time
1000 - // depending on if a time interval was given or not.
1001 - if !interval.IsZero() {
1002 - for i := range points {
1003 - points[i].Time = ZeroTime
1004 - }
1005 - } else {
1006 - sort.Stable(integerPointsByTime(points))
1007 - }
1008 - return points
1009 - }
1010 -}
1011 -
1012 -func filterFloatByUniqueTags(a []FloatPoint, tags []int, cmpFunc func(cur, p *FloatPoint) bool) []FloatPoint {
1013 - pointMap := make(map[string]FloatPoint)
1014 - for _, p := range a {
1015 - keyBuf := bytes.NewBuffer(nil)
1016 - for i, index := range tags {
1017 - if i > 0 {
1018 - keyBuf.WriteString(",")
1019 - }
1020 - fmt.Fprintf(keyBuf, "%s", p.Aux[index])
1021 - }
1022 - key := keyBuf.String()
1023 -
1024 - cur, ok := pointMap[key]
1025 - if ok {
1026 - if cmpFunc(&cur, &p) {
1027 - pointMap[key] = p
1028 - }
1029 - } else {
1030 - pointMap[key] = p
1031 - }
1032 - }
1033 -
1034 - // Recreate the original array with our new filtered list.
1035 - points := make([]FloatPoint, 0, len(pointMap))
1036 - for _, p := range pointMap {
1037 - points = append(points, p)
1038 - }
1039 - return points
1040 -}
1041 -
1042 -func filterIntegerByUniqueTags(a []IntegerPoint, tags []int, cmpFunc func(cur, p *IntegerPoint) bool) []IntegerPoint {
1043 - pointMap := make(map[string]IntegerPoint)
1044 - for _, p := range a {
1045 - keyBuf := bytes.NewBuffer(nil)
1046 - for i, index := range tags {
1047 - if i > 0 {
1048 - keyBuf.WriteString(",")
1049 - }
1050 - fmt.Fprintf(keyBuf, "%s", p.Aux[index])
1051 - }
1052 - key := keyBuf.String()
1053 -
1054 - cur, ok := pointMap[key]
1055 - if ok {
1056 - if cmpFunc(&cur, &p) {
1057 - pointMap[key] = p
1058 - }
1059 - } else {
1060 - pointMap[key] = p
1061 - }
1062 - }
1063 -
1064 - // Recreate the original array with our new filtered list.
1065 - points := make([]IntegerPoint, 0, len(pointMap))
1066 - for _, p := range pointMap {
1067 - points = append(points, p)
1068 - }
1069 - return points
1070 -}
1071 -
1072 // newPercentileIterator returns an iterator for operating on a percentile() call. 822 // newPercentileIterator returns an iterator for operating on a percentile() call.
1073 func newPercentileIterator(input Iterator, opt IteratorOptions, percentile float64) (Iterator, error) { 823 func newPercentileIterator(input Iterator, opt IteratorOptions, percentile float64) (Iterator, error) {
1074 switch input := input.(type) { 824 switch input := input.(type) {
......
1 package influxql 1 package influxql
2 2
3 import ( 3 import (
4 + "container/heap"
4 "math" 5 "math"
6 + "sort"
5 "time" 7 "time"
6 8
7 "github.com/influxdata/influxdb/influxql/neldermead" 9 "github.com/influxdata/influxdb/influxql/neldermead"
...@@ -987,3 +989,175 @@ func (r *IntegerIntegralReducer) Close() error { ...@@ -987,3 +989,175 @@ func (r *IntegerIntegralReducer) Close() error {
987 close(r.ch) 989 close(r.ch)
988 return nil 990 return nil
989 } 991 }
992 +
993 +type FloatTopReducer struct {
994 + h *floatPointsByFunc
995 +}
996 +
997 +func NewFloatTopReducer(n int) *FloatTopReducer {
998 + return &FloatTopReducer{
999 + h: floatPointsSortBy(make([]FloatPoint, 0, n), func(a, b *FloatPoint) bool {
1000 + if a.Value != b.Value {
1001 + return a.Value < b.Value
1002 + }
1003 + return a.Time > b.Time
1004 + }),
1005 + }
1006 +}
1007 +
1008 +func (r *FloatTopReducer) AggregateFloat(p *FloatPoint) {
1009 + if r.h.Len() == cap(r.h.points) {
1010 + // Compare the minimum point and the aggregated point. If our value is
1011 + // larger, replace the current min value.
1012 + if !r.h.cmp(&r.h.points[0], p) {
1013 + return
1014 + }
1015 + r.h.points[0] = *p
1016 + heap.Fix(r.h, 0)
1017 + return
1018 + }
1019 + heap.Push(r.h, *p)
1020 +}
1021 +
1022 +func (r *FloatTopReducer) Emit() []FloatPoint {
1023 + // The heap only guarantees that the minimum-value point is at the
1024 + // root; the rest are in no particular order. Sort in reverse so the
1025 + // points are emitted with the highest values first.
1026 + points := make([]FloatPoint, len(r.h.points))
1027 + for i, p := range r.h.points {
1028 + p.Aggregated = 0
1029 + points[i] = p
1030 + }
1031 + h := floatPointsByFunc{points: points, cmp: r.h.cmp}
1032 + sort.Sort(sort.Reverse(&h))
1033 + return points
1034 +}
1035 +
1036 +type IntegerTopReducer struct {
1037 + h *integerPointsByFunc
1038 +}
1039 +
1040 +func NewIntegerTopReducer(n int) *IntegerTopReducer {
1041 + return &IntegerTopReducer{
1042 + h: integerPointsSortBy(make([]IntegerPoint, 0, n), func(a, b *IntegerPoint) bool {
1043 + if a.Value != b.Value {
1044 + return a.Value < b.Value
1045 + }
1046 + return a.Time > b.Time
1047 + }),
1048 + }
1049 +}
1050 +
1051 +func (r *IntegerTopReducer) AggregateInteger(p *IntegerPoint) {
1052 + if r.h.Len() == cap(r.h.points) {
1053 + // Compare the minimum point and the aggregated point. If our value is
1054 + // larger, replace the current min value.
1055 + if !r.h.cmp(&r.h.points[0], p) {
1056 + return
1057 + }
1058 + r.h.points[0] = *p
1059 + heap.Fix(r.h, 0)
1060 + return
1061 + }
1062 + heap.Push(r.h, *p)
1063 +}
1064 +
1065 +func (r *IntegerTopReducer) Emit() []IntegerPoint {
1066 + // The heap only guarantees that the minimum-value point is at the
1067 + // root; the rest are in no particular order. Sort in reverse so the
1068 + // points are emitted with the highest values first.
1069 + points := make([]IntegerPoint, len(r.h.points))
1070 + for i, p := range r.h.points {
1071 + p.Aggregated = 0
1072 + points[i] = p
1073 + }
1074 + h := integerPointsByFunc{points: points, cmp: r.h.cmp}
1075 + sort.Sort(sort.Reverse(&h))
1076 + return points
1077 +}
1078 +
1079 +type FloatBottomReducer struct {
1080 + h *floatPointsByFunc
1081 +}
1082 +
1083 +func NewFloatBottomReducer(n int) *FloatBottomReducer {
1084 + return &FloatBottomReducer{
1085 + h: floatPointsSortBy(make([]FloatPoint, 0, n), func(a, b *FloatPoint) bool {
1086 + if a.Value != b.Value {
1087 + return a.Value > b.Value
1088 + }
1089 + return a.Time > b.Time
1090 + }),
1091 + }
1092 +}
1093 +
1094 +func (r *FloatBottomReducer) AggregateFloat(p *FloatPoint) {
1095 + if r.h.Len() == cap(r.h.points) {
1096 + // Compare the maximum point and the aggregated point. If our value
1097 + // is smaller, replace the current max value.
1098 + if !r.h.cmp(&r.h.points[0], p) {
1099 + return
1100 + }
1101 + r.h.points[0] = *p
1102 + heap.Fix(r.h, 0)
1103 + return
1104 + }
1105 + heap.Push(r.h, *p)
1106 +}
1107 +
1108 +func (r *FloatBottomReducer) Emit() []FloatPoint {
1109 + // The heap only guarantees that the maximum-value point is at the
1110 + // root; the rest are in no particular order. Sort in reverse so the
1111 + // points are emitted with the lowest values first.
1112 + points := make([]FloatPoint, len(r.h.points))
1113 + for i, p := range r.h.points {
1114 + p.Aggregated = 0
1115 + points[i] = p
1116 + }
1117 + h := floatPointsByFunc{points: points, cmp: r.h.cmp}
1118 + sort.Sort(sort.Reverse(&h))
1119 + return points
1120 +}
1121 +
1122 +type IntegerBottomReducer struct {
1123 + h *integerPointsByFunc
1124 +}
1125 +
1126 +func NewIntegerBottomReducer(n int) *IntegerBottomReducer {
1127 + return &IntegerBottomReducer{
1128 + h: integerPointsSortBy(make([]IntegerPoint, 0, n), func(a, b *IntegerPoint) bool {
1129 + if a.Value != b.Value {
1130 + return a.Value > b.Value
1131 + }
1132 + return a.Time > b.Time
1133 + }),
1134 + }
1135 +}
1136 +
1137 +func (r *IntegerBottomReducer) AggregateInteger(p *IntegerPoint) {
1138 + if r.h.Len() == cap(r.h.points) {
1139 + // Compare the maximum point and the aggregated point. If our value
1140 + // is smaller, replace the current max value.
1141 + if !r.h.cmp(&r.h.points[0], p) {
1142 + return
1143 + }
1144 + r.h.points[0] = *p
1145 + heap.Fix(r.h, 0)
1146 + return
1147 + }
1148 + heap.Push(r.h, *p)
1149 +}
1150 +
1151 +func (r *IntegerBottomReducer) Emit() []IntegerPoint {
1152 + // The heap only guarantees that the maximum-value point is at the
1153 + // root; the rest are in no particular order. Sort in reverse so the
1154 + // points are emitted with the lowest values first.
1155 + points := make([]IntegerPoint, len(r.h.points))
1156 + for i, p := range r.h.points {
1157 + p.Aggregated = 0
1158 + points[i] = p
1159 + }
1160 + h := integerPointsByFunc{points: points, cmp: r.h.cmp}
1161 + sort.Sort(sort.Reverse(&h))
1162 + return points
1163 +}
......
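For orientation, a hedged usage sketch of the new bounded-heap reducers (types as added above): the heap never holds more than `n` points, so memory stays O(n) regardless of how many points are aggregated, unlike the removed slice-based reduce functions that materialized the whole window first.

```
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	// Keep the top 2 values from a stream of points.
	r := influxql.NewFloatTopReducer(2)
	for _, p := range []influxql.FloatPoint{
		{Time: 1, Value: 3},
		{Time: 2, Value: 7},
		{Time: 3, Value: 5},
	} {
		p := p // AggregateFloat takes a pointer; copy the loop variable
		r.AggregateFloat(&p)
	}
	// Emit returns the kept points sorted by the reducer's ordering:
	// value 7 (t=2) first, then value 5 (t=3).
	fmt.Println(r.Emit())
}
```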
...@@ -1063,7 +1063,13 @@ type {{$k.name}}Reduce{{$v.Name}}Point struct { ...@@ -1063,7 +1063,13 @@ type {{$k.name}}Reduce{{$v.Name}}Point struct {
1063 // The previous value for the dimension is passed to fn. 1063 // The previous value for the dimension is passed to fn.
1064 func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, error) { 1064 func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, error) {
1065 // Calculate next window. 1065 // Calculate next window.
1066 - var startTime, endTime int64 1066 + var (
1067 + startTime, endTime int64
1068 + window struct {
1069 + name string
1070 + tags string
1071 + }
1072 + )
1067 for { 1073 for {
1068 p, err := itr.input.Next() 1074 p, err := itr.input.Next()
1069 if err != nil || p == nil { 1075 if err != nil || p == nil {
...@@ -1075,6 +1081,7 @@ func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, e ...@@ -1075,6 +1081,7 @@ func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, e
1075 // Unread the point so it can be processed. 1081 // Unread the point so it can be processed.
1076 itr.input.unread(p) 1082 itr.input.unread(p)
1077 startTime, endTime = itr.opt.Window(p.Time) 1083 startTime, endTime = itr.opt.Window(p.Time)
1084 + window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
1078 break 1085 break
1079 } 1086 }
1080 1087
...@@ -1089,14 +1096,25 @@ func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, e ...@@ -1089,14 +1096,25 @@ func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, e
1089 break 1096 break
1090 } else if curr.Nil { 1097 } else if curr.Nil {
1091 continue 1098 continue
1099 + } else if curr.Name != window.name {
1100 + itr.input.unread(curr)
1101 + break
1092 } 1102 }
1093 - tags := curr.Tags.Subset(itr.dims)
1094 1103
1095 - id := curr.Name 1104 + // Ensure this point is within the same final window.
1096 - if len(tags.m) > 0 { 1105 + if curr.Name != window.name {
1097 - id += "\x00" + tags.ID() 1106 + itr.input.unread(curr)
1108 + } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
1109 + itr.input.unread(curr)
1110 + break
1098 } 1111 }
1099 1112
1113 + // Retrieve the tags on this point for this level of the query.
1114 + // These may differ from the bucket dimensions.
1115 + tags := curr.Tags.Subset(itr.dims)
1116 + id := tags.ID()
1117 +
1100 // Retrieve the aggregator for this name/tag combination or create one. 1118 // Retrieve the aggregator for this name/tag combination or create one.
1101 rp := m[id] 1119 rp := m[id]
1102 if rp == nil { 1120 if rp == nil {
...@@ -1112,17 +1130,18 @@ func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, e ...@@ -1112,17 +1130,18 @@ func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, e
1112 rp.Aggregator.Aggregate{{$k.Name}}(curr) 1130 rp.Aggregator.Aggregate{{$k.Name}}(curr)
1113 } 1131 }
1114 1132
1115 - // Reverse sort points by name & tag. 1133 + // Reverse sort points by name & tag if our output is supposed to be ordered.
1116 keys := make([]string, 0, len(m)) 1134 keys := make([]string, 0, len(m))
1117 for k := range m { 1135 for k := range m {
1118 keys = append(keys, k) 1136 keys = append(keys, k)
1119 } 1137 }
1120 - if len(keys) > 1 { 1138 + if len(keys) > 1 && itr.opt.Ordered {
1121 sort.Sort(reverseStringSlice(keys)) 1139 sort.Sort(reverseStringSlice(keys))
1122 } 1140 }
1123 1141
1124 // Assume the points are already sorted until proven otherwise. 1142 // Assume the points are already sorted until proven otherwise.
1125 sortedByTime := true 1143 sortedByTime := true
1144 + // Emit the points for each name & tag combination.
1126 a := make([]{{$v.Name}}Point, 0, len(m)) 1145 a := make([]{{$v.Name}}Point, 0, len(m))
1127 for _, k := range keys { 1146 for _, k := range keys {
1128 rp := m[k] 1147 rp := m[k]
...@@ -1484,6 +1503,70 @@ type {{$k.name}}DedupeIterator struct { ...@@ -1484,6 +1503,70 @@ type {{$k.name}}DedupeIterator struct {
1484 m map[string]struct{} // lookup of points already sent 1503 m map[string]struct{} // lookup of points already sent
1485 } 1504 }
1486 1505
1506 +type {{$k.name}}IteratorMapper struct {
1507 + e *Emitter
1508 + buf []interface{}
1509 + driver IteratorMap // which iterator to use for the primary value, can be nil
1510 + fields []IteratorMap // which iterator to use for an aux field
1511 + point {{$k.Name}}Point
1512 +}
1513 +
1514 +func new{{$k.Name}}IteratorMapper(itrs []Iterator, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *{{$k.name}}IteratorMapper {
1515 + e := NewEmitter(itrs, opt.Ascending, 0)
1516 + e.OmitTime = true
1517 + return &{{$k.name}}IteratorMapper{
1518 + e: e,
1519 + buf: make([]interface{}, len(itrs)),
1520 + driver: driver,
1521 + fields: fields,
1522 + point: {{$k.Name}}Point{
1523 + Aux: make([]interface{}, len(fields)),
1524 + },
1525 + }
1526 +}
1527 +
1528 +func (itr *{{$k.name}}IteratorMapper) Next() (*{{$k.Name}}Point, error) {
1529 + t, name, tags, err := itr.e.loadBuf()
1530 + if err != nil || t == ZeroTime {
1531 + return nil, err
1532 + }
1533 + itr.point.Time = t
1534 + itr.point.Name = name
1535 + itr.point.Tags = tags
1536 +
1537 + itr.e.readInto(t, name, tags, itr.buf)
1538 + if itr.driver != nil {
1539 + if v := itr.driver.Value(tags, itr.buf); v != nil {
1540 + if v, ok := v.({{$k.Type}}); ok {
1541 + itr.point.Value = v
1542 + itr.point.Nil = false
1543 + } else {
1544 + itr.point.Value = {{$k.Nil}}
1545 + itr.point.Nil = true
1546 + }
1547 + } else {
1548 + itr.point.Value = {{$k.Nil}}
1549 + itr.point.Nil = true
1550 + }
1551 + }
1552 + for i, f := range itr.fields {
1553 + itr.point.Aux[i] = f.Value(tags, itr.buf)
1554 + }
1555 + return &itr.point, nil
1556 +}
1557 +
1558 +func (itr *{{$k.name}}IteratorMapper) Stats() IteratorStats {
1559 + stats := IteratorStats{}
1560 + for _, itr := range itr.e.itrs {
1561 + stats.Add(itr.Stats())
1562 + }
1563 + return stats
1564 +}
1565 +
1566 +func (itr *{{$k.name}}IteratorMapper) Close() error {
1567 + return itr.e.Close()
1568 +}
1569 +
1487 type {{$k.name}}FilterIterator struct { 1570 type {{$k.name}}FilterIterator struct {
1488 input {{$k.Name}}Iterator 1571 input {{$k.Name}}Iterator
1489 cond Expr 1572 cond Expr
......
...@@ -740,8 +740,9 @@ func newIteratorOptionsStmt(stmt *SelectStatement, sopt *SelectOptions) (opt Ite ...@@ -740,8 +740,9 @@ func newIteratorOptionsStmt(stmt *SelectStatement, sopt *SelectOptions) (opt Ite
740 } 740 }
741 opt.Interval.Duration = interval 741 opt.Interval.Duration = interval
742 742
743 - // Determine if the input for this select call must be ordered. 743 + // Always request ordered output for the top-level iterators.
744 - opt.Ordered = stmt.IsRawQuery 744 + // The emitter always emits points in sorted order.
745 + opt.Ordered = true
745 746
746 // Determine dimensions. 747 // Determine dimensions.
747 opt.GroupBy = make(map[string]struct{}, len(opt.Dimensions)) 748 opt.GroupBy = make(map[string]struct{}, len(opt.Dimensions))
...@@ -805,17 +806,15 @@ func newIteratorOptionsSubstatement(stmt *SelectStatement, opt IteratorOptions) ...@@ -805,17 +806,15 @@ func newIteratorOptionsSubstatement(stmt *SelectStatement, opt IteratorOptions)
805 subOpt.Fill = NoFill 806 subOpt.Fill = NoFill
806 } 807 }
807 808
808 - // Determine if the input to this iterator needs to be ordered so it outputs 809 + // Inherit the ordering method from the outer query.
809 - // the correct order to the outer query. 810 + subOpt.Ordered = opt.Ordered
810 - interval, err := stmt.GroupByInterval()
811 - if err != nil {
812 - return IteratorOptions{}, err
813 - }
814 - subOpt.Ordered = opt.Ordered && (interval == 0 && stmt.HasSelector())
815 811
816 // If there is no interval for this subquery, but the outer query has an 812 // If there is no interval for this subquery, but the outer query has an
817 // interval, inherit the parent interval. 813 // interval, inherit the parent interval.
818 - if interval == 0 { 814 + interval, err := stmt.GroupByInterval()
815 + if err != nil {
816 + return IteratorOptions{}, err
817 + } else if interval == 0 {
819 subOpt.Interval = opt.Interval 818 subOpt.Interval = opt.Interval
820 } 819 }
821 return subOpt, nil 820 return subOpt, nil
......
1 package influxql 1 package influxql
2 2
3 -type iteratorMapper struct { 3 +import "fmt"
4 - e *Emitter
5 - buf []interface{}
6 - fields []IteratorMap // which iterator to use for an aux field
7 - auxFields []interface{}
8 -}
9 4
10 type IteratorMap interface { 5 type IteratorMap interface {
11 Value(tags Tags, buf []interface{}) interface{} 6 Value(tags Tags, buf []interface{}) interface{}
...@@ -19,43 +14,31 @@ type TagMap string ...@@ -19,43 +14,31 @@ type TagMap string
19 14
20 func (s TagMap) Value(tags Tags, buf []interface{}) interface{} { return tags.Value(string(s)) } 15 func (s TagMap) Value(tags Tags, buf []interface{}) interface{} { return tags.Value(string(s)) }
21 16
22 -func NewIteratorMapper(itrs []Iterator, fields []IteratorMap, opt IteratorOptions) Iterator { 17 +type NullMap struct{}
23 - e := NewEmitter(itrs, opt.Ascending, 0) 18 +
24 - e.OmitTime = true 19 +func (NullMap) Value(tags Tags, buf []interface{}) interface{} { return nil }
25 - return &iteratorMapper{ 20 +
26 - e: e, 21 +func NewIteratorMapper(itrs []Iterator, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) Iterator {
27 - buf: make([]interface{}, len(itrs)), 22 + if driver != nil {
28 - fields: fields, 23 + switch driver := driver.(type) {
29 - auxFields: make([]interface{}, len(fields)), 24 + case FieldMap:
25 + switch itrs[int(driver)].(type) {
26 + case FloatIterator:
27 + return newFloatIteratorMapper(itrs, driver, fields, opt)
28 + case IntegerIterator:
29 + return newIntegerIteratorMapper(itrs, driver, fields, opt)
30 + case StringIterator:
31 + return newStringIteratorMapper(itrs, driver, fields, opt)
32 + case BooleanIterator:
33 + return newBooleanIteratorMapper(itrs, driver, fields, opt)
34 + default:
35 + panic(fmt.Sprintf("unable to map iterator type: %T", itrs[int(driver)]))
30 } 36 }
31 -} 37 + case TagMap:
32 - 38 + return newStringIteratorMapper(itrs, driver, fields, opt)
33 -func (itr *iteratorMapper) Next() (*FloatPoint, error) { 39 + default:
34 - t, name, tags, err := itr.e.loadBuf() 40 + panic(fmt.Sprintf("unable to create iterator mapper with driver type: %T", driver))
35 - if err != nil || t == ZeroTime {
36 - return nil, err
37 } 41 }
38 -
39 - itr.e.readInto(t, name, tags, itr.buf)
40 - for i, f := range itr.fields {
41 - itr.auxFields[i] = f.Value(tags, itr.buf)
42 } 42 }
43 - return &FloatPoint{ 43 + return newFloatIteratorMapper(itrs, nil, fields, opt)
44 - Name: name,
45 - Tags: tags,
46 - Time: t,
47 - Aux: itr.auxFields,
48 - }, nil
49 -}
50 -
51 -func (itr *iteratorMapper) Stats() IteratorStats {
52 - stats := IteratorStats{}
53 - for _, itr := range itr.e.itrs {
54 - stats.Add(itr.Stats())
55 - }
56 - return stats
57 -}
58 -
59 -func (itr *iteratorMapper) Close() error {
60 - return itr.e.Close()
61 } 44 }
......
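A small hedged sketch of the `IteratorMap` implementations touched above (assuming the influxql package as patched): `TagMap` resolves a field from the point's tags, while the new `NullMap` is the fallback for fields that map to nothing in a subquery.

```
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	tags := influxql.NewTags(map[string]string{"host": "serverA"})

	// TagMap pulls its value out of the point's tags.
	var m influxql.IteratorMap = influxql.TagMap("host")
	fmt.Println(m.Value(tags, nil)) // serverA

	// NullMap always resolves to nil, so unmatched fields show up as null.
	m = influxql.NullMap{}
	fmt.Println(m.Value(tags, nil)) // <nil>
}
```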
...@@ -31,7 +31,7 @@ func TestIteratorMapper(t *testing.T) { ...@@ -31,7 +31,7 @@ func TestIteratorMapper(t *testing.T) {
31 {Val: "val2", Type: influxql.String}, 31 {Val: "val2", Type: influxql.String},
32 }, 32 },
33 } 33 }
34 - itr := influxql.NewIteratorMapper(inputs, []influxql.IteratorMap{ 34 + itr := influxql.NewIteratorMapper(inputs, nil, []influxql.IteratorMap{
35 influxql.FieldMap(0), 35 influxql.FieldMap(0),
36 influxql.FieldMap(1), 36 influxql.FieldMap(1),
37 influxql.TagMap("host"), 37 influxql.TagMap("host"),
......
...@@ -2838,6 +2838,14 @@ func ParseDuration(s string) (time.Duration, error) { ...@@ -2838,6 +2838,14 @@ func ParseDuration(s string) (time.Duration, error) {
2838 // Otherwise just use the last character as the unit of measure. 2838 // Otherwise just use the last character as the unit of measure.
2839 unit = string(a[i]) 2839 unit = string(a[i])
2840 switch a[i] { 2840 switch a[i] {
2841 + case 'n':
2842 + if i+1 < len(a) && a[i+1] == 's' {
2843 + unit = string(a[i : i+2])
2844 + d += time.Duration(n)
2845 + i += 2
2846 + continue
2847 + }
2848 + return 0, ErrInvalidDuration
2841 case 'u', 'µ': 2849 case 'u', 'µ':
2842 d += time.Duration(n) * time.Microsecond 2850 d += time.Duration(n) * time.Microsecond
2843 case 'm': 2851 case 'm':
......
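Hedged sketch of the parser behavior after this change: `ns` is now an accepted duration unit (added as a raw nanosecond count), while a bare `n` is rejected, matching the new test cases further down.

```
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/influxql"
)

func main() {
	for _, s := range []string{"10ns", "10u", "10n"} {
		d, err := influxql.ParseDuration(s)
		fmt.Println(s, d, err)
	}
	// Output:
	// 10ns 10ns <nil>
	// 10u 10µs <nil>
	// 10n 0s invalid duration
}
```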
...@@ -2606,12 +2606,12 @@ func TestParser_ParseStatement(t *testing.T) { ...@@ -2606,12 +2606,12 @@ func TestParser_ParseStatement(t *testing.T) {
2606 {s: `SELECT field1 FROM 12`, err: `found 12, expected identifier at line 1, char 20`}, 2606 {s: `SELECT field1 FROM 12`, err: `found 12, expected identifier at line 1, char 20`},
2607 {s: `SELECT 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 FROM myseries`, err: `unable to parse integer at line 1, char 8`}, 2607 {s: `SELECT 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 FROM myseries`, err: `unable to parse integer at line 1, char 8`},
2608 {s: `SELECT 10.5h FROM myseries`, err: `found h, expected FROM at line 1, char 12`}, 2608 {s: `SELECT 10.5h FROM myseries`, err: `found h, expected FROM at line 1, char 12`},
2609 - {s: `SELECT distinct(field1), sum(field1) FROM myseries`, err: `aggregate function distinct() can not be combined with other functions or fields`}, 2609 + {s: `SELECT distinct(field1), sum(field1) FROM myseries`, err: `aggregate function distinct() cannot be combined with other functions or fields`},
2610 - {s: `SELECT distinct(field1), field2 FROM myseries`, err: `aggregate function distinct() can not be combined with other functions or fields`}, 2610 + {s: `SELECT distinct(field1), field2 FROM myseries`, err: `aggregate function distinct() cannot be combined with other functions or fields`},
2611 {s: `SELECT distinct(field1, field2) FROM myseries`, err: `distinct function can only have one argument`}, 2611 {s: `SELECT distinct(field1, field2) FROM myseries`, err: `distinct function can only have one argument`},
2612 {s: `SELECT distinct() FROM myseries`, err: `distinct function requires at least one argument`}, 2612 {s: `SELECT distinct() FROM myseries`, err: `distinct function requires at least one argument`},
2613 {s: `SELECT distinct FROM myseries`, err: `found FROM, expected identifier at line 1, char 17`}, 2613 {s: `SELECT distinct FROM myseries`, err: `found FROM, expected identifier at line 1, char 17`},
2614 - {s: `SELECT distinct field1, field2 FROM myseries`, err: `aggregate function distinct() can not be combined with other functions or fields`}, 2614 + {s: `SELECT distinct field1, field2 FROM myseries`, err: `aggregate function distinct() cannot be combined with other functions or fields`},
2615 {s: `SELECT count(distinct) FROM myseries`, err: `found ), expected (, identifier at line 1, char 22`}, 2615 {s: `SELECT count(distinct) FROM myseries`, err: `found ), expected (, identifier at line 1, char 22`},
2616 {s: `SELECT count(distinct field1, field2) FROM myseries`, err: `count(distinct <field>) can only have one argument`}, 2616 {s: `SELECT count(distinct field1, field2) FROM myseries`, err: `count(distinct <field>) can only have one argument`},
2617 {s: `select count(distinct(too, many, arguments)) from myseries`, err: `count(distinct <field>) can only have one argument`}, 2617 {s: `select count(distinct(too, many, arguments)) from myseries`, err: `count(distinct <field>) can only have one argument`},
...@@ -2680,6 +2680,8 @@ func TestParser_ParseStatement(t *testing.T) { ...@@ -2680,6 +2680,8 @@ func TestParser_ParseStatement(t *testing.T) {
2680 {s: `SELECT sum(value) + count(foo + sum(bar)) FROM cpu`, err: `binary expressions cannot mix aggregates and raw fields`}, 2680 {s: `SELECT sum(value) + count(foo + sum(bar)) FROM cpu`, err: `binary expressions cannot mix aggregates and raw fields`},
2681 {s: `SELECT mean(value) FROM cpu FILL + value`, err: `fill must be a function call`}, 2681 {s: `SELECT mean(value) FROM cpu FILL + value`, err: `fill must be a function call`},
2682 {s: `SELECT sum(mean) FROM (SELECT mean(value) FROM cpu GROUP BY time(1h))`, err: `aggregate functions with GROUP BY time require a WHERE time clause`}, 2682 {s: `SELECT sum(mean) FROM (SELECT mean(value) FROM cpu GROUP BY time(1h))`, err: `aggregate functions with GROUP BY time require a WHERE time clause`},
2683 + {s: `SELECT top(value, 2), max(value) FROM cpu`, err: `selector function top() cannot be combined with other functions`},
2684 + {s: `SELECT bottom(value, 2), max(value) FROM cpu`, err: `selector function bottom() cannot be combined with other functions`},
2683 // See issues https://github.com/influxdata/influxdb/issues/1647 2685 // See issues https://github.com/influxdata/influxdb/issues/1647
2684 // and https://github.com/influxdata/influxdb/issues/4404 2686 // and https://github.com/influxdata/influxdb/issues/4404
2685 //{s: `DELETE`, err: `found EOF, expected FROM at line 1, char 8`}, 2687 //{s: `DELETE`, err: `found EOF, expected FROM at line 1, char 8`},
...@@ -3250,6 +3252,7 @@ func TestParseDuration(t *testing.T) { ...@@ -3250,6 +3252,7 @@ func TestParseDuration(t *testing.T) {
3250 d time.Duration 3252 d time.Duration
3251 err string 3253 err string
3252 }{ 3254 }{
3255 + {s: `10ns`, d: 10},
3253 {s: `10u`, d: 10 * time.Microsecond}, 3256 {s: `10u`, d: 10 * time.Microsecond},
3254 {s: `10µ`, d: 10 * time.Microsecond}, 3257 {s: `10µ`, d: 10 * time.Microsecond},
3255 {s: `15ms`, d: 15 * time.Millisecond}, 3258 {s: `15ms`, d: 15 * time.Millisecond},
...@@ -3270,6 +3273,7 @@ func TestParseDuration(t *testing.T) { ...@@ -3270,6 +3273,7 @@ func TestParseDuration(t *testing.T) {
3270 {s: `ms`, err: "invalid duration"}, 3273 {s: `ms`, err: "invalid duration"},
3271 {s: `1.2w`, err: "invalid duration"}, 3274 {s: `1.2w`, err: "invalid duration"},
3272 {s: `10x`, err: "invalid duration"}, 3275 {s: `10x`, err: "invalid duration"},
3276 + {s: `10n`, err: "invalid duration"},
3273 } 3277 }
3274 3278
3275 for i, tt := range tests { 3279 for i, tt := range tests {
......
1 +package influxql
2 +
3 +type subqueryBuilder struct {
4 + ic IteratorCreator
5 + stmt *SelectStatement
6 +}
7 +
8 +// buildAuxIterator constructs an auxiliary Iterator from a subquery.
9 +func (b *subqueryBuilder) buildAuxIterator(opt IteratorOptions) (Iterator, error) {
10 + // Retrieve a list of fields needed for conditions.
11 + auxFields := opt.Aux
12 + conds := ExprNames(opt.Condition)
13 + if len(conds) > 0 {
14 + auxFields = make([]VarRef, len(opt.Aux)+len(conds))
15 + copy(auxFields, opt.Aux)
16 + copy(auxFields[len(opt.Aux):], conds)
17 + }
18 +
19 + // Map the desired auxiliary fields from the substatement.
20 + indexes := b.mapAuxFields(auxFields)
21 + subOpt, err := newIteratorOptionsSubstatement(b.stmt, opt)
22 + if err != nil {
23 + return nil, err
24 + }
25 + subOpt.Aux = auxFields
26 +
27 + itrs, err := buildIterators(b.stmt, b.ic, subOpt)
28 + if err != nil {
29 + return nil, err
30 + }
31 +
32 + // Construct the iterators for the subquery.
33 + input := NewIteratorMapper(itrs, nil, indexes, subOpt)
34 + // If there is a condition, filter it now.
35 + if opt.Condition != nil {
36 + input = NewFilterIterator(input, opt.Condition, subOpt)
37 + }
38 + return input, nil
39 +}
40 +
41 +func (b *subqueryBuilder) mapAuxFields(auxFields []VarRef) []IteratorMap {
42 + indexes := make([]IteratorMap, len(auxFields))
43 + for i, name := range auxFields {
44 + m := b.mapAuxField(&name)
45 + if m == nil {
46 + // If this field doesn't map to anything, use the NullMap so it
47 + // shows up as null.
48 + m = NullMap{}
49 + }
50 + indexes[i] = m
51 + }
52 + return indexes
53 +}
54 +
55 +func (b *subqueryBuilder) mapAuxField(name *VarRef) IteratorMap {
56 + offset := 0
57 + for i, f := range b.stmt.Fields {
58 + if f.Name() == name.Val {
59 + return FieldMap(i + offset)
60 + } else if call, ok := f.Expr.(*Call); ok && (call.Name == "top" || call.Name == "bottom") {
61 + // We may match one of the arguments in "top" or "bottom".
62 + if len(call.Args) > 2 {
63 + for j, arg := range call.Args[1 : len(call.Args)-1] {
64 + if arg, ok := arg.(*VarRef); ok && arg.Val == name.Val {
65 + return FieldMap(i + j + 1)
66 + }
67 + }
68 + // Increment the offset so we have the correct index for later fields.
69 + offset += len(call.Args) - 2
70 + }
71 + }
72 + }
73 +
74 + // Unable to find this in the list of fields.
75 + // Look within the dimensions and create a field if we find it.
76 + for _, d := range b.stmt.Dimensions {
77 + if d, ok := d.Expr.(*VarRef); ok && name.Val == d.Val {
78 + return TagMap(d.Val)
79 + }
80 + }
81 +
82 + // Unable to find any matches.
83 + return nil
84 +}
85 +
86 +func (b *subqueryBuilder) buildVarRefIterator(expr *VarRef, opt IteratorOptions) (Iterator, error) {
87 + // Look for the field or tag that is driving this query.
88 + driver := b.mapAuxField(expr)
89 + if driver == nil {
91 + // Exit immediately if there is no driver: without a driver there
92 + // are no results.
92 + return nil, nil
93 + }
94 +
95 + // Determine necessary auxiliary fields for this query.
96 + auxFields := opt.Aux
97 + conds := ExprNames(opt.Condition)
98 + if len(conds) > 0 && len(opt.Aux) > 0 {
99 + // Combine the auxiliary fields requested with the ones in the condition.
100 + auxFields = make([]VarRef, len(opt.Aux)+len(conds))
101 + copy(auxFields, opt.Aux)
102 + copy(auxFields[len(opt.Aux):], conds)
103 + } else if len(conds) > 0 {
104 + // Set the auxiliary fields to what is in the condition since we have
105 + // requested none in the query itself.
106 + auxFields = conds
107 + }
108 +
109 + // Map the auxiliary fields to their index in the subquery.
110 + indexes := b.mapAuxFields(auxFields)
111 + subOpt, err := newIteratorOptionsSubstatement(b.stmt, opt)
112 + if err != nil {
113 + return nil, err
114 + }
115 + subOpt.Aux = auxFields
116 +
117 + itrs, err := buildIterators(b.stmt, b.ic, subOpt)
118 + if err != nil {
119 + return nil, err
120 + }
121 +
122 + // Construct the iterators for the subquery.
123 + input := NewIteratorMapper(itrs, driver, indexes, subOpt)
124 + // If there is a condition, filter it now.
125 + if opt.Condition != nil {
126 + input = NewFilterIterator(input, opt.Condition, subOpt)
127 + }
128 + return input, nil
129 +}
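The `top()`/`bottom()` offset bookkeeping in `mapAuxField` is the subtle part of this file, so here is a worked re-creation of the column arithmetic for a hypothetical substatement `SELECT top(value, host, 3), cpu FROM m` (illustration only; the real code walks `*Call` and `*VarRef` nodes):

```
package main

import "fmt"

func main() {
	// Output columns of the substatement:
	//   0 = top's value, 1 = host (extra top() argument), 2 = cpu
	fields := []struct {
		name string   // field name, or call name for top/bottom
		args []string // call arguments, nil for plain fields
	}{
		{name: "top", args: []string{"value", "host", "3"}},
		{name: "cpu"},
	}

	offset := 0
	for i, f := range fields {
		fmt.Printf("%-4s -> column %d\n", f.name, i+offset)
		if (f.name == "top" || f.name == "bottom") && len(f.args) > 2 {
			// Extra arguments between the value and N get their own columns.
			for j, arg := range f.args[1 : len(f.args)-1] {
				fmt.Printf("%-4s -> column %d\n", arg, i+j+1)
			}
			offset += len(f.args) - 2
		}
	}
	// top  -> column 0
	// host -> column 1
	// cpu  -> column 2
}
```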
...@@ -34,6 +34,8 @@ type MetaClientMock struct { ...@@ -34,6 +34,8 @@ type MetaClientMock struct {
34 34
35 RetentionPolicyFn func(database, name string) (rpi *meta.RetentionPolicyInfo, err error) 35 RetentionPolicyFn func(database, name string) (rpi *meta.RetentionPolicyInfo, err error)
36 36
37 + AuthenticateFn func(username, password string) (ui *meta.UserInfo, err error)
38 + AdminUserExistsFn func() bool
37 SetAdminPrivilegeFn func(username string, admin bool) error 39 SetAdminPrivilegeFn func(username string, admin bool) error
38 SetDataFn func(*meta.Data) error 40 SetDataFn func(*meta.Data) error
39 SetPrivilegeFn func(username, database string, p influxql.Privilege) error 41 SetPrivilegeFn func(username, database string, p influxql.Privilege) error
...@@ -43,6 +45,7 @@ type MetaClientMock struct { ...@@ -43,6 +45,7 @@ type MetaClientMock struct {
43 UpdateUserFn func(name, password string) error 45 UpdateUserFn func(name, password string) error
44 UserPrivilegeFn func(username, database string) (*influxql.Privilege, error) 46 UserPrivilegeFn func(username, database string) (*influxql.Privilege, error)
45 UserPrivilegesFn func(username string) (map[string]influxql.Privilege, error) 47 UserPrivilegesFn func(username string) (map[string]influxql.Privilege, error)
48 + UserFn func(username string) (*meta.UserInfo, error)
46 UsersFn func() []meta.UserInfo 49 UsersFn func() []meta.UserInfo
47 } 50 }
48 51
...@@ -150,6 +153,12 @@ func (c *MetaClientMock) UserPrivileges(username string) (map[string]influxql.Pr ...@@ -150,6 +153,12 @@ func (c *MetaClientMock) UserPrivileges(username string) (map[string]influxql.Pr
150 return c.UserPrivilegesFn(username) 153 return c.UserPrivilegesFn(username)
151 } 154 }
152 155
156 +func (c *MetaClientMock) Authenticate(username, password string) (*meta.UserInfo, error) {
157 + return c.AuthenticateFn(username, password)
158 +}
159 +func (c *MetaClientMock) AdminUserExists() bool { return c.AdminUserExistsFn() }
160 +
161 +func (c *MetaClientMock) User(username string) (*meta.UserInfo, error) { return c.UserFn(username) }
153 func (c *MetaClientMock) Users() []meta.UserInfo { return c.UsersFn() } 162 func (c *MetaClientMock) Users() []meta.UserInfo { return c.UsersFn() }
154 163
155 func (c *MetaClientMock) Open() error { return c.OpenFn() } 164 func (c *MetaClientMock) Open() error { return c.OpenFn() }
......
...@@ -46,7 +46,7 @@ const ( ...@@ -46,7 +46,7 @@ const (
46 // Point defines the values that will be written to the database. 46 // Point defines the values that will be written to the database.
47 type Point interface { 47 type Point interface {
48 // Name returns the measurement name for the point. 48 // Name returns the measurement name for the point.
49 - Name() string 49 + Name() []byte
50 50
51 // SetName updates the measurement name for the point. 51 // SetName updates the measurement name for the point.
52 SetName(string) 52 SetName(string)
...@@ -60,6 +60,9 @@ type Point interface { ...@@ -60,6 +60,9 @@ type Point interface {
60 // SetTags replaces the tags for the point. 60 // SetTags replaces the tags for the point.
61 SetTags(tags Tags) 61 SetTags(tags Tags)
62 62
63 + // HasTag returns true if the tag exists for the point.
64 + HasTag(tag []byte) bool
65 +
63 // Fields returns the fields for the point. 66 // Fields returns the fields for the point.
64 Fields() (Fields, error) 67 Fields() (Fields, error)
65 68
...@@ -159,9 +162,6 @@ type FieldIterator interface { ...@@ -159,9 +162,6 @@ type FieldIterator interface {
159 // FloatValue returns the float value of the current field. 162 // FloatValue returns the float value of the current field.
160 FloatValue() (float64, error) 163 FloatValue() (float64, error)
161 164
162 - // Delete deletes the current field.
163 - Delete()
164 -
165 // Reset resets the iterator to its initial state. 165 // Reset resets the iterator to its initial state.
166 Reset() 166 Reset()
167 } 167 }
...@@ -251,6 +251,20 @@ func ParseKey(buf []byte) (string, Tags, error) { ...@@ -251,6 +251,20 @@ func ParseKey(buf []byte) (string, Tags, error) {
251 return string(buf[:i]), tags, nil 251 return string(buf[:i]), tags, nil
252 } 252 }
253 253
254 +func ParseTags(buf []byte) (Tags, error) {
255 + return parseTags(buf), nil
256 +}
257 +
258 +func ParseName(buf []byte) ([]byte, error) {
259 + // Ignore the error: scanMeasurement reports "missing fields", which
260 + // is irrelevant when parsing only a key.
261 + state, i, _ := scanMeasurement(buf, 0)
262 + if state == tagKeyState {
263 + return buf[:i-1], nil
264 + }
265 + return buf[:i], nil
266 +}
267 +
254 // ParsePointsWithPrecision is similar to ParsePoints, but allows the 268 // ParsePointsWithPrecision is similar to ParsePoints, but allows the
255 // caller to provide a precision for time. 269 // caller to provide a precision for time.
256 // 270 //
...@@ -1316,13 +1330,8 @@ func (p *point) name() []byte { ...@@ -1316,13 +1330,8 @@ func (p *point) name() []byte {
1316 return name 1330 return name
1317 } 1331 }
1318 1332
1319 -// Name return the measurement name for the point. 1333 +func (p *point) Name() []byte {
1320 -func (p *point) Name() string { 1334 + return escape.Unescape(p.name())
1321 - if p.cachedName != "" {
1322 - return p.cachedName
1323 - }
1324 - p.cachedName = string(escape.Unescape(p.name()))
1325 - return p.cachedName
1326 } 1335 }
1327 1336
1328 // SetName updates the measurement name for the point. 1337 // SetName updates the measurement name for the point.
...@@ -1355,21 +1364,36 @@ func (p *point) Tags() Tags { ...@@ -1355,21 +1364,36 @@ func (p *point) Tags() Tags {
1355 return p.cachedTags 1364 return p.cachedTags
1356 } 1365 }
1357 1366
1358 -func parseTags(buf []byte) Tags { 1367 +func (p *point) HasTag(tag []byte) bool {
1368 + if len(p.key) == 0 {
1369 + return false
1370 + }
1371 +
1372 + var exists bool
1373 + walkTags(p.key, func(key, value []byte) bool {
1374 + if bytes.Equal(tag, key) {
1375 + exists = true
1376 + return false
1377 + }
1378 + return true
1379 + })
1380 +
1381 + return exists
1382 +}
1383 +
1384 +func walkTags(buf []byte, fn func(key, value []byte) bool) {
1359 if len(buf) == 0 { 1385 if len(buf) == 0 {
1360 - return nil 1386 + return
1361 } 1387 }
1362 1388
1363 pos, name := scanTo(buf, 0, ',') 1389 pos, name := scanTo(buf, 0, ',')
1364 1390
1365 // it's an empty key, so there are no tags 1391 // it's an empty key, so there are no tags
1366 if len(name) == 0 { 1392 if len(name) == 0 {
1367 - return nil 1393 + return
1368 } 1394 }
1369 1395
1370 - tags := make(Tags, 0, bytes.Count(buf, []byte(",")))
1371 hasEscape := bytes.IndexByte(buf, '\\') != -1 1396 hasEscape := bytes.IndexByte(buf, '\\') != -1
1372 -
1373 i := pos + 1 1397 i := pos + 1
1374 var key, value []byte 1398 var key, value []byte
1375 for { 1399 for {
...@@ -1384,14 +1408,29 @@ func parseTags(buf []byte) Tags { ...@@ -1384,14 +1408,29 @@ func parseTags(buf []byte) Tags {
1384 } 1408 }
1385 1409
1386 if hasEscape { 1410 if hasEscape {
1387 - tags = append(tags, NewTag(unescapeTag(key), unescapeTag(value))) 1411 + if !fn(unescapeTag(key), unescapeTag(value)) {
1412 + return
1413 + }
1388 } else { 1414 } else {
1389 - tags = append(tags, NewTag(key, value)) 1415 + if !fn(key, value) {
1416 + return
1417 + }
1390 } 1418 }
1391 1419
1392 i++ 1420 i++
1393 } 1421 }
1422 +}
1423 +
1424 +func parseTags(buf []byte) Tags {
1425 + if len(buf) == 0 {
1426 + return nil
1427 + }
1394 1428
1429 + tags := make(Tags, 0, bytes.Count(buf, []byte(",")))
1430 + walkTags(buf, func(key, value []byte) bool {
1431 + tags = append(tags, NewTag(key, value))
1432 + return true
1433 + })
1395 return tags 1434 return tags
1396 } 1435 }
1397 1436
...@@ -1404,7 +1443,7 @@ func MakeKey(name []byte, tags Tags) []byte { ...@@ -1404,7 +1443,7 @@ func MakeKey(name []byte, tags Tags) []byte {
1404 1443
1405 // SetTags replaces the tags for the point. 1444 // SetTags replaces the tags for the point.
1406 func (p *point) SetTags(tags Tags) { 1445 func (p *point) SetTags(tags Tags) {
1407 - p.key = MakeKey([]byte(p.Name()), tags) 1446 + p.key = MakeKey(p.Name(), tags)
1408 p.cachedTags = tags 1447 p.cachedTags = tags
1409 } 1448 }
1410 1449
...@@ -1414,7 +1453,7 @@ func (p *point) AddTag(key, value string) { ...@@ -1414,7 +1453,7 @@ func (p *point) AddTag(key, value string) {
1414 tags = append(tags, Tag{Key: []byte(key), Value: []byte(value)}) 1453 tags = append(tags, Tag{Key: []byte(key), Value: []byte(value)})
1415 sort.Sort(tags) 1454 sort.Sort(tags)
1416 p.cachedTags = tags 1455 p.cachedTags = tags
1417 - p.key = MakeKey([]byte(p.Name()), tags) 1456 + p.key = MakeKey(p.Name(), tags)
1418 } 1457 }
1419 1458
1420 // Fields returns the fields for the point. 1459 // Fields returns the fields for the point.
...@@ -1879,40 +1918,35 @@ func (a Tags) HashKey() []byte { ...@@ -1879,40 +1918,35 @@ func (a Tags) HashKey() []byte {
1879 return nil 1918 return nil
1880 } 1919 }
1881 1920
1921 + // Type invariant: Tags are sorted
1922 +
1882 escaped := make(Tags, 0, len(a)) 1923 escaped := make(Tags, 0, len(a))
1924 + sz := 0
1883 for _, t := range a { 1925 for _, t := range a {
1884 ek := escapeTag(t.Key) 1926 ek := escapeTag(t.Key)
1885 ev := escapeTag(t.Value) 1927 ev := escapeTag(t.Value)
1886 1928
1887 if len(ev) > 0 { 1929 if len(ev) > 0 {
1888 escaped = append(escaped, Tag{Key: ek, Value: ev}) 1930 escaped = append(escaped, Tag{Key: ek, Value: ev})
1931 + sz += len(ek) + len(ev)
1889 } 1932 }
1890 } 1933 }
1891 1934
1892 - // Extract keys and determine final size. 1935 + sz += len(escaped) + (len(escaped) * 2) // separators
1893 - sz := len(escaped) + (len(escaped) * 2) // separators
1894 - keys := make([][]byte, len(escaped)+1)
1895 - for i, t := range escaped {
1896 - keys[i] = t.Key
1897 - sz += len(t.Key) + len(t.Value)
1898 - }
1899 - keys = keys[:len(escaped)]
1900 - sort.Sort(byteSlices(keys))
1901 1936
1902 // Generate marshaled bytes. 1937 // Generate marshaled bytes.
1903 b := make([]byte, sz) 1938 b := make([]byte, sz)
1904 buf := b 1939 buf := b
1905 idx := 0 1940 idx := 0
1906 - for i, k := range keys { 1941 + for _, k := range escaped {
1907 buf[idx] = ',' 1942 buf[idx] = ','
1908 idx++ 1943 idx++
1909 - copy(buf[idx:idx+len(k)], k) 1944 + copy(buf[idx:idx+len(k.Key)], k.Key)
1910 - idx += len(k) 1945 + idx += len(k.Key)
1911 buf[idx] = '=' 1946 buf[idx] = '='
1912 idx++ 1947 idx++
1913 - v := escaped[i].Value 1948 + copy(buf[idx:idx+len(k.Value)], k.Value)
1914 - copy(buf[idx:idx+len(v)], v) 1949 + idx += len(k.Value)
1915 - idx += len(v)
1916 } 1950 }
1917 return b[:idx] 1951 return b[:idx]
1918 } 1952 }
...@@ -2051,26 +2085,6 @@ func (p *point) FloatValue() (float64, error) { ...@@ -2051,26 +2085,6 @@ func (p *point) FloatValue() (float64, error) {
2051 return f, nil 2085 return f, nil
2052 } 2086 }
2053 2087
2054 -// Delete deletes the current field.
2055 -func (p *point) Delete() {
2056 - switch {
2057 - case p.it.end == p.it.start:
2058 - case p.it.end >= len(p.fields):
2059 - // Remove the trailing comma if there are more than one fields
2060 - p.fields = bytes.TrimSuffix(p.fields[:p.it.start], []byte(","))
2061 -
2062 - case p.it.start == 0:
2063 - p.fields = p.fields[p.it.end:]
2064 - default:
2065 - p.fields = append(p.fields[:p.it.start], p.fields[p.it.end:]...)
2066 - }
2067 -
2068 - p.it.end = p.it.start
2069 - p.it.key = nil
2070 - p.it.valueBuf = nil
2071 - p.it.fieldType = Empty
2072 -}
2073 -
2074 // Reset resets the iterator to its initial state. 2088 // Reset resets the iterator to its initial state.
2075 func (p *point) Reset() { 2089 func (p *point) Reset() {
2076 p.it.fieldType = Empty 2090 p.it.fieldType = Empty
......
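A hedged usage sketch of the reworked models API above: `Name` now returns the unescaped measurement as `[]byte`, and `HasTag` scans the raw series key via `walkTags` without materializing a `Tags` slice.

```
package main

import (
	"fmt"
	"log"

	"github.com/influxdata/influxdb/models"
)

func main() {
	pts, err := models.ParsePointsString(`cpu,host=serverA,region=uswest value=1 1000000000`)
	if err != nil {
		log.Fatal(err)
	}
	p := pts[0]

	fmt.Println(string(p.Name()))         // cpu
	fmt.Println(p.HasTag([]byte("host"))) // true
	fmt.Println(p.HasTag([]byte("dc")))   // false
}
```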
...@@ -40,6 +40,16 @@ func TestMarshal(t *testing.T) { ...@@ -40,6 +40,16 @@ func TestMarshal(t *testing.T) {
40 } 40 }
41 } 41 }
42 42
43 +func TestTags_HashKey(t *testing.T) {
44 + tags = models.NewTags(map[string]string{"A FOO": "bar", "APPLE": "orange", "host": "serverA", "region": "uswest"})
45 + got := tags.HashKey()
46 + if exp := ",A\\ FOO=bar,APPLE=orange,host=serverA,region=uswest"; string(got) != exp {
47 + t.Log("got: ", string(got))
48 + t.Log("exp: ", exp)
49 + t.Error("invalid match")
50 + }
51 +}
52 +
43 func BenchmarkMarshal(b *testing.B) { 53 func BenchmarkMarshal(b *testing.B) {
44 for i := 0; i < b.N; i++ { 54 for i := 0; i < b.N; i++ {
45 tags.HashKey() 55 tags.HashKey()
...@@ -761,7 +771,7 @@ func TestParsePointWhitespace(t *testing.T) { ...@@ -761,7 +771,7 @@ func TestParsePointWhitespace(t *testing.T) {
761 t.Fatalf("[Example %d] got %d points, expected %d", i, got, exp) 771 t.Fatalf("[Example %d] got %d points, expected %d", i, got, exp)
762 } 772 }
763 773
764 - if got, exp := pts[0].Name(), expPoint.Name(); got != exp { 774 + if got, exp := string(pts[0].Name()), string(expPoint.Name()); got != exp {
765 t.Fatalf("[Example %d] got %v measurement, expected %v", i, got, exp) 775 t.Fatalf("[Example %d] got %v measurement, expected %v", i, got, exp)
766 } 776 }
767 777
...@@ -2183,130 +2193,6 @@ m a=2i,b=3i,c=true,d="stuff",e=-0.23,f=123.456 ...@@ -2183,130 +2193,6 @@ m a=2i,b=3i,c=true,d="stuff",e=-0.23,f=123.456
2183 } 2193 }
2184 } 2194 }
2185 2195
2186 -func TestPoint_FieldIterator_Delete_Begin(t *testing.T) {
2187 - points, err := models.ParsePointsString(`m a=1,b=2,c=3`)
2188 - if err != nil || len(points) != 1 {
2189 - t.Fatal("failed parsing point")
2190 - }
2191 -
2192 - fi := points[0].FieldIterator()
2193 - fi.Next() // a
2194 - fi.Delete()
2195 -
2196 - fi.Reset()
2197 -
2198 - got := toFields(fi)
2199 - exp := models.Fields{"b": float64(2), "c": float64(3)}
2200 -
2201 - if !reflect.DeepEqual(got, exp) {
2202 - t.Fatalf("Delete failed, got %#v, exp %#v", got, exp)
2203 - }
2204 -
2205 - if _, err = models.ParsePointsString(points[0].String()); err != nil {
2206 - t.Fatalf("Failed to parse point: %v", err)
2207 - }
2208 -}
2209 -
2210 -func TestPoint_FieldIterator_Delete_Middle(t *testing.T) {
2211 - points, err := models.ParsePointsString(`m a=1,b=2,c=3`)
2212 - if err != nil || len(points) != 1 {
2213 - t.Fatal("failed parsing point")
2214 - }
2215 -
2216 - fi := points[0].FieldIterator()
2217 - fi.Next() // a
2218 - fi.Next() // b
2219 - fi.Delete()
2220 -
2221 - fi.Reset()
2222 -
2223 - got := toFields(fi)
2224 - exp := models.Fields{"a": float64(1), "c": float64(3)}
2225 -
2226 - if !reflect.DeepEqual(got, exp) {
2227 - t.Fatalf("Delete failed, got %#v, exp %#v", got, exp)
2228 - }
2229 -
2230 - if _, err = models.ParsePointsString(points[0].String()); err != nil {
2231 - t.Fatalf("Failed to parse point: %v", err)
2232 - }
2233 -}
2234 -
2235 -func TestPoint_FieldIterator_Delete_End(t *testing.T) {
2236 - points, err := models.ParsePointsString(`m a=1,b=2,c=3`)
2237 - if err != nil || len(points) != 1 {
2238 - t.Fatal("failed parsing point")
2239 - }
2240 -
2241 - fi := points[0].FieldIterator()
2242 - fi.Next() // a
2243 - fi.Next() // b
2244 - fi.Next() // c
2245 - fi.Delete()
2246 -
2247 - fi.Reset()
2248 -
2249 - got := toFields(fi)
2250 - exp := models.Fields{"a": float64(1), "b": float64(2)}
2251 -
2252 - if !reflect.DeepEqual(got, exp) {
2253 - t.Fatalf("Delete failed, got %#v, exp %#v", got, exp)
2254 - }
2255 -
2256 - if _, err = models.ParsePointsString(points[0].String()); err != nil {
2257 - t.Fatalf("Failed to parse point: %v", err)
2258 - }
2259 -}
2260 -
2261 -func TestPoint_FieldIterator_Delete_Nothing(t *testing.T) {
2262 - points, err := models.ParsePointsString(`m a=1,b=2,c=3`)
2263 - if err != nil || len(points) != 1 {
2264 - t.Fatal("failed parsing point")
2265 - }
2266 -
2267 - fi := points[0].FieldIterator()
2268 - fi.Delete()
2269 -
2270 - fi.Reset()
2271 -
2272 - got := toFields(fi)
2273 - exp := models.Fields{"a": float64(1), "b": float64(2), "c": float64(3)}
2274 -
2275 - if !reflect.DeepEqual(got, exp) {
2276 - t.Fatalf("Delete failed, got %#v, exp %#v", got, exp)
2277 - }
2278 -
2279 - if _, err = models.ParsePointsString(points[0].String()); err != nil {
2280 - t.Fatalf("Failed to parse point: %v", err)
2281 - }
2282 -}
2283 -
2284 -func TestPoint_FieldIterator_Delete_Twice(t *testing.T) {
2285 - points, err := models.ParsePointsString(`m a=1,b=2,c=3`)
2286 - if err != nil || len(points) != 1 {
2287 - t.Fatal("failed parsing point")
2288 - }
2289 -
2290 - fi := points[0].FieldIterator()
2291 - fi.Next() // a
2292 - fi.Next() // b
2293 - fi.Delete()
2294 - fi.Delete() // no-op
2295 -
2296 - fi.Reset()
2297 -
2298 - got := toFields(fi)
2299 - exp := models.Fields{"a": float64(1), "c": float64(3)}
2300 -
2301 - if !reflect.DeepEqual(got, exp) {
2302 - t.Fatalf("Delete failed, got %#v, exp %#v", got, exp)
2303 - }
2304 -
2305 - if _, err = models.ParsePointsString(points[0].String()); err != nil {
2306 - t.Fatalf("Failed to parse point: %v", err)
2307 - }
2308 -}
2309 -
2310 func TestEscapeStringField(t *testing.T) { 2196 func TestEscapeStringField(t *testing.T) {
2311 cases := []struct { 2197 cases := []struct {
2312 in string 2198 in string
......
...@@ -82,16 +82,16 @@ func (v *compressedList) MarshalBinary() (data []byte, err error) { ...@@ -82,16 +82,16 @@ func (v *compressedList) MarshalBinary() (data []byte, err error) {
82 82
83 func (v *compressedList) UnmarshalBinary(data []byte) error { 83 func (v *compressedList) UnmarshalBinary(data []byte) error {
84 // Set the count. 84 // Set the count.
85 - v.count = binary.BigEndian.Uint32(data[:4]) 85 + v.count, data = binary.BigEndian.Uint32(data[:4]), data[4:]
86 86
87 // Set the last value. 87 // Set the last value.
88 - v.last = binary.BigEndian.Uint32(data[4:8]) 88 + v.last, data = binary.BigEndian.Uint32(data[:4]), data[4:]
89 89
90 // Set the list. 90 // Set the list.
91 - sz := int(data[8]) | int(data[9]) | int(data[10]) | int(data[11]) 91 + sz, data := binary.BigEndian.Uint32(data[:4]), data[4:]
92 - v.b = make([]uint8, 0, sz) 92 + v.b = make([]uint8, sz)
93 - for i := 12; i < sz+12; i++ { 93 + for i := uint32(0); i < sz; i++ {
94 - v.b = append(v.b, uint8(data[i])) 94 + v.b[i] = uint8(data[i])
95 } 95 }
96 return nil 96 return nil
97 } 97 }
......
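The rewritten decoder above reads a fixed header of three big-endian `uint32` values — count, last, payload length — followed by the payload itself. A minimal standalone sketch of the same layout (the `decodeList` name is illustrative; this assumes the matching `MarshalBinary` writes the fields in that order):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// decodeList mirrors the [count|last|len|payload] layout read by
// UnmarshalBinary: three big-endian uint32 header fields, then len bytes.
func decodeList(data []byte) (count, last uint32, b []byte, err error) {
	if len(data) < 12 {
		return 0, 0, nil, fmt.Errorf("short buffer: %d bytes", len(data))
	}
	count, data = binary.BigEndian.Uint32(data[:4]), data[4:]
	last, data = binary.BigEndian.Uint32(data[:4]), data[4:]
	sz, data := binary.BigEndian.Uint32(data[:4]), data[4:]
	if uint32(len(data)) < sz {
		return 0, 0, nil, fmt.Errorf("truncated payload: want %d bytes", sz)
	}
	b = make([]byte, sz)
	copy(b, data[:sz])
	return count, last, b, nil
}

func main() {
	// Build a buffer in the same layout and round-trip it.
	payload := []byte{1, 2, 3}
	buf := make([]byte, 12, 12+len(payload))
	binary.BigEndian.PutUint32(buf[0:4], 7)                     // count
	binary.BigEndian.PutUint32(buf[4:8], 42)                    // last
	binary.BigEndian.PutUint32(buf[8:12], uint32(len(payload))) // size
	buf = append(buf, payload...)

	count, last, b, err := decodeList(buf)
	fmt.Println(count, last, b, err) // 7 42 [1 2 3] <nil>
}
```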
...@@ -41,3 +41,60 @@ func (p *Bytes) Put(c []byte) { ...@@ -41,3 +41,60 @@ func (p *Bytes) Put(c []byte) {
41 default: 41 default:
42 } 42 }
43 } 43 }
44 +
45 +// LimitedBytes is a pool of byte slices that can be re-used. Slices in
46 +// this pool will not be garbage collected when not in use. The pool will
47 +// hold onto a fixed number of byte slices of a maximum size. If the
48 +// pool is empty, Get returns a newly allocated byte slice. Byte slices
49 +// added to the pool whose capacity is at or above the max size are
50 +// dropped.
51 +type LimitedBytes struct {
52 + allocated int64
53 + maxSize int
54 + pool chan []byte
55 +}
56 +
57 +// NewLimitedBytes returns a LimitedBytes pool with capacity for max
58 +// byte slices to be pooled.
59 +func NewLimitedBytes(capacity int, maxSize int) *LimitedBytes {
60 + return &LimitedBytes{
61 + pool: make(chan []byte, capacity),
62 + maxSize: maxSize,
63 + }
64 +}
65 +
66 +// Get returns a byte slice of length sz with at least sz capacity. Items
67 +// returned may not be in the zero state and should be reset by the
68 +// caller.
69 +func (p *LimitedBytes) Get(sz int) []byte {
70 + var c []byte
71 +
72 + // Try to reuse a slice from the pool. If the pool is empty,
73 + // return a new allocation rather than blocking.
74 + select {
75 + case c = <-p.pool:
76 + default:
77 + return make([]byte, sz)
78 + }
79 +
80 + if cap(c) < sz {
81 + return make([]byte, sz)
82 + }
83 +
84 + return c[:sz]
85 +}
86 +
87 +// Put returns a slice back to the pool. If the pool is full, the byte
88 +// slice is discarded. If the byte slice's capacity is at or above the
89 +// configured max size, it is also discarded.
90 +func (p *LimitedBytes) Put(c []byte) {
91 + // Drop buffers whose capacity meets or exceeds the max size
92 + if cap(c) >= p.maxSize {
93 + return
94 + }
95 +
96 + select {
97 + case p.pool <- c:
98 + default:
99 + }
100 +}
......
1 +package pool_test
2 +
3 +import (
4 + "testing"
5 +
6 + "github.com/influxdata/influxdb/pkg/pool"
7 +)
8 +
9 +func TestLimitedBytePool_Put_MaxSize(t *testing.T) {
10 + bp := pool.NewLimitedBytes(1, 10)
11 + bp.Put(make([]byte, 1024)) // should be dropped
12 +
13 + if got, exp := cap(bp.Get(10)), 10; got != exp {
14 + t.Fatalf("max cap size exceeded: got %v, exp %v", got, exp)
15 + }
16 +}
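To make the intended reuse pattern concrete, here is a minimal usage sketch of the new pool (it assumes only the `NewLimitedBytes`, `Get`, and `Put` API added above; the capacities and buffer sizes are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/pkg/pool"
)

func main() {
	// Hold at most 4 pooled slices; drop any slice at or above 64 KB.
	bp := pool.NewLimitedBytes(4, 64*1024)

	for i := 0; i < 3; i++ {
		buf := bp.Get(1024) // length 1024; contents may be stale on reuse
		for j := range buf {
			buf[j] = 0 // reset before use, as documented on Get
		}
		// ... use buf as scratch space ...
		bp.Put(buf) // re-pooled, or dropped if the pool is full
	}
	fmt.Println("done")
}
```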
1 -# InfluxDB Admin Interface
2 -
3 -This is the built-in admin interface that ships with InfluxDB. The service is intended to have little overhead and minimal preprocessing steps.
4 -
5 -## How it works
6 -
7 -Static assets, located in the `assets` directory, are embedded in the `influxd` binary and served from memory using a simple fileserver.
8 -
9 -The admin UI itself uses [React](https://github.com/facebook/react) for the user interface to interact directly with the InfluxDB API, usually running on port `8086`.
10 -
11 -## Building
12 -
13 -The only step required to bundle the admin UI with InfluxDB is to create a compressed file system using `statik` as follows:
14 -
15 -```
16 -go get github.com/rakyll/statik # make sure $GOPATH/bin is listed in your PATH
17 -cd $GOPATH/src/github.com/influxdata/influxdb
18 -go generate github.com/influxdata/influxdb/services/admin
19 -```
20 -
21 -The `go generate ./...` command will run `statik` to generate the `statik/statik.go` file. The generated `go` file will embed the admin interface assets into the InfluxDB binary.
22 -
23 -This step should be run before submitting any pull requests which include modifications to admin interface assets.
1 -package admin // import "github.com/influxdata/influxdb/services/admin"
2 -
3 -//go:generate statik -src=assets
4 -//go:generate go fmt statik/statik.go
1 -Please note that these files are embedded into the `influxd` binary using the
2 -[statik](https://github.com/rakyll/statik) tool. `go generate` needs to be run
3 -whenever there are changes made to files in this directory. See the admin
4 -interface readme for more information.
1 -body {
2 - padding-top: 70px;
3 - /* Required padding for .navbar-fixed-top. Remove if using .navbar-static-top. Change if height of navigation changes. */
4 -}
5 -
6 -html,
7 -body {
8 - height: 100%;
9 - /* The html and body elements cannot have any padding or margin. */
10 -}
11 -
12 -code {
13 - display: block;
14 -}
15 -
16 -#settings {
17 - display: none;
18 -}
19 -
20 -#settings form > div {
21 - margin-right: 20px;
22 -}
23 -
24 -#settings form input#port {
25 - width: 80px;
26 -}
27 -
28 -#settings form label {
29 - padding-right: 5px;
30 -}
31 -
32 -div#content {
33 - margin-bottom: -10px;
34 -}
35 -
36 -div#table h2 {
37 - color: #999;
38 - margin-top: -8px;
39 - font-size: 16px
40 -}
41 -
42 -textarea#content-data {
43 - font-family: "Courier New";
44 - height: 200px;
45 -}
46 -
47 -div#query-alerts {
48 - margin-top: 30px;
49 -}
50 -
51 -div#modal-error, div#modal-success, div#query-error, div#query-success {
52 - display: none;
53 -}
54 -
55 -/* Wrapper for page content to push down footer */
56 -#wrap {
57 - min-height: 100%;
58 - height: auto !important;
59 - height: 100%;
60 - /* Negative indent footer by it's height */
61 - margin: 0 auto -60px;
62 -}
63 -
64 -/* Set the fixed height of the footer here */
65 -#push,
66 -#footer {
67 - height: 60px;
68 -}
69 -#footer {
70 - background-color: #f5f5f5;
71 - border-top: 1px solid #dfdfdf;
72 -}
73 -
74 -#footer p {
75 - margin: 20px 0;
76 -}
77 -
78 -/* Lastly, apply responsive CSS fixes as necessary */
79 -@media (max-width: 767px) {
80 - #footer {
81 - margin-left: -20px;
82 - margin-right: -20px;
83 - padding-left: 20px;
84 - padding-right: 20px;
85 - }
86 -}
87 -
1 -.dropdown-menu > li > label {
2 - display: block;
3 - padding: 3px 20px;
4 - clear: both;
5 - font-weight: normal;
6 - line-height: 1.42857143;
7 - color: #333333;
8 - white-space: nowrap;
9 -}
10 -.dropdown-menu > li > label:hover,
11 -.dropdown-menu > li > label:focus {
12 - text-decoration: none;
13 - color: #262626;
14 - background-color: #f5f5f5;
15 -}
16 -.dropdown-menu > li > input:checked ~ label,
17 -.dropdown-menu > li > input:checked ~ label:hover,
18 -.dropdown-menu > li > input:checked ~ label:focus,
19 -.dropdown-menu > .active > label,
20 -.dropdown-menu > .active > label:hover,
21 -.dropdown-menu > .active > label:focus {
22 - color: #ffffff;
23 - text-decoration: none;
24 - outline: 0;
25 - background-color: #428bca;
26 -}
27 -.dropdown-menu > li > input[disabled] ~ label,
28 -.dropdown-menu > li > input[disabled] ~ label:hover,
29 -.dropdown-menu > li > input[disabled] ~ label:focus,
30 -.dropdown-menu > .disabled > label,
31 -.dropdown-menu > .disabled > label:hover,
32 -.dropdown-menu > .disabled > label:focus {
33 - color: #999999;
34 -}
35 -.dropdown-menu > li > input[disabled] ~ label:hover,
36 -.dropdown-menu > li > input[disabled] ~ label:focus,
37 -.dropdown-menu > .disabled > label:hover,
38 -.dropdown-menu > .disabled > label:focus {
39 - text-decoration: none;
40 - background-color: transparent;
41 - background-image: none;
42 - filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
43 - cursor: not-allowed;
44 -}
45 -.dropdown-menu > li > label {
46 - margin-bottom: 0;
47 - cursor: pointer;
48 -}
49 -.dropdown-menu > li > input[type="radio"],
50 -.dropdown-menu > li > input[type="checkbox"] {
51 - display: none;
52 - position: absolute;
53 - top: -9999em;
54 - left: -9999em;
55 -}
56 -.dropdown-menu > li > label:focus,
57 -.dropdown-menu > li > input:focus ~ label {
58 - outline: thin dotted;
59 - outline: 5px auto -webkit-focus-ring-color;
60 - outline-offset: -2px;
61 -}
62 -.dropdown-menu.pull-right {
63 - right: 0;
64 - left: auto;
65 -}
66 -.dropdown-menu.pull-top {
67 - bottom: 100%;
68 - top: auto;
69 - margin: 0 0 2px;
70 - -webkit-box-shadow: 0 -6px 12px rgba(0, 0, 0, 0.175);
71 - box-shadow: 0 -6px 12px rgba(0, 0, 0, 0.175);
72 -}
73 -.dropdown-menu.pull-center {
74 - right: 50%;
75 - left: auto;
76 -}
77 -.dropdown-menu.pull-middle {
78 - right: 100%;
79 - margin: 0 2px 0 0;
80 - box-shadow: -5px 0 10px rgba(0, 0, 0, 0.2);
81 - left: auto;
82 -}
83 -.dropdown-menu.pull-middle.pull-right {
84 - right: auto;
85 - left: 100%;
86 - margin: 0 0 0 2px;
87 - box-shadow: 5px 0 10px rgba(0, 0, 0, 0.2);
88 -}
89 -.dropdown-menu.pull-middle.pull-center {
90 - right: 50%;
91 - margin: 0;
92 - box-shadow: 0 0 10px rgba(0, 0, 0, 0.2);
93 -}
94 -.dropdown-menu.bullet {
95 - margin-top: 8px;
96 -}
97 -.dropdown-menu.bullet:before {
98 - width: 0;
99 - height: 0;
100 - content: '';
101 - display: inline-block;
102 - position: absolute;
103 - border-color: transparent;
104 - border-style: solid;
105 - -webkit-transform: rotate(360deg);
106 - border-width: 0 7px 7px;
107 - border-bottom-color: #cccccc;
108 - border-bottom-color: rgba(0, 0, 0, 0.15);
109 - top: -7px;
110 - left: 9px;
111 -}
112 -.dropdown-menu.bullet:after {
113 - width: 0;
114 - height: 0;
115 - content: '';
116 - display: inline-block;
117 - position: absolute;
118 - border-color: transparent;
119 - border-style: solid;
120 - -webkit-transform: rotate(360deg);
121 - border-width: 0 6px 6px;
122 - border-bottom-color: #ffffff;
123 - top: -6px;
124 - left: 10px;
125 -}
126 -.dropdown-menu.bullet.pull-right:before {
127 - left: auto;
128 - right: 9px;
129 -}
130 -.dropdown-menu.bullet.pull-right:after {
131 - left: auto;
132 - right: 10px;
133 -}
134 -.dropdown-menu.bullet.pull-top {
135 - margin-top: 0;
136 - margin-bottom: 8px;
137 -}
138 -.dropdown-menu.bullet.pull-top:before {
139 - top: auto;
140 - bottom: -7px;
141 - border-bottom-width: 0;
142 - border-top-width: 7px;
143 - border-top-color: #cccccc;
144 - border-top-color: rgba(0, 0, 0, 0.15);
145 -}
146 -.dropdown-menu.bullet.pull-top:after {
147 - top: auto;
148 - bottom: -6px;
149 - border-bottom: none;
150 - border-top-width: 6px;
151 - border-top-color: #ffffff;
152 -}
153 -.dropdown-menu.bullet.pull-center:before {
154 - left: auto;
155 - right: 50%;
156 - margin-right: -7px;
157 -}
158 -.dropdown-menu.bullet.pull-center:after {
159 - left: auto;
160 - right: 50%;
161 - margin-right: -6px;
162 -}
163 -.dropdown-menu.bullet.pull-middle {
164 - margin-right: 8px;
165 -}
166 -.dropdown-menu.bullet.pull-middle:before {
167 - top: 50%;
168 - left: 100%;
169 - right: auto;
170 - margin-top: -7px;
171 - border-right-width: 0;
172 - border-bottom-color: transparent;
173 - border-top-width: 7px;
174 - border-left-color: #cccccc;
175 - border-left-color: rgba(0, 0, 0, 0.15);
176 -}
177 -.dropdown-menu.bullet.pull-middle:after {
178 - top: 50%;
179 - left: 100%;
180 - right: auto;
181 - margin-top: -6px;
182 - border-right-width: 0;
183 - border-bottom-color: transparent;
184 - border-top-width: 6px;
185 - border-left-color: #ffffff;
186 -}
187 -.dropdown-menu.bullet.pull-middle.pull-right {
188 - margin-right: 0;
189 - margin-left: 8px;
190 -}
191 -.dropdown-menu.bullet.pull-middle.pull-right:before {
192 - left: -7px;
193 - border-left-width: 0;
194 - border-right-width: 7px;
195 - border-right-color: #cccccc;
196 - border-right-color: rgba(0, 0, 0, 0.15);
197 -}
198 -.dropdown-menu.bullet.pull-middle.pull-right:after {
199 - left: -6px;
200 - border-left-width: 0;
201 - border-right-width: 6px;
202 - border-right-color: #ffffff;
203 -}
204 -.dropdown-menu.bullet.pull-middle.pull-center {
205 - margin-left: 0;
206 - margin-right: 0;
207 -}
208 -.dropdown-menu.bullet.pull-middle.pull-center:before {
209 - border: none;
210 - display: none;
211 -}
212 -.dropdown-menu.bullet.pull-middle.pull-center:after {
213 - border: none;
214 - display: none;
215 -}
216 -.dropdown-submenu {
217 - position: relative;
218 -}
219 -.dropdown-submenu > .dropdown-menu {
220 - top: 0;
221 - left: 100%;
222 - margin-top: -6px;
223 - margin-left: -1px;
224 - border-top-left-radius: 0;
225 -}
226 -.dropdown-submenu > a:before {
227 - display: block;
228 - float: right;
229 - width: 0;
230 - height: 0;
231 - content: "";
232 - margin-top: 6px;
233 - margin-right: -8px;
234 - border-width: 4px 0 4px 4px;
235 - border-style: solid;
236 - border-left-style: dashed;
237 - border-top-color: transparent;
238 - border-bottom-color: transparent;
239 -}
240 -@media (max-width: 767px) {
241 - .navbar-nav .dropdown-submenu > a:before {
242 - margin-top: 8px;
243 - border-color: inherit;
244 - border-style: solid;
245 - border-width: 4px 4px 0;
246 - border-left-color: transparent;
247 - border-right-color: transparent;
248 - }
249 - .navbar-nav .dropdown-submenu > a {
250 - padding-left: 40px;
251 - }
252 - .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > a,
253 - .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > label {
254 - padding-left: 35px;
255 - }
256 - .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > a,
257 - .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > label {
258 - padding-left: 45px;
259 - }
260 - .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > a,
261 - .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > label {
262 - padding-left: 55px;
263 - }
264 - .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > a,
265 - .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > label {
266 - padding-left: 65px;
267 - }
268 - .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > a,
269 - .navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > label {
270 - padding-left: 75px;
271 - }
272 -}
273 -.navbar-default .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a,
274 -.navbar-default .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a:hover,
275 -.navbar-default .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a:focus {
276 - background-color: #e7e7e7;
277 - color: #555555;
278 -}
279 -@media (max-width: 767px) {
280 - .navbar-default .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a:before {
281 - border-top-color: #555555;
282 - }
283 -}
284 -.navbar-inverse .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a,
285 -.navbar-inverse .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a:hover,
286 -.navbar-inverse .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a:focus {
287 - background-color: #080808;
288 - color: #ffffff;
289 -}
290 -@media (max-width: 767px) {
291 - .navbar-inverse .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a:before {
292 - border-top-color: #ffffff;
293 - }
294 -}
1 -package admin
2 -
3 -const (
4 - // DefaultBindAddress is the default bind address for the HTTP server.
5 - DefaultBindAddress = ":8083"
6 -)
7 -
8 -// Config represents the configuration for the admin service.
9 -type Config struct {
10 - Enabled bool `toml:"enabled"`
11 - BindAddress string `toml:"bind-address"`
12 - HTTPSEnabled bool `toml:"https-enabled"`
13 - HTTPSCertificate string `toml:"https-certificate"`
14 - Version string `toml:"-"`
15 -}
16 -
17 -// NewConfig returns an instance of Config with defaults.
18 -func NewConfig() Config {
19 - return Config{
20 - BindAddress: DefaultBindAddress,
21 - HTTPSEnabled: false,
22 - HTTPSCertificate: "/etc/ssl/influxdb.pem",
23 - }
24 -}
1 -package admin_test
2 -
3 -import (
4 - "testing"
5 -
6 - "github.com/BurntSushi/toml"
7 - "github.com/influxdata/influxdb/services/admin"
8 -)
9 -
10 -func TestConfig_Parse(t *testing.T) {
11 - // Parse configuration.
12 - var c admin.Config
13 - if _, err := toml.Decode(`
14 -enabled = true
15 -bind-address = ":8083"
16 -https-enabled = true
17 -https-certificate = "/dev/null"
18 -`, &c); err != nil {
19 - t.Fatal(err)
20 - }
21 -
22 - // Validate configuration.
23 - if c.Enabled != true {
24 - t.Fatalf("unexpected enabled: %v", c.Enabled)
25 - } else if c.BindAddress != ":8083" {
26 - t.Fatalf("unexpected bind address: %s", c.BindAddress)
27 - } else if c.HTTPSEnabled != true {
28 - t.Fatalf("unexpected https enabled: %v", c.HTTPSEnabled)
29 - } else if c.HTTPSCertificate != "/dev/null" {
30 - t.Fatalf("unexpected https certificate: %v", c.HTTPSCertificate)
31 - }
32 -}
1 -package admin // import "github.com/influxdata/influxdb/services/admin"
2 -
3 -import (
4 - "crypto/tls"
5 - "fmt"
6 - "net"
7 - "net/http"
8 - "strings"
9 -
10 - // Register static assets via statik.
11 - _ "github.com/influxdata/influxdb/services/admin/statik"
12 - "github.com/rakyll/statik/fs"
13 - "github.com/uber-go/zap"
14 -)
15 -
16 -// Service manages the listener for an admin endpoint.
17 -type Service struct {
18 - listener net.Listener
19 - addr string
20 - https bool
21 - cert string
22 - err chan error
23 - version string
24 -
25 - logger zap.Logger
26 -}
27 -
28 -// NewService returns a new instance of Service.
29 -func NewService(c Config) *Service {
30 - return &Service{
31 - addr: c.BindAddress,
32 - https: c.HTTPSEnabled,
33 - cert: c.HTTPSCertificate,
34 - err: make(chan error),
35 - version: c.Version,
36 - logger: zap.New(zap.NullEncoder()),
37 - }
38 -}
39 -
40 -// Open starts the service
41 -func (s *Service) Open() error {
42 - s.logger.Info("Starting admin service")
43 - s.logger.Info("DEPRECATED: This plugin is deprecated as of 1.1.0 and will be removed in a future release")
44 -
45 - // Open listener.
46 - if s.https {
47 - cert, err := tls.LoadX509KeyPair(s.cert, s.cert)
48 - if err != nil {
49 - return err
50 - }
51 -
52 - listener, err := tls.Listen("tcp", s.addr, &tls.Config{
53 - Certificates: []tls.Certificate{cert},
54 - })
55 - if err != nil {
56 - return err
57 - }
58 -
59 - s.logger.Info(fmt.Sprint("Listening on HTTPS: ", listener.Addr().String()))
60 - s.listener = listener
61 - } else {
62 - listener, err := net.Listen("tcp", s.addr)
63 - if err != nil {
64 - return err
65 - }
66 -
67 - s.logger.Info(fmt.Sprint("Listening on HTTP: ", listener.Addr().String()))
68 - s.listener = listener
69 - }
70 -
71 - // Begin listening for requests in a separate goroutine.
72 - go s.serve()
73 - return nil
74 -}
75 -
76 -// Close closes the underlying listener.
77 -func (s *Service) Close() error {
78 - if s.listener != nil {
79 - return s.listener.Close()
80 - }
81 - return nil
82 -}
83 -
84 -// WithLogger sets the logger for the service
85 -func (s *Service) WithLogger(log zap.Logger) {
86 - s.logger = log.With(zap.String("service", "admin"))
87 -}
88 -
89 -// Err returns a channel for fatal errors that occur on the listener.
90 -func (s *Service) Err() <-chan error { return s.err }
91 -
92 -// Addr returns the listener's address. Returns nil if listener is closed.
93 -func (s *Service) Addr() net.Addr {
94 - if s.listener != nil {
95 - return s.listener.Addr()
96 - }
97 - return nil
98 -}
99 -
100 -// serve serves the handler from the listener.
101 -func (s *Service) serve() {
102 - addVersionHeaderThenServe := func(h http.Handler) http.HandlerFunc {
103 - return func(w http.ResponseWriter, r *http.Request) {
104 - w.Header().Add("X-InfluxDB-Version", s.version)
105 - h.ServeHTTP(w, r)
106 - }
107 - }
108 -
109 - // Instantiate file system from embedded admin.
110 - statikFS, err := fs.New()
111 - if err != nil {
112 - panic(err)
113 - }
114 -
115 - // Run file system handler on listener.
116 - err = http.Serve(s.listener, addVersionHeaderThenServe(http.FileServer(statikFS)))
117 - if err != nil && !strings.Contains(err.Error(), "closed") {
118 - s.err <- fmt.Errorf("listener error: addr=%s, err=%s", s.Addr(), err)
119 - }
120 -}
1 -package admin_test
2 -
3 -import (
4 - "io/ioutil"
5 - "net/http"
6 - "testing"
7 -
8 - "github.com/influxdata/influxdb/services/admin"
9 -)
10 -
11 -// Ensure service can serve the root index page of the admin.
12 -func TestService_Index(t *testing.T) {
13 - // Start service on random port.
14 - s := admin.NewService(admin.Config{BindAddress: "127.0.0.1:0"})
15 - if err := s.Open(); err != nil {
16 - t.Fatal(err)
17 - }
18 - defer s.Close()
19 -
20 - // Request root index page.
21 - resp, err := http.Get("http://" + s.Addr().String())
22 - if err != nil {
23 - t.Fatal(err)
24 - }
25 - defer resp.Body.Close()
26 -
27 - // Validate status code and body.
28 - if resp.StatusCode != http.StatusOK {
29 - t.Fatalf("unexpected status: %d", resp.StatusCode)
30 - } else if _, err := ioutil.ReadAll(resp.Body); err != nil {
31 - t.Fatalf("unable to read body: %s", err)
32 - }
33 -}
1 -Please note that this file is automatically generated by the
2 -[statik](https://github.com/rakyll/statik) tool, and should not be
3 -updated directly. See the Admin UI readme for more information.
...@@ -222,8 +222,8 @@ func TestParse(t *testing.T) { ...@@ -222,8 +222,8 @@ func TestParse(t *testing.T) {
222 // If we erred out, it was intended and the following tests won't work 222
223 continue 223 continue
224 } 224 }
225 - if point.Name() != test.measurement { 225 + if string(point.Name()) != test.measurement {
226 - t.Fatalf("name parse failer. expected %v, got %v", test.measurement, point.Name()) 226 + t.Fatalf("name parse failer. expected %v, got %v", test.measurement, string(point.Name()))
227 } 227 }
228 if len(point.Tags()) != len(test.tags) { 228 if len(point.Tags()) != len(test.tags) {
229 t.Fatalf("tags len mismatch. expected %d, got %d", len(test.tags), len(point.Tags())) 229 t.Fatalf("tags len mismatch. expected %d, got %d", len(test.tags), len(point.Tags()))
......
...@@ -10,7 +10,6 @@ import ( ...@@ -10,7 +10,6 @@ import (
10 "io" 10 "io"
11 "log" 11 "log"
12 "net/http" 12 "net/http"
13 - "net/http/pprof"
14 "os" 13 "os"
15 "runtime/debug" 14 "runtime/debug"
16 "strconv" 15 "strconv"
...@@ -38,6 +37,10 @@ const ( ...@@ -38,6 +37,10 @@ const (
38 // 37 //
39 // This has no relation to the number of bytes that are returned. 38 // This has no relation to the number of bytes that are returned.
40 DefaultChunkSize = 10000 39 DefaultChunkSize = 10000
40 +
41 + DefaultDebugRequestsInterval = 10 * time.Second
42 +
43 + MaxDebugRequestsInterval = 6 * time.Hour
41 ) 44 )
42 45
43 // AuthenticationMethod defines the type of authentication used. 46 // AuthenticationMethod defines the type of authentication used.
...@@ -71,6 +74,7 @@ type Handler struct { ...@@ -71,6 +74,7 @@ type Handler struct {
71 74
72 MetaClient interface { 75 MetaClient interface {
73 Database(name string) *meta.DatabaseInfo 76 Database(name string) *meta.DatabaseInfo
77 + Databases() []meta.DatabaseInfo
74 Authenticate(username, password string) (ui *meta.UserInfo, err error) 78 Authenticate(username, password string) (ui *meta.UserInfo, err error)
75 User(username string) (*meta.UserInfo, error) 79 User(username string) (*meta.UserInfo, error)
76 AdminUserExists() bool 80 AdminUserExists() bool
...@@ -99,6 +103,8 @@ type Handler struct { ...@@ -99,6 +103,8 @@ type Handler struct {
99 Logger zap.Logger 103 Logger zap.Logger
100 CLFLogger *log.Logger 104 CLFLogger *log.Logger
101 stats *Statistics 105 stats *Statistics
106 +
107 + requestTracker *RequestTracker
102 } 108 }
103 109
104 // NewHandler returns a new instance of handler with routes. 110 // NewHandler returns a new instance of handler with routes.
...@@ -109,6 +115,7 @@ func NewHandler(c Config) *Handler { ...@@ -109,6 +115,7 @@ func NewHandler(c Config) *Handler {
109 Logger: zap.New(zap.NullEncoder()), 115 Logger: zap.New(zap.NullEncoder()),
110 CLFLogger: log.New(os.Stderr, "[httpd] ", 0), 116 CLFLogger: log.New(os.Stderr, "[httpd] ", 0),
111 stats: &Statistics{}, 117 stats: &Statistics{},
118 + requestTracker: NewRequestTracker(),
112 } 119 }
113 120
114 h.AddRoutes([]Route{ 121 h.AddRoutes([]Route{
...@@ -245,18 +252,11 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { ...@@ -245,18 +252,11 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
245 w.Header().Add("X-Influxdb-Version", h.Version) 252 w.Header().Add("X-Influxdb-Version", h.Version)
246 253
247 if strings.HasPrefix(r.URL.Path, "/debug/pprof") && h.Config.PprofEnabled { 254 if strings.HasPrefix(r.URL.Path, "/debug/pprof") && h.Config.PprofEnabled {
248 - switch r.URL.Path { 255 + h.handleProfiles(w, r)
249 - case "/debug/pprof/cmdline":
250 - pprof.Cmdline(w, r)
251 - case "/debug/pprof/profile":
252 - pprof.Profile(w, r)
253 - case "/debug/pprof/symbol":
254 - pprof.Symbol(w, r)
255 - default:
256 - pprof.Index(w, r)
257 - }
258 } else if strings.HasPrefix(r.URL.Path, "/debug/vars") { 256 } else if strings.HasPrefix(r.URL.Path, "/debug/vars") {
259 h.serveExpvar(w, r) 257 h.serveExpvar(w, r)
258 + } else if strings.HasPrefix(r.URL.Path, "/debug/requests") {
259 + h.serveDebugRequests(w, r)
260 } else { 260 } else {
261 h.mux.ServeHTTP(w, r) 261 h.mux.ServeHTTP(w, r)
262 } 262 }
...@@ -282,6 +282,7 @@ func (h *Handler) serveQuery(w http.ResponseWriter, r *http.Request, user *meta. ...@@ -282,6 +282,7 @@ func (h *Handler) serveQuery(w http.ResponseWriter, r *http.Request, user *meta.
282 defer func(start time.Time) { 282 defer func(start time.Time) {
283 atomic.AddInt64(&h.stats.QueryRequestDuration, time.Since(start).Nanoseconds()) 283 atomic.AddInt64(&h.stats.QueryRequestDuration, time.Since(start).Nanoseconds())
284 }(time.Now()) 284 }(time.Now())
285 + h.requestTracker.Add(r, user)
285 286
286 // Retrieve the underlying ResponseWriter or initialize our own. 287 // Retrieve the underlying ResponseWriter or initialize our own.
287 rw, ok := w.(ResponseWriter) 288 rw, ok := w.(ResponseWriter)
...@@ -584,6 +585,7 @@ func (h *Handler) serveWrite(w http.ResponseWriter, r *http.Request, user *meta. ...@@ -584,6 +585,7 @@ func (h *Handler) serveWrite(w http.ResponseWriter, r *http.Request, user *meta.
584 atomic.AddInt64(&h.stats.ActiveWriteRequests, -1) 585 atomic.AddInt64(&h.stats.ActiveWriteRequests, -1)
585 atomic.AddInt64(&h.stats.WriteRequestDuration, time.Since(start).Nanoseconds()) 586 atomic.AddInt64(&h.stats.WriteRequestDuration, time.Since(start).Nanoseconds())
586 }(time.Now()) 587 }(time.Now())
588 + h.requestTracker.Add(r, user)
587 589
588 database := r.URL.Query().Get("db") 590 database := r.URL.Query().Get("db")
589 if database == "" { 591 if database == "" {
...@@ -675,7 +677,7 @@ func (h *Handler) serveWrite(w http.ResponseWriter, r *http.Request, user *meta. ...@@ -675,7 +677,7 @@ func (h *Handler) serveWrite(w http.ResponseWriter, r *http.Request, user *meta.
675 } else if werr, ok := err.(tsdb.PartialWriteError); ok { 677 } else if werr, ok := err.(tsdb.PartialWriteError); ok {
676 atomic.AddInt64(&h.stats.PointsWrittenOK, int64(len(points)-werr.Dropped)) 678 atomic.AddInt64(&h.stats.PointsWrittenOK, int64(len(points)-werr.Dropped))
677 atomic.AddInt64(&h.stats.PointsWrittenDropped, int64(werr.Dropped)) 679 atomic.AddInt64(&h.stats.PointsWrittenDropped, int64(werr.Dropped))
678 - h.httpError(w, fmt.Sprintf("partial write: %v", werr), http.StatusBadRequest) 680 + h.httpError(w, werr.Error(), http.StatusBadRequest)
679 return 681 return
680 } else if err != nil { 682 } else if err != nil {
681 atomic.AddInt64(&h.stats.PointsWrittenFail, int64(len(points))) 683 atomic.AddInt64(&h.stats.PointsWrittenFail, int64(len(points)))
...@@ -686,7 +688,7 @@ func (h *Handler) serveWrite(w http.ResponseWriter, r *http.Request, user *meta. ...@@ -686,7 +688,7 @@ func (h *Handler) serveWrite(w http.ResponseWriter, r *http.Request, user *meta.
686 atomic.AddInt64(&h.stats.PointsWrittenOK, int64(len(points))) 688 atomic.AddInt64(&h.stats.PointsWrittenOK, int64(len(points)))
687 // The other points failed to parse which means the client sent invalid line protocol. We return a 400 689 // The other points failed to parse which means the client sent invalid line protocol. We return a 400
688 // response code as well as the lines that failed to parse. 690 // response code as well as the lines that failed to parse.
689 - h.httpError(w, fmt.Sprintf("partial write:\n%v", parseError), http.StatusBadRequest) 691 + h.httpError(w, tsdb.PartialWriteError{Reason: parseError.Error()}.Error(), http.StatusBadRequest)
690 return 692 return
691 } 693 }
692 694
...@@ -834,6 +836,64 @@ func (h *Handler) serveExpvar(w http.ResponseWriter, r *http.Request) { ...@@ -834,6 +836,64 @@ func (h *Handler) serveExpvar(w http.ResponseWriter, r *http.Request) {
834 fmt.Fprintln(w, "\n}") 836 fmt.Fprintln(w, "\n}")
835 } 837 }
836 838
839 +// serveDebugRequests will track requests for a period of time.
840 +func (h *Handler) serveDebugRequests(w http.ResponseWriter, r *http.Request) {
841 + var d time.Duration
842 + if s := r.URL.Query().Get("seconds"); s == "" {
843 + d = DefaultDebugRequestsInterval
844 + } else if seconds, err := strconv.ParseInt(s, 10, 64); err != nil {
845 + h.httpError(w, err.Error(), http.StatusBadRequest)
846 + return
847 + } else {
848 + d = time.Duration(seconds) * time.Second
849 + if d > MaxDebugRequestsInterval {
850 + h.httpError(w, fmt.Sprintf("exceeded maximum interval time: %s > %s",
851 + influxql.FormatDuration(d),
852 + influxql.FormatDuration(MaxDebugRequestsInterval)),
853 + http.StatusBadRequest)
854 + return
855 + }
856 + }
857 +
858 + var closing <-chan bool
859 + if notifier, ok := w.(http.CloseNotifier); ok {
860 + closing = notifier.CloseNotify()
861 + }
862 +
863 + profile := h.requestTracker.TrackRequests()
864 +
865 + timer := time.NewTimer(d)
866 + select {
867 + case <-timer.C:
868 + profile.Stop()
869 + case <-closing:
870 + // Connection was closed early.
871 + profile.Stop()
872 + timer.Stop()
873 + return
874 + }
875 +
876 + w.Header().Set("Content-Type", "application/json; charset=utf-8")
877 + w.Header().Add("Connection", "close")
878 +
879 + fmt.Fprintln(w, "{")
880 + first := true
881 + for req, st := range profile.Requests {
882 + val, err := json.Marshal(st)
883 + if err != nil {
884 + continue
885 + }
886 +
887 + if !first {
888 + fmt.Fprintln(w, ",")
889 + }
890 + first = false
891 + fmt.Fprintf(w, "%q: ", req.String())
892 + w.Write(bytes.TrimSpace(val))
893 + }
894 + fmt.Fprintln(w, "\n}")
895 +}
896 +
837 // parseSystemDiagnostics converts the system diagnostics into an appropriate 897 // parseSystemDiagnostics converts the system diagnostics into an appropriate
838 // format for marshaling to JSON in the /debug/vars format. 898 // format for marshaling to JSON in the /debug/vars format.
839 func parseSystemDiagnostics(d *diagnostics.Diagnostics) (map[string]interface{}, error) { 899 func parseSystemDiagnostics(d *diagnostics.Diagnostics) (map[string]interface{}, error) {
......
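A rough sketch of how a client could exercise the new `/debug/requests` endpoint (hypothetical snippet; it assumes the default bind address of `localhost:8086`, and note the handler blocks for the requested window before replying):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Track requests for 5 seconds, then print the JSON body. Each key
	// is "user:ip" (or just the IP when unauthenticated), e.g.
	//   {"bob:127.0.0.1": {"writes": 12, "queries": 3}}
	resp, err := http.Get("http://localhost:8086/debug/requests?seconds=5")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```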
...@@ -14,6 +14,8 @@ import ( ...@@ -14,6 +14,8 @@ import (
14 "testing" 14 "testing"
15 "time" 15 "time"
16 16
17 + "github.com/influxdata/influxdb/internal"
18 +
17 "github.com/dgrijalva/jwt-go" 19 "github.com/dgrijalva/jwt-go"
18 "github.com/influxdata/influxdb/influxql" 20 "github.com/influxdata/influxdb/influxql"
19 "github.com/influxdata/influxdb/models" 21 "github.com/influxdata/influxdb/models"
...@@ -605,7 +607,7 @@ func TestHandler_XForwardedFor(t *testing.T) { ...@@ -605,7 +607,7 @@ func TestHandler_XForwardedFor(t *testing.T) {
605 // NewHandler represents a test wrapper for httpd.Handler. 607 // NewHandler represents a test wrapper for httpd.Handler.
606 type Handler struct { 608 type Handler struct {
607 *httpd.Handler 609 *httpd.Handler
608 - MetaClient HandlerMetaStore 610 + MetaClient *internal.MetaClientMock
609 StatementExecutor HandlerStatementExecutor 611 StatementExecutor HandlerStatementExecutor
610 QueryAuthorizer HandlerQueryAuthorizer 612 QueryAuthorizer HandlerQueryAuthorizer
611 } 613 }
...@@ -619,7 +621,10 @@ func NewHandler(requireAuthentication bool) *Handler { ...@@ -619,7 +621,10 @@ func NewHandler(requireAuthentication bool) *Handler {
619 h := &Handler{ 621 h := &Handler{
620 Handler: httpd.NewHandler(config), 622 Handler: httpd.NewHandler(config),
621 } 623 }
622 - h.Handler.MetaClient = &h.MetaClient 624 +
625 + h.MetaClient = &internal.MetaClientMock{}
626 +
627 + h.Handler.MetaClient = h.MetaClient
623 h.Handler.QueryExecutor = influxql.NewQueryExecutor() 628 h.Handler.QueryExecutor = influxql.NewQueryExecutor()
624 h.Handler.QueryExecutor.StatementExecutor = &h.StatementExecutor 629 h.Handler.QueryExecutor.StatementExecutor = &h.StatementExecutor
625 h.Handler.QueryAuthorizer = &h.QueryAuthorizer 630 h.Handler.QueryAuthorizer = &h.QueryAuthorizer
...@@ -627,39 +632,6 @@ func NewHandler(requireAuthentication bool) *Handler { ...@@ -627,39 +632,6 @@ func NewHandler(requireAuthentication bool) *Handler {
627 return h 632 return h
628 } 633 }
629 634
630 -// HandlerMetaStore is a mock implementation of Handler.MetaClient.
631 -type HandlerMetaStore struct {
632 - PingFn func(d time.Duration) error
633 - DatabaseFn func(name string) *meta.DatabaseInfo
634 - AuthenticateFn func(username, password string) (ui *meta.UserInfo, err error)
635 - UserFn func(username string) (*meta.UserInfo, error)
636 - AdminUserExistsFn func() bool
637 -}
638 -
639 -func (s *HandlerMetaStore) Ping(b bool) error {
640 - if s.PingFn == nil {
641 - // Default behaviour is to assume there is a leader.
642 - return nil
643 - }
644 - return s.Ping(b)
645 -}
646 -
647 -func (s *HandlerMetaStore) Database(name string) *meta.DatabaseInfo {
648 - return s.DatabaseFn(name)
649 -}
650 -
651 -func (s *HandlerMetaStore) Authenticate(username, password string) (ui *meta.UserInfo, err error) {
652 - return s.AuthenticateFn(username, password)
653 -}
654 -
655 -func (s *HandlerMetaStore) AdminUserExists() bool {
656 - return s.AdminUserExistsFn()
657 -}
658 -
659 -func (s *HandlerMetaStore) User(username string) (*meta.UserInfo, error) {
660 - return s.UserFn(username)
661 -}
662 -
663 // HandlerStatementExecutor is a mock implementation of Handler.StatementExecutor. 635 // HandlerStatementExecutor is a mock implementation of Handler.StatementExecutor.
664 type HandlerStatementExecutor struct { 636 type HandlerStatementExecutor struct {
665 ExecuteStatementFn func(stmt influxql.Statement, ctx influxql.ExecutionContext) error 637 ExecuteStatementFn func(stmt influxql.Statement, ctx influxql.ExecutionContext) error
......
1 +package httpd
2 +
3 +import (
4 + "archive/tar"
5 + "bytes"
6 + "compress/gzip"
7 + "fmt"
8 + "io"
9 + "net/http"
10 + httppprof "net/http/pprof"
11 + "runtime/pprof"
12 + "sort"
13 + "strconv"
14 + "text/tabwriter"
15 + "time"
16 +
17 + "github.com/influxdata/influxdb/models"
18 +)
19 +
20 +// handleProfiles determines which profile to return to the requester.
21 +func (h *Handler) handleProfiles(w http.ResponseWriter, r *http.Request) {
22 + switch r.URL.Path {
23 + case "/debug/pprof/cmdline":
24 + httppprof.Cmdline(w, r)
25 + case "/debug/pprof/profile":
26 + httppprof.Profile(w, r)
27 + case "/debug/pprof/symbol":
28 + httppprof.Symbol(w, r)
29 + case "/debug/pprof/all":
30 + h.archiveProfilesAndQueries(w, r)
31 + default:
32 + httppprof.Index(w, r)
33 + }
34 +}
35 +
36 +// prof describes a profile name and a debug value, or in the case of a CPU
37 +// profile, the number of seconds to collect the profile for.
38 +type prof struct {
39 + Name string
40 + Debug int64
41 +}
42 +
43 +// archiveProfilesAndQueries collects the following profiles:
44 +// - goroutine profile
45 +// - heap profile
46 +// - blocking profile
47 +// - (optionally) CPU profile
48 +//
49 +// It also collects the following query results:
50 +//
51 +// - SHOW SHARDS
52 +// - SHOW STATS
53 +// - SHOW DIAGNOSTICS
54 +//
55 +// All information is added to a tar archive and then compressed, before being
56 +// returned to the requester as an archive file. Where profiles support debug
57 +// parameters, the profile is collected with debug=1. To optionally include a
58 +// CPU profile, the requester should provide a `cpu` query parameter, and can
59 +// also provide a `seconds` parameter to specify a non-default profile
60 +// collection time. The default CPU profile collection time is 30 seconds.
61 +//
62 +// Example request including CPU profile:
63 +//
64 +// http://localhost:8086/debug/pprof/all?cpu=true&seconds=45
65 +//
66 +// Any non-empty value for the `cpu` query parameter enables the CPU
67 +// profile; the specific value is otherwise ignored.
68 +//
69 +func (h *Handler) archiveProfilesAndQueries(w http.ResponseWriter, r *http.Request) {
70 + var allProfs = []*prof{
71 + {Name: "goroutine", Debug: 1},
72 + {Name: "block", Debug: 1},
73 + {Name: "heap", Debug: 1},
74 + }
75 +
76 + // Capture a CPU profile?
77 + if r.FormValue("cpu") != "" {
78 + profile := &prof{Name: "cpu"}
79 +
80 + // For a CPU profile we'll use the Debug field to indicate the number of
81 + // seconds to capture the profile for.
82 + profile.Debug, _ = strconv.ParseInt(r.FormValue("seconds"), 10, 64)
83 + if profile.Debug <= 0 {
84 + profile.Debug = 30
85 + }
86 + allProfs = append([]*prof{profile}, allProfs...) // CPU profile first.
87 + }
88 +
89 + var (
90 + resp bytes.Buffer // Temporary buffer for entire archive.
91 + buf bytes.Buffer // Temporary buffer for each profile/query result.
92 + )
93 +
94 + gz := gzip.NewWriter(&resp)
95 + tw := tar.NewWriter(gz)
96 +
97 + // Collect and write out profiles.
98 + for _, profile := range allProfs {
99 + if profile.Name == "cpu" {
100 + if err := pprof.StartCPUProfile(&buf); err != nil {
101 + http.Error(w, err.Error(), http.StatusInternalServerError)
102 + return
103 + }
104 +
105 + sleep(w, time.Duration(profile.Debug)*time.Second)
106 + pprof.StopCPUProfile()
107 + } else {
108 + prof := pprof.Lookup(profile.Name)
109 + if prof == nil {
110 + http.Error(w, "unable to find profile "+profile.Name, http.StatusInternalServerError)
111 + return
112 + }
113 +
114 + if err := prof.WriteTo(&buf, int(profile.Debug)); err != nil {
115 + http.Error(w, err.Error(), http.StatusInternalServerError)
116 + return
117 + }
118 + }
119 +
120 + // Write the profile file's header.
121 + err := tw.WriteHeader(&tar.Header{
122 + Name: profile.Name + ".txt",
123 + Mode: 0600,
124 + Size: int64(buf.Len()),
125 + })
126 + if err != nil {
127 + http.Error(w, err.Error(), http.StatusInternalServerError)
128 + }
129 +
130 + // Write the profile file's data.
131 + if _, err := tw.Write(buf.Bytes()); err != nil {
132 + http.Error(w, err.Error(), http.StatusInternalServerError)
133 + }
134 +
135 + // Reset the buffer for the next profile.
136 + buf.Reset()
137 + }
138 +
139 + // Collect and write out the queries.
140 + var allQueries = []struct {
141 + name string
142 + fn func() ([]*models.Row, error)
143 + }{
144 + {"shards", h.showShards},
145 + {"stats", h.showStats},
146 + {"diagnostics", h.showDiagnostics},
147 + }
148 +
149 + tabW := tabwriter.NewWriter(&buf, 8, 8, 1, '\t', 0)
150 + for _, query := range allQueries {
151 + rows, err := query.fn()
152 + if err != nil {
153 + http.Error(w, err.Error(), http.StatusInternalServerError)
154 + }
155 +
156 + for i, row := range rows {
157 + var out []byte
158 + // Write the columns
159 + for _, col := range row.Columns {
160 + out = append(out, []byte(col+"\t")...)
161 + }
162 + out = append(out, '\n')
163 + if _, err := tabW.Write(out); err != nil {
164 + http.Error(w, err.Error(), http.StatusInternalServerError)
165 + }
166 +
167 + // Write all the values
168 + for _, val := range row.Values {
169 + out = out[:0]
170 + for _, v := range val {
171 + out = append(out, []byte(fmt.Sprintf("%v\t", v))...)
172 + }
173 + out = append(out, '\n')
174 + if _, err := tabW.Write(out); err != nil {
175 + http.Error(w, err.Error(), http.StatusInternalServerError)
176 + }
177 + }
178 +
179 + // Write a final newline
180 + if i < len(rows)-1 {
181 + if _, err := tabW.Write([]byte("\n")); err != nil {
182 + http.Error(w, err.Error(), http.StatusInternalServerError)
183 + }
184 + }
185 + }
186 +
187 + if err := tabW.Flush(); err != nil {
188 + http.Error(w, err.Error(), http.StatusInternalServerError)
189 + }
190 +
191 + err = tw.WriteHeader(&tar.Header{
192 + Name: query.name + ".txt",
193 + Mode: 0600,
194 + Size: int64(buf.Len()),
195 + })
196 + if err != nil {
197 + http.Error(w, err.Error(), http.StatusInternalServerError)
198 + }
199 +
200 + // Write the query file's data.
201 + if _, err := tw.Write(buf.Bytes()); err != nil {
202 + http.Error(w, err.Error(), http.StatusInternalServerError)
203 + }
204 +
205 + // Reset the buffer for the next query.
206 + buf.Reset()
207 + }
208 +
209 + // Close the tar writer.
210 + if err := tw.Close(); err != nil {
211 + http.Error(w, err.Error(), http.StatusInternalServerError)
212 + }
213 +
214 + // Close the gzip writer.
215 + if err := gz.Close(); err != nil {
216 + http.Error(w, err.Error(), http.StatusInternalServerError)
217 + }
218 +
219 + // Return the gzipped archive.
220 + w.Header().Set("Content-Disposition", "attachment; filename=profiles.tar.gz")
221 + w.Header().Set("Content-Type", "application/gzip")
222 + io.Copy(w, &resp) // Nothing we can really do about an error at this point.
223 +}
224 +
225 +// showShards generates the same values that a StatementExecutor would if a
226 +// SHOW SHARDS query was executed.
227 +func (h *Handler) showShards() ([]*models.Row, error) {
228 + dis := h.MetaClient.Databases()
229 +
230 + rows := []*models.Row{}
231 + for _, di := range dis {
232 + row := &models.Row{Columns: []string{"id", "database", "retention_policy", "shard_group", "start_time", "end_time", "expiry_time", "owners"}, Name: di.Name}
233 + for _, rpi := range di.RetentionPolicies {
234 + for _, sgi := range rpi.ShardGroups {
235 + // Shards associated with deleted shard groups are effectively deleted.
236 + // Don't list them.
237 + if sgi.Deleted() {
238 + continue
239 + }
240 +
241 + for _, si := range sgi.Shards {
242 + ownerIDs := make([]uint64, len(si.Owners))
243 + for i, owner := range si.Owners {
244 + ownerIDs[i] = owner.NodeID
245 + }
246 +
247 + row.Values = append(row.Values, []interface{}{
248 + si.ID,
249 + di.Name,
250 + rpi.Name,
251 + sgi.ID,
252 + sgi.StartTime.UTC().Format(time.RFC3339),
253 + sgi.EndTime.UTC().Format(time.RFC3339),
254 + sgi.EndTime.Add(rpi.Duration).UTC().Format(time.RFC3339),
255 + joinUint64(ownerIDs),
256 + })
257 + }
258 + }
259 + }
260 + rows = append(rows, row)
261 + }
262 + return rows, nil
263 +}
264 +
265 +// showDiagnostics generates the same values that a StatementExecutor would if a
266 +// SHOW DIAGNOSTICS query was executed.
267 +func (h *Handler) showDiagnostics() ([]*models.Row, error) {
268 + diags, err := h.Monitor.Diagnostics()
269 + if err != nil {
270 + return nil, err
271 + }
272 +
273 + // Get a sorted list of diagnostics keys.
274 + sortedKeys := make([]string, 0, len(diags))
275 + for k := range diags {
276 + sortedKeys = append(sortedKeys, k)
277 + }
278 + sort.Strings(sortedKeys)
279 +
280 + rows := make([]*models.Row, 0, len(diags))
281 + for _, k := range sortedKeys {
282 + row := &models.Row{Name: k}
283 +
284 + row.Columns = diags[k].Columns
285 + row.Values = diags[k].Rows
286 + rows = append(rows, row)
287 + }
288 + return rows, nil
289 +}
290 +
291 +// showStats generates the same values that a StatementExecutor would if a
292 +// SHOW STATS query was executed.
293 +func (h *Handler) showStats() ([]*models.Row, error) {
294 + stats, err := h.Monitor.Statistics(nil)
295 + if err != nil {
296 + return nil, err
297 + }
298 +
299 + var rows []*models.Row
300 + for _, stat := range stats {
301 + row := &models.Row{Name: stat.Name, Tags: stat.Tags}
302 +
303 + values := make([]interface{}, 0, len(stat.Values))
304 + for _, k := range stat.ValueNames() {
305 + row.Columns = append(row.Columns, k)
306 + values = append(values, stat.Values[k])
307 + }
308 + row.Values = [][]interface{}{values}
309 + rows = append(rows, row)
310 + }
311 + return rows, nil
312 +}
313 +
314 +// joinUint64 returns a comma-delimited string of uint64 numbers.
315 +func joinUint64(a []uint64) string {
316 + var buf []byte // Could take a guess at initial size here.
317 + for i, x := range a {
318 + if i != 0 {
319 + buf = append(buf, ',')
320 + }
321 + buf = strconv.AppendUint(buf, x, 10)
322 + }
323 + return string(buf)
324 +}
325 +
326 +// Taken from net/http/pprof/pprof.go
327 +func sleep(w http.ResponseWriter, d time.Duration) {
328 + var clientGone <-chan bool
329 + if cn, ok := w.(http.CloseNotifier); ok {
330 + clientGone = cn.CloseNotify()
331 + }
332 + select {
333 + case <-time.After(d):
334 + case <-clientGone:
335 + }
336 +}
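Since the archive is a gzipped tar stream, a small client sketch can confirm its contents (again assuming the default bind address; with `cpu=true` the request blocks for roughly 30 seconds while the CPU profile is captured):

```go
package main

import (
	"archive/tar"
	"compress/gzip"
	"fmt"
	"io"
	"net/http"
)

func main() {
	resp, err := http.Get("http://localhost:8086/debug/pprof/all?cpu=true")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	gz, err := gzip.NewReader(resp.Body)
	if err != nil {
		panic(err)
	}
	tr := tar.NewReader(gz)

	// Expect cpu.txt, goroutine.txt, block.txt, heap.txt, shards.txt,
	// stats.txt, and diagnostics.txt.
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Printf("%s (%d bytes)\n", hdr.Name, hdr.Size)
	}
}
```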
1 +package httpd
2 +
3 +import (
4 + "container/list"
5 + "fmt"
6 + "net"
7 + "net/http"
8 + "sync"
9 + "sync/atomic"
10 +
11 + "github.com/influxdata/influxdb/services/meta"
12 +)
13 +
14 +type RequestInfo struct {
15 + IPAddr string
16 + Username string
17 +}
18 +
19 +type RequestStats struct {
20 + Writes int64 `json:"writes"`
21 + Queries int64 `json:"queries"`
22 +}
23 +
24 +func (r *RequestInfo) String() string {
25 + if r.Username != "" {
26 + return fmt.Sprintf("%s:%s", r.Username, r.IPAddr)
27 + }
28 + return r.IPAddr
29 +}
30 +
31 +type RequestProfile struct {
32 + tracker *RequestTracker
33 + elem *list.Element
34 +
35 + mu sync.RWMutex
36 + Requests map[RequestInfo]*RequestStats
37 +}
38 +
39 +func (p *RequestProfile) AddWrite(info RequestInfo) {
40 + p.add(info, p.addWrite)
41 +}
42 +
43 +func (p *RequestProfile) AddQuery(info RequestInfo) {
44 + p.add(info, p.addQuery)
45 +}
46 +
47 +func (p *RequestProfile) add(info RequestInfo, fn func(*RequestStats)) {
48 + // Look for a request entry for this request.
49 + p.mu.RLock()
50 + st, ok := p.Requests[info]
51 + p.mu.RUnlock()
52 + if ok {
53 + fn(st)
54 + return
55 + }
56 +
57 + // There is no entry in the request tracker. Create one.
58 + p.mu.Lock()
59 + if st, ok := p.Requests[info]; ok {
60 + // Something else created this entry while we were waiting for the lock.
61 + p.mu.Unlock()
62 + fn(st)
63 + return
64 + }
65 +
66 + st = &RequestStats{}
67 + p.Requests[info] = st
68 + p.mu.Unlock()
69 + fn(st)
70 +}
71 +
72 +func (p *RequestProfile) addWrite(st *RequestStats) {
73 + atomic.AddInt64(&st.Writes, 1)
74 +}
75 +
76 +func (p *RequestProfile) addQuery(st *RequestStats) {
77 + atomic.AddInt64(&st.Queries, 1)
78 +}
79 +
80 +// Stop informs the RequestTracker to stop collecting statistics for this
81 +// profile.
82 +func (p *RequestProfile) Stop() {
83 + p.tracker.mu.Lock()
84 + p.tracker.profiles.Remove(p.elem)
85 + p.tracker.mu.Unlock()
86 +}
87 +
88 +type RequestTracker struct {
89 + mu sync.RWMutex
90 + profiles *list.List
91 +}
92 +
93 +func NewRequestTracker() *RequestTracker {
94 + return &RequestTracker{
95 + profiles: list.New(),
96 + }
97 +}
98 +
99 +func (rt *RequestTracker) TrackRequests() *RequestProfile {
100 + // Perform the memory allocation outside of the lock.
101 + profile := &RequestProfile{
102 + Requests: make(map[RequestInfo]*RequestStats),
103 + tracker: rt,
104 + }
105 +
106 + rt.mu.Lock()
107 + profile.elem = rt.profiles.PushBack(profile)
108 + rt.mu.Unlock()
109 + return profile
110 +}
111 +
112 +func (rt *RequestTracker) Add(req *http.Request, user *meta.UserInfo) {
113 + rt.mu.RLock()
114 + if rt.profiles.Len() == 0 {
115 + rt.mu.RUnlock()
116 + return
117 + }
118 + defer rt.mu.RUnlock()
119 +
120 + var info RequestInfo
121 + host, _, err := net.SplitHostPort(req.RemoteAddr)
122 + if err != nil {
123 + return
124 + }
125 +
126 + info.IPAddr = host
127 + if user != nil {
128 + info.Username = user.Name
129 + }
130 +
131 + // Add the request info to the profiles.
132 + for p := rt.profiles.Front(); p != nil; p = p.Next() {
133 + profile := p.Value.(*RequestProfile)
134 + if req.URL.Path == "/query" {
135 + profile.AddQuery(info)
136 + } else if req.URL.Path == "/write" {
137 + profile.AddWrite(info)
138 + }
139 + }
140 +}
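The tracker's lifecycle in isolation, as a hedged sketch (standalone use is contrived — inside the handler it is wired up via `requestTracker.Add` on the query and write paths — but it shows the Track/Add/Stop flow and the resulting counters):

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"

	"github.com/influxdata/influxdb/services/httpd"
)

func main() {
	rt := httpd.NewRequestTracker()

	// Begin a collection window; Add is a no-op while no profile is active.
	profile := rt.TrackRequests()

	// Simulate tracked requests. With a nil user, only the IP is recorded.
	for i := 0; i < 3; i++ {
		rt.Add(&http.Request{
			RemoteAddr: "127.0.0.1:12345",
			URL:        &url.URL{Path: "/write"},
		}, nil)
	}
	rt.Add(&http.Request{
		RemoteAddr: "127.0.0.1:12345",
		URL:        &url.URL{Path: "/query"},
	}, nil)

	// Stop collecting and inspect the counters.
	profile.Stop()
	for info, st := range profile.Requests {
		fmt.Printf("%s writes=%d queries=%d\n", info.String(), st.Writes, st.Queries)
	}
}
```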
...@@ -145,6 +145,11 @@ func (w *csvFormatter) WriteResponse(resp Response) (n int, err error) { ...@@ -145,6 +145,11 @@ func (w *csvFormatter) WriteResponse(resp Response) (n int, err error) {
145 } 145 }
146 for _, values := range row.Values { 146 for _, values := range row.Values {
147 for i, value := range values { 147 for i, value := range values {
148 + if value == nil {
149 + w.columns[i+2] = ""
150 + continue
151 + }
152 +
148 switch v := value.(type) { 153 switch v := value.(type) {
149 case float64: 154 case float64:
150 w.columns[i+2] = strconv.FormatFloat(v, 'f', -1, 64) 155 w.columns[i+2] = strconv.FormatFloat(v, 'f', -1, 64)
...@@ -160,6 +165,8 @@ func (w *csvFormatter) WriteResponse(resp Response) (n int, err error) { ...@@ -160,6 +165,8 @@ func (w *csvFormatter) WriteResponse(resp Response) (n int, err error) {
160 } 165 }
161 case time.Time: 166 case time.Time:
162 w.columns[i+2] = strconv.FormatInt(v.UnixNano(), 10) 167 w.columns[i+2] = strconv.FormatInt(v.UnixNano(), 10)
168 + case *float64, *int64, *string, *bool:
169 + w.columns[i+2] = ""
163 } 170 }
164 } 171 }
165 csv.Write(w.columns) 172 csv.Write(w.columns)
......
1 +package httpd_test
2 +
3 +import (
4 + "net/http"
5 + "net/http/httptest"
6 + "net/url"
7 + "testing"
8 + "time"
9 +
10 + "github.com/influxdata/influxdb/influxql"
11 + "github.com/influxdata/influxdb/models"
12 + "github.com/influxdata/influxdb/services/httpd"
13 +)
14 +
15 +func TestResponseWriter_CSV(t *testing.T) {
16 + header := make(http.Header)
17 + header.Set("Accept", "text/csv")
18 + r := &http.Request{
19 + Header: header,
20 + URL: &url.URL{},
21 + }
22 + w := httptest.NewRecorder()
23 +
24 + writer := httpd.NewResponseWriter(w, r)
25 + writer.WriteResponse(httpd.Response{
26 + Results: []*influxql.Result{
27 + {
28 + StatementID: 0,
29 + Series: []*models.Row{
30 + {
31 + Name: "cpu",
32 + Tags: map[string]string{
33 + "host": "server01",
34 + "region": "uswest",
35 + },
36 + Columns: []string{"time", "value"},
37 + Values: [][]interface{}{
38 + {time.Unix(0, 10), float64(2.5)},
39 + {time.Unix(0, 20), int64(5)},
40 + {time.Unix(0, 30), nil},
41 + {time.Unix(0, 40), "foobar"},
42 + {time.Unix(0, 50), true},
43 + {time.Unix(0, 60), false},
44 + },
45 + },
46 + },
47 + },
48 + },
49 + })
50 +
51 + if got, want := w.Body.String(), `name,tags,time,value
52 +cpu,"host=server01,region=uswest",10,2.5
53 +cpu,"host=server01,region=uswest",20,5
54 +cpu,"host=server01,region=uswest",30,
55 +cpu,"host=server01,region=uswest",40,foobar
56 +cpu,"host=server01,region=uswest",50,true
57 +cpu,"host=server01,region=uswest",60,false
58 +`; got != want {
59 + t.Errorf("unexpected output:\n\ngot=%v\nwant=%s", got, want)
60 + }
61 +}
...@@ -147,7 +147,7 @@ func setMapValues(m map[string]interface{}, p models.Point) { ...@@ -147,7 +147,7 @@ func setMapValues(m map[string]interface{}, p models.Point) {
147 panic(err) 147 panic(err)
148 } 148 }
149 m["%f"] = getRandomFieldKey(fields) 149 m["%f"] = getRandomFieldKey(fields)
150 - m["%m"] = p.Name() 150 + m["%m"] = string(p.Name())
151 m["%t"] = getRandomTagPair(p.Tags()) 151 m["%t"] = getRandomTagPair(p.Tags())
152 m["%a"] = p.UnixNano() 152 m["%a"] = p.UnixNano()
153 } 153 }
......
...@@ -9,8 +9,8 @@ func TestCommunePoint(t *testing.T) { ...@@ -9,8 +9,8 @@ func TestCommunePoint(t *testing.T) {
9 pt := "write,tag=tagVal fooField=5 1460912595" 9 pt := "write,tag=tagVal fooField=5 1460912595"
10 comm.ch <- pt 10 comm.ch <- pt
11 point := comm.point("s") 11 point := comm.point("s")
12 - if point.Name() != "write" { 12 + if string(point.Name()) != "write" {
13 - t.Errorf("expected: write\ngot: %v", point.Name()) 13 + t.Errorf("expected: write\ngot: %v", string(point.Name()))
14 } 14 }
15 if point.Tags().GetString("tag") != "tagVal" { 15 if point.Tags().GetString("tag") != "tagVal" {
16 t.Errorf("expected: tagVal\ngot: %v", point.Tags().GetString("tag")) 16 t.Errorf("expected: tagVal\ngot: %v", point.Tags().GetString("tag"))
...@@ -25,8 +25,8 @@ func TestCommunePoint(t *testing.T) { ...@@ -25,8 +25,8 @@ func TestCommunePoint(t *testing.T) {
25 // Make sure commune returns the prev point 25 // Make sure commune returns the prev point
26 comm.ch <- "" 26 comm.ch <- ""
27 point = comm.point("s") 27 point = comm.point("s")
28 - if point.Name() != "write" { 28 + if string(point.Name()) != "write" {
29 - t.Errorf("expected: write\ngot: %v", point.Name()) 29 + t.Errorf("expected: write\ngot: %v", string(point.Name()))
30 } 30 }
31 if point.Tags().GetString("tag") != "tagVal" { 31 if point.Tags().GetString("tag") != "tagVal" {
32 t.Errorf("expected: tagVal\ngot: %v", point.Tags().GetString("tag")) 32 t.Errorf("expected: tagVal\ngot: %v", point.Tags().GetString("tag"))
...@@ -41,8 +41,8 @@ func TestSetCommune(t *testing.T) { ...@@ -41,8 +41,8 @@ func TestSetCommune(t *testing.T) {
41 ch := sf.SetCommune("foo_name") 41 ch := sf.SetCommune("foo_name")
42 ch <- "write,tag=tagVal fooField=5 1460912595" 42 ch <- "write,tag=tagVal fooField=5 1460912595"
43 pt := sf.GetPoint("foo_name", "s") 43 pt := sf.GetPoint("foo_name", "s")
44 - if pt.Name() != "write" { 44 + if string(pt.Name()) != "write" {
45 - t.Errorf("expected: write\ngot: %v", pt.Name()) 45 + t.Errorf("expected: write\ngot: %v", string(pt.Name()))
46 } 46 }
47 if pt.Tags().GetString("tag") != "tagVal" { 47 if pt.Tags().GetString("tag") != "tagVal" {
48 t.Errorf("expected: tagVal\ngot: %v", pt.Tags().GetString("tag")) 48 t.Errorf("expected: tagVal\ngot: %v", pt.Tags().GetString("tag"))
......
...@@ -131,18 +131,18 @@ fi ...@@ -131,18 +131,18 @@ fi
131 case $ENVIRONMENT_INDEX in 131 case $ENVIRONMENT_INDEX in
132 0) 132 0)
133 # 64 bit tests 133 # 64 bit tests
134 - run_test_docker Dockerfile_build_ubuntu64 test_64bit --generate --test --junit-report 134 + run_test_docker Dockerfile_build_ubuntu64 test_64bit --test --junit-report
135 rc=$? 135 rc=$?
136 ;; 136 ;;
137 1) 137 1)
138 # 64 bit race tests 138 # 64 bit race tests
139 GORACE="halt_on_error=1" 139 GORACE="halt_on_error=1"
140 - run_test_docker Dockerfile_build_ubuntu64 test_64bit_race --generate --test --junit-report --race 140 + run_test_docker Dockerfile_build_ubuntu64 test_64bit_race --test --junit-report --race
141 rc=$? 141 rc=$?
142 ;; 142 ;;
143 2) 143 2)
144 # 32 bit tests 144 # 32 bit tests
145 - run_test_docker Dockerfile_build_ubuntu32 test_32bit --generate --test --junit-report --arch=i386 145 + run_test_docker Dockerfile_build_ubuntu32 test_32bit --test --junit-report --arch=i386
146 rc=$? 146 rc=$?
147 ;; 147 ;;
148 "save") 148 "save")
......
...@@ -26,7 +26,6 @@ func TestMain(m *testing.M) { ...@@ -26,7 +26,6 @@ func TestMain(m *testing.M) {
26 c.Retention.Enabled = false 26 c.Retention.Enabled = false
27 c.Monitor.StoreEnabled = false 27 c.Monitor.StoreEnabled = false
28 c.Meta.LoggingEnabled = false 28 c.Meta.LoggingEnabled = false
29 - c.Admin.Enabled = false
30 c.Subscriber.Enabled = false 29 c.Subscriber.Enabled = false
31 c.ContinuousQuery.Enabled = false 30 c.ContinuousQuery.Enabled = false
32 c.Data.MaxValuesPerTag = 1000000 // 1M 31 c.Data.MaxValuesPerTag = 1000000 // 1M
...@@ -4266,13 +4265,13 @@ func TestServer_Query_AggregateSelectors(t *testing.T) { ...@@ -4266,13 +4265,13 @@ func TestServer_Query_AggregateSelectors(t *testing.T) {
4266 name: "distinct - time", 4265 name: "distinct - time",
4267 params: url.Values{"db": []string{"db0"}}, 4266 params: url.Values{"db": []string{"db0"}},
4268 command: `SELECT time, distinct(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, 4267 command: `SELECT time, distinct(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,
4269 - exp: `{"error":"error parsing query: aggregate function distinct() can not be combined with other functions or fields"}`, 4268 + exp: `{"error":"error parsing query: aggregate function distinct() cannot be combined with other functions or fields"}`,
4270 }, 4269 },
4271 &Query{ 4270 &Query{
4272 name: "distinct - tx", 4271 name: "distinct - tx",
4273 params: url.Values{"db": []string{"db0"}}, 4272 params: url.Values{"db": []string{"db0"}},
4274 command: `SELECT tx, distinct(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, 4273 command: `SELECT tx, distinct(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,
4275 - exp: `{"error":"error parsing query: aggregate function distinct() can not be combined with other functions or fields"}`, 4274 + exp: `{"error":"error parsing query: aggregate function distinct() cannot be combined with other functions or fields"}`,
4276 }, 4275 },
4277 &Query{ 4276 &Query{
4278 name: "mean - baseline 30s", 4277 name: "mean - baseline 30s",
...@@ -4631,19 +4630,19 @@ func TestServer_Query_TopInt(t *testing.T) { ...@@ -4631,19 +4630,19 @@ func TestServer_Query_TopInt(t *testing.T) {
4631 name: "top - cpu - hourly", 4630 name: "top - cpu - hourly",
4632 params: url.Values{"db": []string{"db0"}}, 4631 params: url.Values{"db": []string{"db0"}},
4633 command: `SELECT TOP(value, 1) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, 4632 command: `SELECT TOP(value, 1) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`,
4634 - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T01:00:00Z",7],["2000-01-01T02:00:00Z",9]]}]}]}`, 4633 + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:10Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`,
4635 }, 4634 },
4636 &Query{ 4635 &Query{
4637 name: "top - cpu - 2 values hourly", 4636 name: "top - cpu - 2 values hourly",
4638 params: url.Values{"db": []string{"db0"}}, 4637 params: url.Values{"db": []string{"db0"}},
4639 command: `SELECT TOP(value, 2) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, 4638 command: `SELECT TOP(value, 2) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`,
4640 - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:00Z",3],["2000-01-01T01:00:00Z",7],["2000-01-01T01:00:00Z",6],["2000-01-01T02:00:00Z",9],["2000-01-01T02:00:00Z",7]]}]}]}`, 4639 + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:10Z",3],["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:10Z",7],["2000-01-01T01:00:20Z",6],["2000-01-01T02:00:00Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`,
4641 }, 4640 },
4642 &Query{ 4641 &Query{
4643 name: "top - cpu - 3 values hourly - validates that a bucket can have less than limit if no values exist in that time bucket", 4642 name: "top - cpu - 3 values hourly - validates that a bucket can have less than limit if no values exist in that time bucket",
4644 params: url.Values{"db": []string{"db0"}}, 4643 params: url.Values{"db": []string{"db0"}},
4645 command: `SELECT TOP(value, 3) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, 4644 command: `SELECT TOP(value, 3) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`,
4646 - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:00Z",2],["2000-01-01T01:00:00Z",7],["2000-01-01T01:00:00Z",6],["2000-01-01T01:00:00Z",5],["2000-01-01T02:00:00Z",9],["2000-01-01T02:00:00Z",7]]}]}]}`, 4645 + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:10Z",3],["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:00Z",5],["2000-01-01T01:00:10Z",7],["2000-01-01T01:00:20Z",6],["2000-01-01T02:00:00Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`,
4647 }, 4646 },
4648 &Query{ 4647 &Query{
4649 name: "top - memory - 2 values, two tags", 4648 name: "top - memory - 2 values, two tags",
...@@ -4986,6 +4985,11 @@ func TestServer_Query_Subqueries(t *testing.T) { ...@@ -4986,6 +4985,11 @@ func TestServer_Query_Subqueries(t *testing.T) {
4986 }, 4985 },
4987 &Query{ 4986 &Query{
4988 params: url.Values{"db": []string{"db0"}}, 4987 params: url.Values{"db": []string{"db0"}},
4988 + command: `SELECT host FROM (SELECT min(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z' GROUP BY host`,
4989 + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","host"],"values":[["2000-01-01T00:00:20Z","server01"]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","host"],"values":[["2000-01-01T00:00:00Z","server02"]]}]}]}`,
4990 + },
4991 + &Query{
4992 + params: url.Values{"db": []string{"db0"}},
4989 command: `SELECT mean(min) FROM (SELECT min(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, 4993 command: `SELECT mean(min) FROM (SELECT min(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`,
4990 exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",17]]}]}]}`, 4994 exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",17]]}]}]}`,
4991 }, 4995 },
...@@ -5007,7 +5011,7 @@ func TestServer_Query_Subqueries(t *testing.T) { ...@@ -5007,7 +5011,7 @@ func TestServer_Query_Subqueries(t *testing.T) {
5007 &Query{ 5011 &Query{
5008 params: url.Values{"db": []string{"db0"}}, 5012 params: url.Values{"db": []string{"db0"}},
5009 command: `SELECT host FROM (SELECT mean(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, 5013 command: `SELECT host FROM (SELECT mean(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`,
5010 - exp: `{"results":[{"statement_id":0}]}`, 5014 + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","host"],"values":[["2000-01-01T00:00:00Z","server01"],["2000-01-01T00:00:00Z","server02"]]}]}]}`,
5011 }, 5015 },
5012 &Query{ 5016 &Query{
5013 params: url.Values{"db": []string{"db0"}}, 5017 params: url.Values{"db": []string{"db0"}},
...@@ -5064,6 +5068,21 @@ func TestServer_Query_Subqueries(t *testing.T) { ...@@ -5064,6 +5068,21 @@ func TestServer_Query_Subqueries(t *testing.T) {
5064 command: `SELECT value FROM (SELECT max(usage_user), usage_user - usage_system AS value FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z' AND value > 0`, 5068 command: `SELECT value FROM (SELECT max(usage_user), usage_user - usage_system AS value FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z' AND value > 0`,
5065 exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:00Z",40]]}]}]}`, 5069 exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:00Z",40]]}]}]}`,
5066 }, 5070 },
5071 + &Query{
5072 + params: url.Values{"db": []string{"db0"}},
5073 + command: `SELECT max FROM (SELECT max(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z' AND host = 'server01'`,
5074 + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",70]]}]}]}`,
5075 + },
5076 + &Query{
5077 + params: url.Values{"db": []string{"db0"}},
5078 + command: `SELECT mean(value) FROM (SELECT max(usage_user), usage_user - usage_system AS value FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z' AND value > 0`,
5079 + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",40]]}]}]}`,
5080 + },
5081 + &Query{
5082 + params: url.Values{"db": []string{"db0"}},
5083 + command: `SELECT mean(value) FROM (SELECT max(usage_user), usage_user - usage_system AS value FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z' AND host =~ /server/`,
5084 + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",-2]]}]}]}`,
5085 + },
5067 }...) 5086 }...)
5068 5087
5069 for i, query := range test.queries { 5088 for i, query := range test.queries {
...@@ -7930,6 +7949,64 @@ func TestServer_Query_Sample_Wildcard(t *testing.T) { ...@@ -7930,6 +7949,64 @@ func TestServer_Query_Sample_Wildcard(t *testing.T) {
7930 } 7949 }
7931 } 7950 }
7932 7951
7952 +func TestServer_Query_Sample_LimitOffset(t *testing.T) {
7953 + t.Parallel()
7954 + s := OpenServer(NewConfig())
7955 + defer s.Close()
7956 +
7957 + if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
7958 + t.Fatal(err)
7959 + }
7960 +
7961 + writes := []string{
7962 + fmt.Sprintf(`cpu float=1,int=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
7963 + fmt.Sprintf(`cpu float=2,int=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()),
7964 + fmt.Sprintf(`cpu float=3,int=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:02:00Z").UnixNano()),
7965 + }
7966 +
7967 + test := NewTest("db0", "rp0")
7968 + test.writes = Writes{
7969 + &Write{data: strings.Join(writes, "\n")},
7970 + }
7971 +
7972 + test.addQueries([]*Query{
7973 + &Query{
7974 + name: "sample() with limit 1",
7975 + params: url.Values{"db": []string{"db0"}},
7976 + command: `SELECT sample(float, 3), int FROM cpu LIMIT 1`,
7977 + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","sample","int"],"values":[["2000-01-01T00:00:00Z",1,1]]}]}]}`,
7978 + },
7979 + &Query{
7980 + name: "sample() with offset 1",
7981 + params: url.Values{"db": []string{"db0"}},
7982 + command: `SELECT sample(float, 3), int FROM cpu OFFSET 1`,
7983 + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","sample","int"],"values":[["2000-01-01T00:01:00Z",2,2],["2000-01-01T00:02:00Z",3,3]]}]}]}`,
7984 + },
7985 + &Query{
7986 + name: "sample() with limit 1 offset 1",
7987 + params: url.Values{"db": []string{"db0"}},
7988 + command: `SELECT sample(float, 3), int FROM cpu LIMIT 1 OFFSET 1`,
7989 + exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","sample","int"],"values":[["2000-01-01T00:01:00Z",2,2]]}]}]}`,
7990 + },
7991 + }...)
7992 +
7993 + if err := test.init(s); err != nil {
7994 + t.Fatalf("test init failed: %s", err)
7995 + }
7996 +
7997 + for _, query := range test.queries {
7998 + if query.skip {
7999 + t.Logf("SKIP:: %s", query.name)
8000 + continue
8001 + }
8002 + if err := query.Execute(s); err != nil {
8003 + t.Error(query.Error(err))
8004 + } else if !query.success() {
8005 + t.Error(query.failureMessage())
8006 + }
8007 + }
8008 +}
8009 +
7933 // Validate that nested aggregates don't panic 8010 // Validate that nested aggregates don't panic
7934 func TestServer_NestedAggregateWithMathPanics(t *testing.T) { 8011 func TestServer_NestedAggregateWithMathPanics(t *testing.T) {
7935 t.Parallel() 8012 t.Parallel()
......
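The new `TestServer_Query_Sample_LimitOffset` cases assert that `OFFSET` skips output rows first and `LIMIT` then caps what remains. A rough sketch of that row-slicing semantics under those assumptions; `limitOffset` is a hypothetical helper, not a function in the server package:

```
package main

import "fmt"

// limitOffset applies OFFSET before LIMIT, treating limit 0 as "no limit",
// mirroring the expectations in the queries above.
func limitOffset(rows []string, limit, offset int) []string {
	if offset >= len(rows) {
		return nil
	}
	rows = rows[offset:]
	if limit > 0 && limit < len(rows) {
		rows = rows[:limit]
	}
	return rows
}

func main() {
	rows := []string{"00:00", "00:01", "00:02"} // three sampled points
	fmt.Println(limitOffset(rows, 1, 0))        // [00:00]        LIMIT 1
	fmt.Println(limitOffset(rows, 0, 1))        // [00:01 00:02]  OFFSET 1
	fmt.Println(limitOffset(rows, 1, 1))        // [00:01]        LIMIT 1 OFFSET 1
}
```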
...@@ -47,6 +47,10 @@ const ( ...@@ -47,6 +47,10 @@ const (
47 47
48 // DefaultMaxValuesPerTag is the maximum number of values a tag can have within a measurement. 48 // DefaultMaxValuesPerTag is the maximum number of values a tag can have within a measurement.
49 DefaultMaxValuesPerTag = 100000 49 DefaultMaxValuesPerTag = 100000
50 +
51 + // DefaultMaxConcurrentCompactions is the maximum number of concurrent full and level compactions
52 + // that can run at one time. A value of 0 results in runtime.GOMAXPROCS(0) being used at runtime.
53 + DefaultMaxConcurrentCompactions = 0
50 ) 54 )
51 55
52 // Config holds the configuration for the tsdb package. 56 // Config holds the configuration for the tsdb package.
...@@ -84,6 +88,12 @@ type Config struct { ...@@ -84,6 +88,12 @@ type Config struct {
84 // A value of 0 disables the limit. 88 // A value of 0 disables the limit.
85 MaxValuesPerTag int `toml:"max-values-per-tag"` 89 MaxValuesPerTag int `toml:"max-values-per-tag"`
86 90
91 + // MaxConcurrentCompactions is the maximum number of concurrent level and full compactions
92 + // that can be running at one time across all shards. Compactions scheduled to run when the
93 + // limit is reached are blocked until a running compaction completes. Snapshot compactions are
94 + // not affected by this limit. A value of 0 limits compactions to runtime.GOMAXPROCS(0).
95 + MaxConcurrentCompactions int `toml:"max-concurrent-compactions"`
96 +
87 TraceLoggingEnabled bool `toml:"trace-logging-enabled"` 97 TraceLoggingEnabled bool `toml:"trace-logging-enabled"`
88 } 98 }
89 99
...@@ -102,6 +112,7 @@ func NewConfig() Config { ...@@ -102,6 +112,7 @@ func NewConfig() Config {
102 112
103 MaxSeriesPerDatabase: DefaultMaxSeriesPerDatabase, 113 MaxSeriesPerDatabase: DefaultMaxSeriesPerDatabase,
104 MaxValuesPerTag: DefaultMaxValuesPerTag, 114 MaxValuesPerTag: DefaultMaxValuesPerTag,
115 + MaxConcurrentCompactions: DefaultMaxConcurrentCompactions,
105 116
106 TraceLoggingEnabled: false, 117 TraceLoggingEnabled: false,
107 } 118 }
...@@ -115,6 +126,10 @@ func (c *Config) Validate() error { ...@@ -115,6 +126,10 @@ func (c *Config) Validate() error {
115 return errors.New("Data.WALDir must be specified") 126 return errors.New("Data.WALDir must be specified")
116 } 127 }
117 128
129 + if c.MaxConcurrentCompactions < 0 {
130 + return errors.New("max-concurrent-compactions must be non-negative")
131 + }
132 +
118 valid := false 133 valid := false
119 for _, e := range RegisteredEngines() { 134 for _, e := range RegisteredEngines() {
120 if e == c.Engine { 135 if e == c.Engine {
...@@ -152,5 +167,6 @@ func (c Config) Diagnostics() (*diagnostics.Diagnostics, error) { ...@@ -152,5 +167,6 @@ func (c Config) Diagnostics() (*diagnostics.Diagnostics, error) {
152 "compact-full-write-cold-duration": c.CompactFullWriteColdDuration, 167 "compact-full-write-cold-duration": c.CompactFullWriteColdDuration,
153 "max-series-per-database": c.MaxSeriesPerDatabase, 168 "max-series-per-database": c.MaxSeriesPerDatabase,
154 "max-values-per-tag": c.MaxValuesPerTag, 169 "max-values-per-tag": c.MaxValuesPerTag,
170 + "max-concurrent-compactions": c.MaxConcurrentCompactions,
155 }), nil 171 }), nil
156 } 172 }
......
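Per the comments above, a configured value of 0 falls back to `runtime.GOMAXPROCS(0)`. A small sketch of that mapping; `resolveCompactionLimit` is a hypothetical helper for illustration, not part of the tsdb package:

```
package main

import (
	"fmt"
	"runtime"
)

// resolveCompactionLimit returns the effective concurrent-compaction cap:
// an explicit positive setting wins, otherwise the number of usable CPUs.
func resolveCompactionLimit(configured int) int {
	if configured > 0 {
		return configured
	}
	return runtime.GOMAXPROCS(0)
}

func main() {
	fmt.Println(resolveCompactionLimit(0)) // GOMAXPROCS-based default
	fmt.Println(resolveCompactionLimit(4)) // explicit cap of 4
}
```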
...@@ -12,6 +12,7 @@ import ( ...@@ -12,6 +12,7 @@ import (
12 "github.com/influxdata/influxdb/influxql" 12 "github.com/influxdata/influxdb/influxql"
13 "github.com/influxdata/influxdb/models" 13 "github.com/influxdata/influxdb/models"
14 "github.com/influxdata/influxdb/pkg/estimator" 14 "github.com/influxdata/influxdb/pkg/estimator"
15 + "github.com/influxdata/influxdb/pkg/limiter"
15 "github.com/uber-go/zap" 16 "github.com/uber-go/zap"
16 ) 17 )
17 18
...@@ -30,6 +31,8 @@ type Engine interface { ...@@ -30,6 +31,8 @@ type Engine interface {
30 Open() error 31 Open() error
31 Close() error 32 Close() error
32 SetEnabled(enabled bool) 33 SetEnabled(enabled bool)
34 + SetCompactionsEnabled(enabled bool)
35 +
33 WithLogger(zap.Logger) 36 WithLogger(zap.Logger)
34 37
35 LoadMetadataIndex(shardID uint64, index Index) error 38 LoadMetadataIndex(shardID uint64, index Index) error
...@@ -37,6 +40,7 @@ type Engine interface { ...@@ -37,6 +40,7 @@ type Engine interface {
37 CreateSnapshot() (string, error) 40 CreateSnapshot() (string, error)
38 Backup(w io.Writer, basePath string, since time.Time) error 41 Backup(w io.Writer, basePath string, since time.Time) error
39 Restore(r io.Reader, basePath string) error 42 Restore(r io.Reader, basePath string) error
43 + Import(r io.Reader, basePath string) error
40 44
41 CreateIterator(measurement string, opt influxql.IteratorOptions) (influxql.Iterator, error) 45 CreateIterator(measurement string, opt influxql.IteratorOptions) (influxql.Iterator, error)
42 WritePoints(points []models.Point) error 46 WritePoints(points []models.Point) error
...@@ -52,7 +56,7 @@ type Engine interface { ...@@ -52,7 +56,7 @@ type Engine interface {
52 MeasurementExists(name []byte) (bool, error) 56 MeasurementExists(name []byte) (bool, error)
53 MeasurementNamesByExpr(expr influxql.Expr) ([][]byte, error) 57 MeasurementNamesByExpr(expr influxql.Expr) ([][]byte, error)
54 MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) 58 MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error)
55 - MeasurementFields(measurement string) *MeasurementFields 59 + MeasurementFields(measurement []byte) *MeasurementFields
56 ForEachMeasurementName(fn func(name []byte) error) error 60 ForEachMeasurementName(fn func(name []byte) error) error
57 DeleteMeasurement(name []byte) error 61 DeleteMeasurement(name []byte) error
58 62
...@@ -70,6 +74,8 @@ type Engine interface { ...@@ -70,6 +74,8 @@ type Engine interface {
70 // Statistics will return statistics relevant to this engine. 74 // Statistics will return statistics relevant to this engine.
71 Statistics(tags map[string]string) []models.Statistic 75 Statistics(tags map[string]string) []models.Statistic
72 LastModified() time.Time 76 LastModified() time.Time
77 + DiskSize() int64
78 + IsIdle() bool
73 79
74 io.WriterTo 80 io.WriterTo
75 } 81 }
...@@ -139,6 +145,7 @@ type EngineOptions struct { ...@@ -139,6 +145,7 @@ type EngineOptions struct {
139 IndexVersion string 145 IndexVersion string
140 ShardID uint64 146 ShardID uint64
141 InmemIndex interface{} // shared in-memory index 147 InmemIndex interface{} // shared in-memory index
148 + CompactionLimiter limiter.Fixed
142 149
143 Config Config 150 Config Config
144 } 151 }
......
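The new `CompactionLimiter limiter.Fixed` field suggests a fixed-capacity semaphore shared across shards. A minimal sketch of the behaviour such a limiter needs, assuming a channel-backed implementation; the real `pkg/limiter` type may differ in naming and extras:

```
package main

import "fmt"

// fixed is a channel-backed semaphore: at most cap(f) holders at once.
type fixed chan struct{}

func newFixed(n int) fixed { return make(fixed, n) }

// tryTake acquires a slot without blocking and reports success.
func (f fixed) tryTake() bool {
	select {
	case f <- struct{}{}:
		return true
	default:
		return false
	}
}

// release frees a previously acquired slot.
func (f fixed) release() { <-f }

func main() {
	limiter := newFixed(2)
	for i := 0; i < 3; i++ {
		fmt.Println("compaction", i, "admitted:", limiter.tryTake())
	}
	limiter.release() // a finished compaction frees a slot for the next plan
}
```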
...@@ -260,7 +260,7 @@ func (c *Cache) Write(key string, values []Value) error { ...@@ -260,7 +260,7 @@ func (c *Cache) Write(key string, values []Value) error {
260 260
261 // Enough room in the cache? 261 // Enough room in the cache?
262 limit := c.maxSize 262 limit := c.maxSize
263 - n := c.Size() + atomic.LoadUint64(&c.snapshotSize) + addedSize 263 + n := c.Size() + addedSize
264 264
265 if limit > 0 && n > limit { 265 if limit > 0 && n > limit {
266 atomic.AddInt64(&c.stats.WriteErr, 1) 266 atomic.AddInt64(&c.stats.WriteErr, 1)
...@@ -293,7 +293,7 @@ func (c *Cache) WriteMulti(values map[string][]Value) error { ...@@ -293,7 +293,7 @@ func (c *Cache) WriteMulti(values map[string][]Value) error {
293 293
294 // Enough room in the cache? 294 // Enough room in the cache?
295 limit := c.maxSize // maxSize is safe for reading without a lock. 295 limit := c.maxSize // maxSize is safe for reading without a lock.
296 - n := c.Size() + atomic.LoadUint64(&c.snapshotSize) + addedSize 296 + n := c.Size() + addedSize
297 if limit > 0 && n > limit { 297 if limit > 0 && n > limit {
298 atomic.AddInt64(&c.stats.WriteErr, 1) 298 atomic.AddInt64(&c.stats.WriteErr, 1)
299 return ErrCacheMemorySizeLimitExceeded(n, limit) 299 return ErrCacheMemorySizeLimitExceeded(n, limit)
...@@ -416,7 +416,7 @@ func (c *Cache) ClearSnapshot(success bool) { ...@@ -416,7 +416,7 @@ func (c *Cache) ClearSnapshot(success bool) {
416 416
417 // Size returns the number of point-calculated bytes the cache currently uses. 417 // Size returns the number of point-calculated bytes the cache currently uses.
418 func (c *Cache) Size() uint64 { 418 func (c *Cache) Size() uint64 {
419 - return atomic.LoadUint64(&c.size) 419 + return atomic.LoadUint64(&c.size) + atomic.LoadUint64(&c.snapshotSize)
420 } 420 }
421 421
422 // increaseSize increases size by delta. 422 // increaseSize increases size by delta.
......
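The accounting change above moves the snapshot bytes into `Size()` itself, which is why the write-path checks drop their explicit `snapshotSize` term. A sketch of the resulting limit semantics; the struct is illustrative, with field names mirroring the diff:

```
package main

import "fmt"

// cache models just the size accounting from the diff.
type cache struct {
	size, snapshotSize, maxSize uint64
}

// Size now includes the bytes held by an in-flight snapshot.
func (c *cache) Size() uint64 { return c.size + c.snapshotSize }

// canWrite mirrors the write-path check: snapshot bytes are already
// counted, so only the incoming write size is added.
func (c *cache) canWrite(addedSize uint64) bool {
	n := c.Size() + addedSize
	return c.maxSize == 0 || n <= c.maxSize
}

func main() {
	c := &cache{size: 900, snapshotSize: 100, maxSize: 1024}
	fmt.Println(c.canWrite(16)) // true: 1016 <= 1024
	fmt.Println(c.canWrite(32)) // false: 1032 > 1024
}
```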
...@@ -448,7 +448,7 @@ func TestCache_Snapshot_Stats(t *testing.T) { ...@@ -448,7 +448,7 @@ func TestCache_Snapshot_Stats(t *testing.T) {
448 } 448 }
449 449
450 // Store size should now reflect only the snapshot bytes. 450 // Store size should now reflect only the snapshot bytes.
451 - if got, exp := c.Size(), uint64(0); got != exp { 451 + if got, exp := c.Size(), uint64(16); got != exp {
452 t.Fatalf("got %v, expected %v", got, exp) 452 t.Fatalf("got %v, expected %v", got, exp)
453 } 453 }
454 454
...@@ -579,6 +579,10 @@ func TestCacheLoader_LoadSingle(t *testing.T) { ...@@ -579,6 +579,10 @@ func TestCacheLoader_LoadSingle(t *testing.T) {
579 t.Fatal("write points", err) 579 t.Fatal("write points", err)
580 } 580 }
581 581
582 + if err := w.Flush(); err != nil {
583 + t.Fatalf("flush error: %v", err)
584 + }
585 +
582 // Load the cache using the segment. 586 // Load the cache using the segment.
583 cache := NewCache(1024, "") 587 cache := NewCache(1024, "")
584 loader := NewCacheLoader([]string{f.Name()}) 588 loader := NewCacheLoader([]string{f.Name()})
...@@ -643,6 +647,9 @@ func TestCacheLoader_LoadDouble(t *testing.T) { ...@@ -643,6 +647,9 @@ func TestCacheLoader_LoadDouble(t *testing.T) {
643 if err := w1.Write(mustMarshalEntry(entry)); err != nil { 647 if err := w1.Write(mustMarshalEntry(entry)); err != nil {
644 t.Fatal("write points", err) 648 t.Fatal("write points", err)
645 } 649 }
650 + if err := w1.Flush(); err != nil {
651 + t.Fatalf("flush error: %v", err)
652 + }
646 } 653 }
647 654
648 values := map[string][]Value{ 655 values := map[string][]Value{
...@@ -707,6 +714,10 @@ func TestCacheLoader_LoadDeleted(t *testing.T) { ...@@ -707,6 +714,10 @@ func TestCacheLoader_LoadDeleted(t *testing.T) {
707 t.Fatal("write points", err) 714 t.Fatal("write points", err)
708 } 715 }
709 716
717 + if err := w.Flush(); err != nil {
718 + t.Fatalf("flush error: %v", err)
719 + }
720 +
710 dentry := &DeleteRangeWALEntry{ 721 dentry := &DeleteRangeWALEntry{
711 Keys: []string{"foo"}, 722 Keys: []string{"foo"},
712 Min: 2, 723 Min: 2,
...@@ -717,6 +728,10 @@ func TestCacheLoader_LoadDeleted(t *testing.T) { ...@@ -717,6 +728,10 @@ func TestCacheLoader_LoadDeleted(t *testing.T) {
717 t.Fatal("write points", err) 728 t.Fatal("write points", err)
718 } 729 }
719 730
731 + if err := w.Flush(); err != nil {
732 + t.Fatalf("flush error: %v", err)
733 + }
734 +
720 // Load the cache using the segment. 735 // Load the cache using the segment.
721 cache := NewCache(1024, "") 736 cache := NewCache(1024, "")
722 loader := NewCacheLoader([]string{f.Name()}) 737 loader := NewCacheLoader([]string{f.Name()})
......
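The added `Flush()` calls matter because segment writes appear to be buffered: until the buffer is flushed, a reader opening the same file sees a short or empty segment. A minimal illustration of that effect with `bufio`, under the assumption that the WAL writer buffers similarly:

```
package main

import (
	"bufio"
	"fmt"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "segment")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	w := bufio.NewWriter(f)
	w.WriteString("entry-1")

	data, _ := os.ReadFile(f.Name())
	fmt.Printf("before flush: %d bytes\n", len(data)) // 0: still buffered

	w.Flush()
	data, _ = os.ReadFile(f.Name())
	fmt.Printf("after flush: %d bytes\n", len(data)) // 7: visible on disk
}
```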
...@@ -53,6 +53,8 @@ type CompactionPlanner interface { ...@@ -53,6 +53,8 @@ type CompactionPlanner interface {
53 Plan(lastWrite time.Time) []CompactionGroup 53 Plan(lastWrite time.Time) []CompactionGroup
54 PlanLevel(level int) []CompactionGroup 54 PlanLevel(level int) []CompactionGroup
55 PlanOptimize() []CompactionGroup 55 PlanOptimize() []CompactionGroup
56 + Release(group []CompactionGroup)
57 + FullyCompacted() bool
56 } 58 }
57 59
58 // DefaultPlanner implements CompactionPlanner using a strategy to roll up 60 // DefaultPlanner implements CompactionPlanner using a strategy to roll up
...@@ -60,17 +62,13 @@ type CompactionPlanner interface { ...@@ -60,17 +62,13 @@ type CompactionPlanner interface {
60 // to minimize the number of TSM files on disk while rolling up a bounded number 62 // to minimize the number of TSM files on disk while rolling up a bounded number
61 // of files. 63 // of files.
62 type DefaultPlanner struct { 64 type DefaultPlanner struct {
63 - FileStore interface { 65 + FileStore fileStore
64 - Stats() []FileStat
65 - LastModified() time.Time
66 - BlockCount(path string, idx int) int
67 - }
68 66
69 - // CompactFullWriteColdDuration specifies the length of time after 67 + // compactFullWriteColdDuration specifies the length of time after
70 // which if no writes have been committed to the WAL, the engine will 68 // which if no writes have been committed to the WAL, the engine will
71 // do a full compaction of the TSM files in this shard. This duration 69 // do a full compaction of the TSM files in this shard. This duration
72 // should always be greater than the CacheFlushWriteColdDuration 70 // should always be greater than the CacheFlushWriteColdDuration
73 - CompactFullWriteColdDuration time.Duration 71 + compactFullWriteColdDuration time.Duration
74 72
75 // lastPlanCheck is the last time Plan was called 73 // lastPlanCheck is the last time Plan was called
76 lastPlanCheck time.Time 74 lastPlanCheck time.Time
...@@ -81,6 +79,24 @@ type DefaultPlanner struct { ...@@ -81,6 +79,24 @@ type DefaultPlanner struct {
81 79
82 // lastGenerations is the last set of generations found by findGenerations 80 // lastGenerations is the last set of generations found by findGenerations
83 lastGenerations tsmGenerations 81 lastGenerations tsmGenerations
82 +
83 + // filesInUse is the set of files that have been returned as part of a plan and may
84 + // currently be undergoing compaction. Two plans should not return the same file at any given time.
85 + filesInUse map[string]struct{}
86 +}
87 +
88 +type fileStore interface {
89 + Stats() []FileStat
90 + LastModified() time.Time
91 + BlockCount(path string, idx int) int
92 +}
93 +
94 +func NewDefaultPlanner(fs fileStore, writeColdDuration time.Duration) *DefaultPlanner {
95 + return &DefaultPlanner{
96 + FileStore: fs,
97 + compactFullWriteColdDuration: writeColdDuration,
98 + filesInUse: make(map[string]struct{}),
99 + }
84 } 100 }
85 101
86 // tsmGeneration represents the TSM files within a generation. 102 // tsmGeneration represents the TSM files within a generation.
...@@ -106,7 +122,7 @@ func (t *tsmGeneration) level() int { ...@@ -106,7 +122,7 @@ func (t *tsmGeneration) level() int {
106 // 1 file with a sequence num of 1. Level 2 is generated by compacting multiple 122 // 1 file with a sequence num of 1. Level 2 is generated by compacting multiple
107 // level 1 files. Level 3 is generated by compacting multiple level 2 files. Level 123 // level 1 files. Level 3 is generated by compacting multiple level 2 files. Level
108 // 4 is for anything else. 124 // 4 is for anything else.
109 - _, seq, _ := ParseTSMFileName(t.files[0].Path) 125 + _, seq, _ := ParseTSMFileName(t.files[len(t.files)-1].Path)
110 if seq < 4 { 126 if seq < 4 {
111 return seq 127 return seq
112 } 128 }
...@@ -129,6 +145,12 @@ func (t *tsmGeneration) hasTombstones() bool { ...@@ -129,6 +145,12 @@ func (t *tsmGeneration) hasTombstones() bool {
129 return false 145 return false
130 } 146 }
131 147
148 +// FullyCompacted returns true if the shard is fully compacted.
149 +func (c *DefaultPlanner) FullyCompacted() bool {
150 + gens := c.findGenerations()
151 + return len(gens) <= 1 && !gens.hasTombstones()
152 +}
153 +
132 // PlanLevel returns a set of TSM files to rewrite for a specific level. 154 // PlanLevel returns a set of TSM files to rewrite for a specific level.
133 func (c *DefaultPlanner) PlanLevel(level int) []CompactionGroup { 155 func (c *DefaultPlanner) PlanLevel(level int) []CompactionGroup {
134 // Determine the generations from all files on disk. We need to treat 156 // Determine the generations from all files on disk. We need to treat
...@@ -149,7 +171,7 @@ func (c *DefaultPlanner) PlanLevel(level int) []CompactionGroup { ...@@ -149,7 +171,7 @@ func (c *DefaultPlanner) PlanLevel(level int) []CompactionGroup {
149 for i := 0; i < len(generations); i++ { 171 for i := 0; i < len(generations); i++ {
150 cur := generations[i] 172 cur := generations[i]
151 173
152 - if len(currentGen) == 0 || currentGen[0].level() == cur.level() { 174 + if len(currentGen) == 0 || currentGen.level() == cur.level() {
153 currentGen = append(currentGen, cur) 175 currentGen = append(currentGen, cur)
154 continue 176 continue
155 } 177 }
...@@ -166,7 +188,7 @@ func (c *DefaultPlanner) PlanLevel(level int) []CompactionGroup { ...@@ -166,7 +188,7 @@ func (c *DefaultPlanner) PlanLevel(level int) []CompactionGroup {
166 // Remove any groups in the wrong level 188 // Remove any groups in the wrong level
167 var levelGroups []tsmGenerations 189 var levelGroups []tsmGenerations
168 for _, cur := range groups { 190 for _, cur := range groups {
169 - if cur[0].level() == level { 191 + if cur.level() == level {
170 levelGroups = append(levelGroups, cur) 192 levelGroups = append(levelGroups, cur)
171 } 193 }
172 } 194 }
...@@ -205,6 +227,10 @@ func (c *DefaultPlanner) PlanLevel(level int) []CompactionGroup { ...@@ -205,6 +227,10 @@ func (c *DefaultPlanner) PlanLevel(level int) []CompactionGroup {
205 } 227 }
206 } 228 }
207 229
230 + if !c.acquire(cGroups) {
231 + return nil
232 + }
233 +
208 return cGroups 234 return cGroups
209 } 235 }
210 236
...@@ -230,7 +256,7 @@ func (c *DefaultPlanner) PlanOptimize() []CompactionGroup { ...@@ -230,7 +256,7 @@ func (c *DefaultPlanner) PlanOptimize() []CompactionGroup {
230 for i := 0; i < len(generations); i++ { 256 for i := 0; i < len(generations); i++ {
231 cur := generations[i] 257 cur := generations[i]
232 258
233 - if len(currentGen) == 0 || currentGen[0].level() == cur.level() { 259 + if len(currentGen) == 0 || currentGen.level() == cur.level() {
234 currentGen = append(currentGen, cur) 260 currentGen = append(currentGen, cur)
235 continue 261 continue
236 } 262 }
...@@ -248,7 +274,7 @@ func (c *DefaultPlanner) PlanOptimize() []CompactionGroup { ...@@ -248,7 +274,7 @@ func (c *DefaultPlanner) PlanOptimize() []CompactionGroup {
248 // with the level planners 274 // with the level planners
249 var levelGroups []tsmGenerations 275 var levelGroups []tsmGenerations
250 for _, cur := range groups { 276 for _, cur := range groups {
251 - if cur[0].level() == 4 { 277 + if cur.level() == 4 {
252 levelGroups = append(levelGroups, cur) 278 levelGroups = append(levelGroups, cur)
253 } 279 }
254 } 280 }
...@@ -270,6 +296,10 @@ func (c *DefaultPlanner) PlanOptimize() []CompactionGroup { ...@@ -270,6 +296,10 @@ func (c *DefaultPlanner) PlanOptimize() []CompactionGroup {
270 cGroups = append(cGroups, cGroup) 296 cGroups = append(cGroups, cGroup)
271 } 297 }
272 298
299 + if !c.acquire(cGroups) {
300 + return nil
301 + }
302 +
273 return cGroups 303 return cGroups
274 } 304 }
275 305
...@@ -279,7 +309,7 @@ func (c *DefaultPlanner) Plan(lastWrite time.Time) []CompactionGroup { ...@@ -279,7 +309,7 @@ func (c *DefaultPlanner) Plan(lastWrite time.Time) []CompactionGroup {
279 generations := c.findGenerations() 309 generations := c.findGenerations()
280 310
281 // first check if we should be doing a full compaction because nothing has been written in a long time 311 // first check if we should be doing a full compaction because nothing has been written in a long time
282 - if c.CompactFullWriteColdDuration > 0 && time.Since(lastWrite) > c.CompactFullWriteColdDuration && len(generations) > 1 { 312 + if c.compactFullWriteColdDuration > 0 && time.Since(lastWrite) > c.compactFullWriteColdDuration && len(generations) > 1 {
283 var tsmFiles []string 313 var tsmFiles []string
284 var genCount int 314 var genCount int
285 for i, group := range generations { 315 for i, group := range generations {
...@@ -316,7 +346,11 @@ func (c *DefaultPlanner) Plan(lastWrite time.Time) []CompactionGroup { ...@@ -316,7 +346,11 @@ func (c *DefaultPlanner) Plan(lastWrite time.Time) []CompactionGroup {
316 return nil 346 return nil
317 } 347 }
318 348
319 - return []CompactionGroup{tsmFiles} 349 + group := []CompactionGroup{tsmFiles}
350 + if !c.acquire(group) {
351 + return nil
352 + }
353 + return group
320 } 354 }
321 355
322 // don't plan if nothing has changed in the filestore 356 // don't plan if nothing has changed in the filestore
...@@ -449,6 +483,9 @@ func (c *DefaultPlanner) Plan(lastWrite time.Time) []CompactionGroup { ...@@ -449,6 +483,9 @@ func (c *DefaultPlanner) Plan(lastWrite time.Time) []CompactionGroup {
449 tsmFiles = append(tsmFiles, cGroup) 483 tsmFiles = append(tsmFiles, cGroup)
450 } 484 }
451 485
486 + if !c.acquire(tsmFiles) {
487 + return nil
488 + }
452 return tsmFiles 489 return tsmFiles
453 } 490 }
454 491
...@@ -496,6 +533,40 @@ func (c *DefaultPlanner) findGenerations() tsmGenerations { ...@@ -496,6 +533,40 @@ func (c *DefaultPlanner) findGenerations() tsmGenerations {
496 return orderedGenerations 533 return orderedGenerations
497 } 534 }
498 535
536 +func (c *DefaultPlanner) acquire(groups []CompactionGroup) bool {
537 + c.mu.Lock()
538 + defer c.mu.Unlock()
539 +
540 + // See if the new files are already in use
541 + for _, g := range groups {
542 + for _, f := range g {
543 + if _, ok := c.filesInUse[f]; ok {
544 + return false
545 + }
546 + }
547 + }
548 +
549 + // Mark all the new files in use
550 + for _, g := range groups {
551 + for _, f := range g {
552 + c.filesInUse[f] = struct{}{}
553 + }
554 + }
555 + return true
556 +}
557 +
558 +// Release removes the file references in each compaction group, allowing new
559 +// plans to use them.
560 +func (c *DefaultPlanner) Release(groups []CompactionGroup) {
561 + c.mu.Lock()
562 + defer c.mu.Unlock()
563 + for _, g := range groups {
564 + for _, f := range g {
565 + delete(c.filesInUse, f)
566 + }
567 + }
568 +}
569 +
499 // Compactor merges multiple TSM files into new files or 570 // Compactor merges multiple TSM files into new files or
500 // writes a Cache into 1 or more TSM files. 571 // writes a Cache into 1 or more TSM files.
501 type Compactor struct { 572 type Compactor struct {
...@@ -1249,6 +1320,17 @@ func (a tsmGenerations) hasTombstones() bool { ...@@ -1249,6 +1320,17 @@ func (a tsmGenerations) hasTombstones() bool {
1249 return false 1320 return false
1250 } 1321 }
1251 1322
1323 +func (a tsmGenerations) level() int {
1324 + var level int
1325 + for _, g := range a {
1326 + lev := g.level()
1327 + if lev > level {
1328 + level = lev
1329 + }
1330 + }
1331 + return level
1332 +}
1333 +
1252 func (a tsmGenerations) chunk(size int) []tsmGenerations { 1334 func (a tsmGenerations) chunk(size int) []tsmGenerations {
1253 var chunks []tsmGenerations 1335 var chunks []tsmGenerations
1254 for len(a) > 0 { 1336 for len(a) > 0 {
......
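Taken together, `acquire` and `Release` give each plan exclusive ownership of its files: a plan only succeeds if none of its files are claimed by another in-flight plan, and releasing returns them to the pool. A standalone sketch of that lifecycle; names mirror the diff but the type below is illustrative, not the real `DefaultPlanner`:

```
package main

import (
	"fmt"
	"sync"
)

// planner models just the filesInUse bookkeeping from the diff.
type planner struct {
	mu         sync.Mutex
	filesInUse map[string]struct{}
}

// acquire claims every file in the groups, or none if any is already taken.
func (p *planner) acquire(groups [][]string) bool {
	p.mu.Lock()
	defer p.mu.Unlock()
	for _, g := range groups {
		for _, f := range g {
			if _, ok := p.filesInUse[f]; ok {
				return false // file already claimed by another plan
			}
		}
	}
	for _, g := range groups {
		for _, f := range g {
			p.filesInUse[f] = struct{}{}
		}
	}
	return true
}

// release returns the files so later plans can use them again.
func (p *planner) release(groups [][]string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	for _, g := range groups {
		for _, f := range g {
			delete(p.filesInUse, f)
		}
	}
}

func main() {
	p := &planner{filesInUse: make(map[string]struct{})}
	plan := [][]string{{"01-01.tsm", "01-02.tsm"}}
	fmt.Println(p.acquire(plan)) // true: files are free
	fmt.Println(p.acquire(plan)) // false: same files are in use
	p.release(plan)
	fmt.Println(p.acquire(plan)) // true again after release
}
```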
...@@ -8,6 +8,7 @@ import ( ...@@ -8,6 +8,7 @@ import (
8 "testing" 8 "testing"
9 "time" 9 "time"
10 10
11 + "github.com/influxdata/influxdb/tsdb"
11 "github.com/influxdata/influxdb/tsdb/engine/tsm1" 12 "github.com/influxdata/influxdb/tsdb/engine/tsm1"
12 ) 13 )
13 14
...@@ -1090,8 +1091,8 @@ func TestCacheKeyIterator_Chunked(t *testing.T) { ...@@ -1090,8 +1091,8 @@ func TestCacheKeyIterator_Chunked(t *testing.T) {
1090 } 1091 }
1091 1092
1092 func TestDefaultPlanner_Plan_Min(t *testing.T) { 1093 func TestDefaultPlanner_Plan_Min(t *testing.T) {
1093 - cp := &tsm1.DefaultPlanner{ 1094 + cp := tsm1.NewDefaultPlanner(
1094 - FileStore: &fakeFileStore{ 1095 + &fakeFileStore{
1095 PathsFn: func() []tsm1.FileStat { 1096 PathsFn: func() []tsm1.FileStat {
1096 return []tsm1.FileStat{ 1097 return []tsm1.FileStat{
1097 tsm1.FileStat{ 1098 tsm1.FileStat{
...@@ -1108,8 +1109,8 @@ func TestDefaultPlanner_Plan_Min(t *testing.T) { ...@@ -1108,8 +1109,8 @@ func TestDefaultPlanner_Plan_Min(t *testing.T) {
1108 }, 1109 },
1109 } 1110 }
1110 }, 1111 },
1111 - }, 1112 + }, tsdb.DefaultCompactFullWriteColdDuration,
1112 - } 1113 + )
1113 1114
1114 tsm := cp.Plan(time.Now()) 1115 tsm := cp.Plan(time.Now())
1115 if exp, got := 0, len(tsm); got != exp { 1116 if exp, got := 0, len(tsm); got != exp {
...@@ -1151,13 +1152,13 @@ func TestDefaultPlanner_Plan_CombineSequence(t *testing.T) { ...@@ -1151,13 +1152,13 @@ func TestDefaultPlanner_Plan_CombineSequence(t *testing.T) {
1151 }, 1152 },
1152 } 1153 }
1153 1154
1154 - cp := &tsm1.DefaultPlanner{ 1155 + cp := tsm1.NewDefaultPlanner(
1155 - FileStore: &fakeFileStore{ 1156 + &fakeFileStore{
1156 PathsFn: func() []tsm1.FileStat { 1157 PathsFn: func() []tsm1.FileStat {
1157 return data 1158 return data
1158 }, 1159 },
1159 - }, 1160 + }, tsdb.DefaultCompactFullWriteColdDuration,
1160 - } 1161 + )
1161 1162
1162 expFiles := []tsm1.FileStat{data[0], data[1], data[2], data[3]} 1163 expFiles := []tsm1.FileStat{data[0], data[1], data[2], data[3]}
1163 tsm := cp.Plan(time.Now()) 1164 tsm := cp.Plan(time.Now())
...@@ -1213,13 +1214,11 @@ func TestDefaultPlanner_Plan_MultipleGroups(t *testing.T) { ...@@ -1213,13 +1214,11 @@ func TestDefaultPlanner_Plan_MultipleGroups(t *testing.T) {
1213 }, 1214 },
1214 } 1215 }
1215 1216
1216 - cp := &tsm1.DefaultPlanner{ 1217 + cp := tsm1.NewDefaultPlanner(&fakeFileStore{
1217 - FileStore: &fakeFileStore{
1218 PathsFn: func() []tsm1.FileStat { 1218 PathsFn: func() []tsm1.FileStat {
1219 return data 1219 return data
1220 }, 1220 },
1221 - }, 1221 + }, tsdb.DefaultCompactFullWriteColdDuration)
1222 - }
1223 1222
1224 expFiles := []tsm1.FileStat{data[0], data[1], data[2], data[3], 1223 expFiles := []tsm1.FileStat{data[0], data[1], data[2], data[3],
1225 data[4], data[5], data[6], data[7]} 1224 data[4], data[5], data[6], data[7]}
...@@ -1280,13 +1279,13 @@ func TestDefaultPlanner_PlanLevel_SmallestCompactionStep(t *testing.T) { ...@@ -1280,13 +1279,13 @@ func TestDefaultPlanner_PlanLevel_SmallestCompactionStep(t *testing.T) {
1280 }, 1279 },
1281 } 1280 }
1282 1281
1283 - cp := &tsm1.DefaultPlanner{ 1282 + cp := tsm1.NewDefaultPlanner(
1284 - FileStore: &fakeFileStore{ 1283 + &fakeFileStore{
1285 PathsFn: func() []tsm1.FileStat { 1284 PathsFn: func() []tsm1.FileStat {
1286 return data 1285 return data
1287 }, 1286 },
1288 - }, 1287 + }, tsdb.DefaultCompactFullWriteColdDuration,
1289 - } 1288 + )
1290 1289
1291 expFiles := []tsm1.FileStat{data[4], data[5]} 1290 expFiles := []tsm1.FileStat{data[4], data[5]}
1292 tsm := cp.PlanLevel(1) 1291 tsm := cp.PlanLevel(1)
...@@ -1312,11 +1311,11 @@ func TestDefaultPlanner_PlanLevel_SplitFile(t *testing.T) { ...@@ -1312,11 +1311,11 @@ func TestDefaultPlanner_PlanLevel_SplitFile(t *testing.T) {
1312 Size: 1 * 1024 * 1024, 1311 Size: 1 * 1024 * 1024,
1313 }, 1312 },
1314 tsm1.FileStat{ 1313 tsm1.FileStat{
1315 - Path: "03-03.tsm1", 1314 + Path: "03-02.tsm1",
1316 Size: 2 * 1024 * 1024 * 1024, 1315 Size: 2 * 1024 * 1024 * 1024,
1317 }, 1316 },
1318 tsm1.FileStat{ 1317 tsm1.FileStat{
1319 - Path: "03-04.tsm1", 1318 + Path: "03-03.tsm1",
1320 Size: 10 * 1024 * 1024, 1319 Size: 10 * 1024 * 1024,
1321 }, 1320 },
1322 tsm1.FileStat{ 1321 tsm1.FileStat{
...@@ -1333,13 +1332,13 @@ func TestDefaultPlanner_PlanLevel_SplitFile(t *testing.T) { ...@@ -1333,13 +1332,13 @@ func TestDefaultPlanner_PlanLevel_SplitFile(t *testing.T) {
1333 }, 1332 },
1334 } 1333 }
1335 1334
1336 - cp := &tsm1.DefaultPlanner{ 1335 + cp := tsm1.NewDefaultPlanner(
1337 - FileStore: &fakeFileStore{ 1336 + &fakeFileStore{
1338 PathsFn: func() []tsm1.FileStat { 1337 PathsFn: func() []tsm1.FileStat {
1339 return data 1338 return data
1340 }, 1339 },
1341 - }, 1340 + }, tsdb.DefaultCompactFullWriteColdDuration,
1342 - } 1341 + )
1343 1342
1344 expFiles := []tsm1.FileStat{data[0], data[1], data[2], data[3], data[4]} 1343 expFiles := []tsm1.FileStat{data[0], data[1], data[2], data[3], data[4]}
1345 tsm := cp.PlanLevel(3) 1344 tsm := cp.PlanLevel(3)
...@@ -1382,13 +1381,13 @@ func TestDefaultPlanner_PlanLevel_IsolatedLowLevel(t *testing.T) { ...@@ -1382,13 +1381,13 @@ func TestDefaultPlanner_PlanLevel_IsolatedLowLevel(t *testing.T) {
1382 }, 1381 },
1383 } 1382 }
1384 1383
1385 - cp := &tsm1.DefaultPlanner{ 1384 + cp := tsm1.NewDefaultPlanner(
1386 - FileStore: &fakeFileStore{ 1385 + &fakeFileStore{
1387 PathsFn: func() []tsm1.FileStat { 1386 PathsFn: func() []tsm1.FileStat {
1388 return data 1387 return data
1389 }, 1388 },
1390 - }, 1389 + }, tsdb.DefaultCompactFullWriteColdDuration,
1391 - } 1390 + )
1392 1391
1393 expFiles := []tsm1.FileStat{data[2], data[3]} 1392 expFiles := []tsm1.FileStat{data[2], data[3]}
1394 tsm := cp.PlanLevel(1) 1393 tsm := cp.PlanLevel(1)
...@@ -1435,13 +1434,13 @@ func TestDefaultPlanner_PlanLevel_IsolatedHighLevel(t *testing.T) { ...@@ -1435,13 +1434,13 @@ func TestDefaultPlanner_PlanLevel_IsolatedHighLevel(t *testing.T) {
1435 }, 1434 },
1436 } 1435 }
1437 1436
1438 - cp := &tsm1.DefaultPlanner{ 1437 + cp := tsm1.NewDefaultPlanner(
1439 - FileStore: &fakeFileStore{ 1438 + &fakeFileStore{
1440 PathsFn: func() []tsm1.FileStat { 1439 PathsFn: func() []tsm1.FileStat {
1441 return data 1440 return data
1442 }, 1441 },
1443 - }, 1442 + }, tsdb.DefaultCompactFullWriteColdDuration,
1444 - } 1443 + )
1445 1444
1446 expFiles := []tsm1.FileStat{} 1445 expFiles := []tsm1.FileStat{}
1447 tsm := cp.PlanLevel(3) 1446 tsm := cp.PlanLevel(3)
...@@ -1478,13 +1477,13 @@ func TestDefaultPlanner_PlanLevel3_MinFiles(t *testing.T) { ...@@ -1478,13 +1477,13 @@ func TestDefaultPlanner_PlanLevel3_MinFiles(t *testing.T) {
1478 }, 1477 },
1479 } 1478 }
1480 1479
1481 - cp := &tsm1.DefaultPlanner{ 1480 + cp := tsm1.NewDefaultPlanner(
1482 - FileStore: &fakeFileStore{ 1481 + &fakeFileStore{
1483 PathsFn: func() []tsm1.FileStat { 1482 PathsFn: func() []tsm1.FileStat {
1484 return data 1483 return data
1485 }, 1484 },
1486 - }, 1485 + }, tsdb.DefaultCompactFullWriteColdDuration,
1487 - } 1486 + )
1488 1487
1489 expFiles := []tsm1.FileStat{} 1488 expFiles := []tsm1.FileStat{}
1490 tsm := cp.PlanLevel(3) 1489 tsm := cp.PlanLevel(3)
...@@ -1510,13 +1509,13 @@ func TestDefaultPlanner_PlanLevel2_MinFiles(t *testing.T) { ...@@ -1510,13 +1509,13 @@ func TestDefaultPlanner_PlanLevel2_MinFiles(t *testing.T) {
1510 }, 1509 },
1511 } 1510 }
1512 1511
1513 - cp := &tsm1.DefaultPlanner{ 1512 + cp := tsm1.NewDefaultPlanner(
1514 - FileStore: &fakeFileStore{ 1513 + &fakeFileStore{
1515 PathsFn: func() []tsm1.FileStat { 1514 PathsFn: func() []tsm1.FileStat {
1516 return data 1515 return data
1517 }, 1516 },
1518 - }, 1517 + }, tsdb.DefaultCompactFullWriteColdDuration,
1519 - } 1518 + )
1520 1519
1521 expFiles := []tsm1.FileStat{} 1520 expFiles := []tsm1.FileStat{}
1522 tsm := cp.PlanLevel(2) 1521 tsm := cp.PlanLevel(2)
...@@ -1554,13 +1553,13 @@ func TestDefaultPlanner_PlanLevel_Tombstone(t *testing.T) { ...@@ -1554,13 +1553,13 @@ func TestDefaultPlanner_PlanLevel_Tombstone(t *testing.T) {
1554 }, 1553 },
1555 } 1554 }
1556 1555
1557 - cp := &tsm1.DefaultPlanner{ 1556 + cp := tsm1.NewDefaultPlanner(
1558 - FileStore: &fakeFileStore{ 1557 + &fakeFileStore{
1559 PathsFn: func() []tsm1.FileStat { 1558 PathsFn: func() []tsm1.FileStat {
1560 return data 1559 return data
1561 }, 1560 },
1562 - }, 1561 + }, tsdb.DefaultCompactFullWriteColdDuration,
1563 - } 1562 + )
1564 1563
1565 expFiles := []tsm1.FileStat{data[0], data[1]} 1564 expFiles := []tsm1.FileStat{data[0], data[1]}
1566 tsm := cp.PlanLevel(3) 1565 tsm := cp.PlanLevel(3)
...@@ -1603,13 +1602,13 @@ func TestDefaultPlanner_PlanLevel_Multiple(t *testing.T) { ...@@ -1603,13 +1602,13 @@ func TestDefaultPlanner_PlanLevel_Multiple(t *testing.T) {
1603 }, 1602 },
1604 } 1603 }
1605 1604
1606 - cp := &tsm1.DefaultPlanner{ 1605 + cp := tsm1.NewDefaultPlanner(
1607 - FileStore: &fakeFileStore{ 1606 + &fakeFileStore{
1608 PathsFn: func() []tsm1.FileStat { 1607 PathsFn: func() []tsm1.FileStat {
1609 return data 1608 return data
1610 }, 1609 },
1611 - }, 1610 + }, tsdb.DefaultCompactFullWriteColdDuration,
1612 - } 1611 + )
1613 1612
1614 expFiles1 := []tsm1.FileStat{data[0], data[1], data[2], data[3]} 1613 expFiles1 := []tsm1.FileStat{data[0], data[1], data[2], data[3]}
1615 expFiles2 := []tsm1.FileStat{data[4], data[5]} 1614 expFiles2 := []tsm1.FileStat{data[4], data[5]}
...@@ -1652,13 +1651,13 @@ func TestDefaultPlanner_PlanOptimize_NoLevel4(t *testing.T) { ...@@ -1652,13 +1651,13 @@ func TestDefaultPlanner_PlanOptimize_NoLevel4(t *testing.T) {
1652 }, 1651 },
1653 } 1652 }
1654 1653
1655 - cp := &tsm1.DefaultPlanner{ 1654 + cp := tsm1.NewDefaultPlanner(
1656 - FileStore: &fakeFileStore{ 1655 + &fakeFileStore{
1657 PathsFn: func() []tsm1.FileStat { 1656 PathsFn: func() []tsm1.FileStat {
1658 return data 1657 return data
1659 }, 1658 },
1660 - }, 1659 + }, tsdb.DefaultCompactFullWriteColdDuration,
1661 - } 1660 + )
1662 1661
1663 expFiles := []tsm1.FileStat{} 1662 expFiles := []tsm1.FileStat{}
1664 tsm := cp.PlanOptimize() 1663 tsm := cp.PlanOptimize()
...@@ -1695,13 +1694,13 @@ func TestDefaultPlanner_PlanOptimize_Level4(t *testing.T) { ...@@ -1695,13 +1694,13 @@ func TestDefaultPlanner_PlanOptimize_Level4(t *testing.T) {
1695 }, 1694 },
1696 } 1695 }
1697 1696
1698 - cp := &tsm1.DefaultPlanner{ 1697 + cp := tsm1.NewDefaultPlanner(
1699 - FileStore: &fakeFileStore{ 1698 + &fakeFileStore{
1700 PathsFn: func() []tsm1.FileStat { 1699 PathsFn: func() []tsm1.FileStat {
1701 return data 1700 return data
1702 }, 1701 },
1703 - }, 1702 + }, tsdb.DefaultCompactFullWriteColdDuration,
1704 - } 1703 + )
1705 1704
1706 expFiles1 := []tsm1.FileStat{data[0], data[1], data[2], data[3]} 1705 expFiles1 := []tsm1.FileStat{data[0], data[1], data[2], data[3]}
1707 tsm := cp.PlanOptimize() 1706 tsm := cp.PlanOptimize()
...@@ -1760,13 +1759,13 @@ func TestDefaultPlanner_PlanOptimize_Multiple(t *testing.T) { ...@@ -1760,13 +1759,13 @@ func TestDefaultPlanner_PlanOptimize_Multiple(t *testing.T) {
1760 }, 1759 },
1761 } 1760 }
1762 1761
1763 - cp := &tsm1.DefaultPlanner{ 1762 + cp := tsm1.NewDefaultPlanner(
1764 - FileStore: &fakeFileStore{ 1763 + &fakeFileStore{
1765 PathsFn: func() []tsm1.FileStat { 1764 PathsFn: func() []tsm1.FileStat {
1766 return data 1765 return data
1767 }, 1766 },
1768 - }, 1767 + }, tsdb.DefaultCompactFullWriteColdDuration,
1769 - } 1768 + )
1770 1769
1771 expFiles1 := []tsm1.FileStat{data[0], data[1], data[2], data[3]} 1770 expFiles1 := []tsm1.FileStat{data[0], data[1], data[2], data[3]}
1772 expFiles2 := []tsm1.FileStat{data[5], data[6], data[7], data[8]} 1771 expFiles2 := []tsm1.FileStat{data[5], data[6], data[7], data[8]}
...@@ -1813,13 +1812,13 @@ func TestDefaultPlanner_PlanOptimize_Optimized(t *testing.T) { ...@@ -1813,13 +1812,13 @@ func TestDefaultPlanner_PlanOptimize_Optimized(t *testing.T) {
1813 }, 1812 },
1814 } 1813 }
1815 1814
1816 - cp := &tsm1.DefaultPlanner{ 1815 + cp := tsm1.NewDefaultPlanner(
1817 - FileStore: &fakeFileStore{ 1816 + &fakeFileStore{
1818 PathsFn: func() []tsm1.FileStat { 1817 PathsFn: func() []tsm1.FileStat {
1819 return data 1818 return data
1820 }, 1819 },
1821 - }, 1820 + }, tsdb.DefaultCompactFullWriteColdDuration,
1822 - } 1821 + )
1823 1822
1824 expFiles := []tsm1.FileStat{} 1823 expFiles := []tsm1.FileStat{}
1825 tsm := cp.PlanOptimize() 1824 tsm := cp.PlanOptimize()
...@@ -1845,13 +1844,13 @@ func TestDefaultPlanner_PlanOptimize_Tombstones(t *testing.T) { ...@@ -1845,13 +1844,13 @@ func TestDefaultPlanner_PlanOptimize_Tombstones(t *testing.T) {
1845 }, 1844 },
1846 } 1845 }
1847 1846
1848 - cp := &tsm1.DefaultPlanner{ 1847 + cp := tsm1.NewDefaultPlanner(
1849 - FileStore: &fakeFileStore{ 1848 + &fakeFileStore{
1850 PathsFn: func() []tsm1.FileStat { 1849 PathsFn: func() []tsm1.FileStat {
1851 return data 1850 return data
1852 }, 1851 },
1853 - }, 1852 + }, tsdb.DefaultCompactFullWriteColdDuration,
1854 - } 1853 + )
1855 1854
1856 expFiles := []tsm1.FileStat{data[0], data[1], data[2]} 1855 expFiles := []tsm1.FileStat{data[0], data[1], data[2]}
1857 tsm := cp.PlanOptimize() 1856 tsm := cp.PlanOptimize()
...@@ -1897,14 +1896,14 @@ func TestDefaultPlanner_Plan_FullOnCold(t *testing.T) { ...@@ -1897,14 +1896,14 @@ func TestDefaultPlanner_Plan_FullOnCold(t *testing.T) {
1897 }, 1896 },
1898 } 1897 }
1899 1898
1900 - cp := &tsm1.DefaultPlanner{ 1899 + cp := tsm1.NewDefaultPlanner(
1901 - FileStore: &fakeFileStore{ 1900 + &fakeFileStore{
1902 PathsFn: func() []tsm1.FileStat { 1901 PathsFn: func() []tsm1.FileStat {
1903 return data 1902 return data
1904 }, 1903 },
1905 }, 1904 },
1906 - CompactFullWriteColdDuration: time.Nanosecond, 1905 + time.Nanosecond,
1907 - } 1906 + )
1908 1907
1909 tsm := cp.Plan(time.Now().Add(-time.Second)) 1908 tsm := cp.Plan(time.Now().Add(-time.Second))
1910 if exp, got := len(data), len(tsm[0]); got != exp { 1909 if exp, got := len(data), len(tsm[0]); got != exp {
...@@ -1932,13 +1931,13 @@ func TestDefaultPlanner_Plan_SkipMaxSizeFiles(t *testing.T) { ...@@ -1932,13 +1931,13 @@ func TestDefaultPlanner_Plan_SkipMaxSizeFiles(t *testing.T) {
1932 }, 1931 },
1933 } 1932 }
1934 1933
1935 - cp := &tsm1.DefaultPlanner{ 1934 + cp := tsm1.NewDefaultPlanner(
1936 - FileStore: &fakeFileStore{ 1935 + &fakeFileStore{
1937 PathsFn: func() []tsm1.FileStat { 1936 PathsFn: func() []tsm1.FileStat {
1938 return data 1937 return data
1939 }, 1938 },
1940 - }, 1939 + }, tsdb.DefaultCompactFullWriteColdDuration,
1941 - } 1940 + )
1942 1941
1943 tsm := cp.Plan(time.Now()) 1942 tsm := cp.Plan(time.Now())
1944 if exp, got := 0, len(tsm); got != exp { 1943 if exp, got := 0, len(tsm); got != exp {
...@@ -1975,15 +1974,13 @@ func TestDefaultPlanner_Plan_SkipPlanningAfterFull(t *testing.T) { ...@@ -1975,15 +1974,13 @@ func TestDefaultPlanner_Plan_SkipPlanningAfterFull(t *testing.T) {
1975 blockCount: 1000, 1974 blockCount: 1000,
1976 } 1975 }
1977 1976
1978 - cp := &tsm1.DefaultPlanner{ 1977 + cp := tsm1.NewDefaultPlanner(fs, time.Nanosecond)
1979 - FileStore: fs, 1978 + plan := cp.Plan(time.Now().Add(-time.Second))
1980 - CompactFullWriteColdDuration: time.Nanosecond,
1981 - }
1982 -
1983 // first verify that our test set would return files 1979 // first verify that our test set would return files
1984 - if exp, got := 4, len(cp.Plan(time.Now().Add(-time.Second))[0]); got != exp { 1980 + if exp, got := 4, len(plan[0]); got != exp {
1985 t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) 1981 t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp)
1986 } 1982 }
1983 + cp.Release(plan)
1987 1984
1988 // skip planning if all files are over the limit 1985 // skip planning if all files are over the limit
1989 over := []tsm1.FileStat{ 1986 over := []tsm1.FileStat{
...@@ -2017,14 +2014,18 @@ func TestDefaultPlanner_Plan_SkipPlanningAfterFull(t *testing.T) { ...@@ -2017,14 +2014,18 @@ func TestDefaultPlanner_Plan_SkipPlanningAfterFull(t *testing.T) {
2017 } 2014 }
2018 2015
2019 cp.FileStore = overFs 2016 cp.FileStore = overFs
2020 - if exp, got := 0, len(cp.Plan(time.Now().Add(-time.Second))); got != exp { 2017 + plan = cp.Plan(time.Now().Add(-time.Second))
2018 + if exp, got := 0, len(plan); got != exp {
2021 t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) 2019 t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp)
2022 } 2020 }
2021 + cp.Release(plan)
2023 2022
2023 + plan = cp.PlanOptimize()
2024 // ensure the optimize planner would pick this up 2024 // ensure the optimize planner would pick this up
2025 - if exp, got := 1, len(cp.PlanOptimize()); got != exp { 2025 + if exp, got := 1, len(plan); got != exp {
2026 t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) 2026 t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp)
2027 } 2027 }
2028 + cp.Release(plan)
2028 2029
2029 cp.FileStore = fs 2030 cp.FileStore = fs
2030 // ensure that it will plan if last modified has changed 2031 // ensure that it will plan if last modified has changed
...@@ -2082,15 +2083,14 @@ func TestDefaultPlanner_Plan_TwoGenLevel3(t *testing.T) { ...@@ -2082,15 +2083,14 @@ func TestDefaultPlanner_Plan_TwoGenLevel3(t *testing.T) {
2082 }, 2083 },
2083 } 2084 }
2084 2085
2085 - cp := &tsm1.DefaultPlanner{ 2086 + cp := tsm1.NewDefaultPlanner(
2086 - FileStore: &fakeFileStore{ 2087 + &fakeFileStore{
2087 blockCount: 1000, 2088 blockCount: 1000,
2088 PathsFn: func() []tsm1.FileStat { 2089 PathsFn: func() []tsm1.FileStat {
2089 return data 2090 return data
2090 }, 2091 },
2091 }, 2092 },
2092 - CompactFullWriteColdDuration: time.Hour, 2093 + time.Hour)
2093 - }
2094 2094
2095 tsm := cp.Plan(time.Now().Add(-24 * time.Hour)) 2095 tsm := cp.Plan(time.Now().Add(-24 * time.Hour))
2096 if exp, got := 1, len(tsm); got != exp { 2096 if exp, got := 1, len(tsm); got != exp {
...@@ -2127,15 +2127,17 @@ func TestDefaultPlanner_Plan_NotFullOverMaxsize(t *testing.T) { ...@@ -2127,15 +2127,17 @@ func TestDefaultPlanner_Plan_NotFullOverMaxsize(t *testing.T) {
2127 blockCount: 100, 2127 blockCount: 100,
2128 } 2128 }
2129 2129
2130 - cp := &tsm1.DefaultPlanner{ 2130 + cp := tsm1.NewDefaultPlanner(
2131 - FileStore: fs, 2131 + fs,
2132 - CompactFullWriteColdDuration: time.Nanosecond, 2132 + time.Nanosecond,
2133 - } 2133 + )
2134 2134
2135 + plan := cp.Plan(time.Now().Add(-time.Second))
2135 // first verify that our test set would return files 2136 // first verify that our test set would return files
2136 - if exp, got := 4, len(cp.Plan(time.Now().Add(-time.Second))[0]); got != exp { 2137 + if exp, got := 4, len(plan[0]); got != exp {
2137 t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) 2138 t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp)
2138 } 2139 }
2140 + cp.Release(plan)
2139 2141
2140 // skip planning if all files are over the limit 2142 // skip planning if all files are over the limit
2141 over := []tsm1.FileStat{ 2143 over := []tsm1.FileStat{
...@@ -2188,13 +2190,13 @@ func TestDefaultPlanner_Plan_CompactsMiddleSteps(t *testing.T) { ...@@ -2188,13 +2190,13 @@ func TestDefaultPlanner_Plan_CompactsMiddleSteps(t *testing.T) {
2188 }, 2190 },
2189 } 2191 }
2190 2192
2191 - cp := &tsm1.DefaultPlanner{ 2193 + cp := tsm1.NewDefaultPlanner(
2192 - FileStore: &fakeFileStore{ 2194 + &fakeFileStore{
2193 PathsFn: func() []tsm1.FileStat { 2195 PathsFn: func() []tsm1.FileStat {
2194 return data 2196 return data
2195 }, 2197 },
2196 - }, 2198 + }, tsdb.DefaultCompactFullWriteColdDuration,
2197 - } 2199 + )
2198 2200
2199 expFiles := []tsm1.FileStat{data[0], data[1], data[2], data[3]} 2201 expFiles := []tsm1.FileStat{data[0], data[1], data[2], data[3]}
2200 tsm := cp.Plan(time.Now()) 2202 tsm := cp.Plan(time.Now())
...@@ -2210,8 +2212,8 @@ func TestDefaultPlanner_Plan_CompactsMiddleSteps(t *testing.T) { ...@@ -2210,8 +2212,8 @@ func TestDefaultPlanner_Plan_CompactsMiddleSteps(t *testing.T) {
2210 } 2212 }
2211 2213
2212 func TestDefaultPlanner_Plan_LargeSets(t *testing.T) { 2214 func TestDefaultPlanner_Plan_LargeSets(t *testing.T) {
2213 - cp := &tsm1.DefaultPlanner{ 2215 + cp := tsm1.NewDefaultPlanner(
2214 - FileStore: &fakeFileStore{ 2216 + &fakeFileStore{
2215 PathsFn: func() []tsm1.FileStat { 2217 PathsFn: func() []tsm1.FileStat {
2216 return []tsm1.FileStat{ 2218 return []tsm1.FileStat{
2217 tsm1.FileStat{ 2219 tsm1.FileStat{
...@@ -2236,8 +2238,8 @@ func TestDefaultPlanner_Plan_LargeSets(t *testing.T) { ...@@ -2236,8 +2238,8 @@ func TestDefaultPlanner_Plan_LargeSets(t *testing.T) {
2236 }, 2238 },
2237 } 2239 }
2238 }, 2240 },
2239 - }, 2241 + }, tsdb.DefaultCompactFullWriteColdDuration,
2240 - } 2242 + )
2241 2243
2242 tsm := cp.Plan(time.Now()) 2244 tsm := cp.Plan(time.Now())
2243 if exp, got := 0, len(tsm); got != exp { 2245 if exp, got := 0, len(tsm); got != exp {
...@@ -2246,8 +2248,8 @@ func TestDefaultPlanner_Plan_LargeSets(t *testing.T) { ...@@ -2246,8 +2248,8 @@ func TestDefaultPlanner_Plan_LargeSets(t *testing.T) {
2246 } 2248 }
2247 2249
2248 func TestDefaultPlanner_Plan_LargeGeneration(t *testing.T) { 2250 func TestDefaultPlanner_Plan_LargeGeneration(t *testing.T) {
2249 - cp := &tsm1.DefaultPlanner{ 2251 + cp := tsm1.NewDefaultPlanner(
2250 - FileStore: &fakeFileStore{ 2252 + &fakeFileStore{
2251 PathsFn: func() []tsm1.FileStat { 2253 PathsFn: func() []tsm1.FileStat {
2252 return []tsm1.FileStat{ 2254 return []tsm1.FileStat{
2253 tsm1.FileStat{ 2255 tsm1.FileStat{
...@@ -2272,8 +2274,8 @@ func TestDefaultPlanner_Plan_LargeGeneration(t *testing.T) { ...@@ -2272,8 +2274,8 @@ func TestDefaultPlanner_Plan_LargeGeneration(t *testing.T) {
2272 }, 2274 },
2273 } 2275 }
2274 }, 2276 },
2275 - }, 2277 + }, tsdb.DefaultCompactFullWriteColdDuration,
2276 - } 2278 + )
2277 2279
2278 tsm := cp.Plan(time.Now()) 2280 tsm := cp.Plan(time.Now())
2279 if exp, got := 0, len(tsm); got != exp { 2281 if exp, got := 0, len(tsm); got != exp {
......