Iker Narvaez

output variable test

Showing 117 changed files with 1818 additions and 1525 deletions
......@@ -4,4 +4,4 @@ memo = "efe4a26b5775ea537c0383b685d50fa64ee8fa6eec77406c5326d5f54744423f"
branch = "master"
name = "github.com/influxdata/influxdb"
packages = ["client/v2","models","pkg/escape"]
revision = "31db9d6f468239346a1fe7464b5cf9c85580488f"
revision = "a6c543039763c0f08253d71a43aefe3b570ecf37"
......
......@@ -2,6 +2,7 @@ package influx_client
import (
influx "github.com/influxdata/influxdb/client/v2"
"fmt"
"time"
)
......@@ -32,6 +33,7 @@ func (i *InfluxClient) CreatePoint(tableName string, tags map[string]string, fie
Precision: "s",
})
fmt.Println(i.Db)
pt, err := influx.NewPoint(tableName, tags, fields, timestamp)
if err != nil {
panic(err)
......
......@@ -25,17 +25,19 @@ __Additional info:__ [Include gist of relevant config, logs, etc.]
Also, if this is an issue of performance, locking, etc., the following commands are useful to create debug information for the team.
```
curl -o block.txt "http://localhost:8086/debug/pprof/block?debug=1"
curl -o goroutine.txt "http://localhost:8086/debug/pprof/goroutine?debug=1"
curl -o heap.txt "http://localhost:8086/debug/pprof/heap?debug=1"
curl -o vars.txt "http://localhost:8086/debug/vars"
curl -o profiles.tar.gz "http://localhost:8086/debug/pprof/all?cpu=true"
curl -o vars.txt "http://localhost:8086/debug/vars"
iostat -xd 1 30 > iostat.txt
influx -execute "show shards" > shards.txt
influx -execute "show stats" > stats.txt
influx -execute "show diagnostics" > diagnostics.txt
```
Please run those if possible and link them from a [gist](http://gist.github.com).
**Please note:** It will take at least 30 seconds for the first cURL command above to return a response.
This is because it will run a CPU profile as part of its information gathering, which takes 30 seconds to collect.
Ideally you should run these commands when you're experiencing problems, so we can capture the state of the system at that time.
If you're concerned about running a CPU profile (which only has a small, temporary impact on performance), then you can set `?cpu=false` or omit `?cpu=true` altogether.
Please run those if possible and link them from a [gist](http://gist.github.com) or simply attach them as a comment to the issue.
*Please note, the quickest way to fix a bug is to open a Pull Request.*
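Because the profile request above blocks for the full length of the CPU profile, any script driving it needs a generous timeout. A minimal Go sketch (assuming the default local port used in the commands above; not part of this change set):
```
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
	"time"
)

func main() {
	// The CPU profile alone takes 30 seconds, so allow well over that.
	client := &http.Client{Timeout: 2 * time.Minute}

	resp, err := client.Get("http://localhost:8086/debug/pprof/all?cpu=true")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer resp.Body.Close()

	out, err := os.Create("profiles.tar.gz")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer out.Close()

	// Stream the archive to disk rather than buffering it in memory.
	if _, err := io.Copy(out, resp.Body); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```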
......
## v1.3.0 [unreleased]
### Removals
The admin UI has been removed and is no longer usable in this release. The `[admin]` configuration section will be ignored.
### Configuration Changes
* The top-level config `bind-address` now defaults to `localhost:8088`.
The previous default was just `:8088`, which bound the backup and restore port on all available interfaces (i.e., including interfaces on the public internet).
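To illustrate what the two defaults mean at the listener level, a minimal Go sketch (standard library only, not code from this change): an empty host in the address binds every interface, while `localhost` restricts the listener to loopback.
```
package main

import (
	"fmt"
	"net"
)

func main() {
	// Old default: ":8088" binds on all interfaces, including public ones.
	wide, err := net.Listen("tcp", ":8088")
	if err == nil {
		fmt.Println("listening on", wide.Addr()) // e.g. [::]:8088
		wide.Close()
	}

	// New default: "localhost:8088" only accepts loopback connections.
	local, err := net.Listen("tcp", "localhost:8088")
	if err == nil {
		fmt.Println("listening on", local.Addr()) // 127.0.0.1:8088
		local.Close()
	}
}
```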
### Features
- [#8143](https://github.com/influxdata/influxdb/pull/8143): Add WAL sync delay
......@@ -15,6 +24,16 @@
- [#8194](https://github.com/influxdata/influxdb/pull/8194): Add "integral" function to InfluxQL.
- [#7393](https://github.com/influxdata/influxdb/issues/7393): Add "non_negative_difference" function to InfluxQL.
- [#8042](https://github.com/influxdata/influxdb/issues/8042): Add bitwise AND, OR and XOR operators to the query language.
- [#8302](https://github.com/influxdata/influxdb/pull/8302): Write throughput/concurrency improvements
- [#8273](https://github.com/influxdata/influxdb/issues/8273): Remove the admin UI.
- [#8327](https://github.com/influxdata/influxdb/pull/8327): Update to go1.8.1
- [#8348](https://github.com/influxdata/influxdb/pull/8348): Add max concurrent compaction limits
- [#8366](https://github.com/influxdata/influxdb/pull/8366): Add TSI support tooling.
- [#8350](https://github.com/influxdata/influxdb/pull/8350): Track HTTP client requests for /write and /query with /debug/requests.
- [#8384](https://github.com/influxdata/influxdb/pull/8384): Write and compaction stability
- [#7862](https://github.com/influxdata/influxdb/pull/7861): Add new profile endpoint for gathering all debug profiles and queries in a single archive.
- [#8390](https://github.com/influxdata/influxdb/issues/8390): Add nanosecond duration literal support.
- [#8394](https://github.com/influxdata/influxdb/pull/8394): Optimize top() and bottom() using an incremental aggregator.
### Bugfixes
......@@ -27,6 +46,7 @@
- [#8064](https://github.com/influxdata/influxdb/issues/8064): Forbid wildcards in binary expressions.
- [#8148](https://github.com/influxdata/influxdb/issues/8148): Fix fill(linear) when multiple series exist and there are null values.
- [#7995](https://github.com/influxdata/influxdb/issues/7995): Update liner dependency to handle docker exec.
- [#7835](https://github.com/influxdata/influxdb/pull/7835): Bind backup and restore port to localhost by default
- [#7811](https://github.com/influxdata/influxdb/issues/7811): Kill query not killing query
- [#7457](https://github.com/influxdata/influxdb/issues/7457): KILL QUERY should work during all phases of a query
- [#8155](https://github.com/influxdata/influxdb/pull/8155): Simplify admin user check.
......@@ -42,15 +62,29 @@
- [#8058](https://github.com/influxdata/influxdb/pull/8058): Enabled golint for admin, httpd, subscriber, udp. @karlding
- [#8252](https://github.com/influxdata/influxdb/issues/8252): Implicitly cast null to false in binary expressions with a boolean.
- [#8067](https://github.com/influxdata/influxdb/issues/8067): Restrict fill(none) and fill(linear) to be usable only with aggregate queries.
- [#8065](https://github.com/influxdata/influxdb/issues/8065): Restrict top() and bottom() selectors to be used with no other functions.
- [#8266](https://github.com/influxdata/influxdb/issues/8266): top() and bottom() now return the time for every point.
- [#8315](https://github.com/influxdata/influxdb/issues/8315): Remove default upper time bound on DELETE queries.
- [#8066](https://github.com/influxdata/influxdb/issues/8066): Fix LIMIT and OFFSET for certain aggregate queries.
- [#8045](https://github.com/influxdata/influxdb/issues/8045): Refactor the subquery code and fix outer condition queries.
- [#7425](https://github.com/influxdata/influxdb/issues/7425): Fix compaction aborted log messages
- [#8123](https://github.com/influxdata/influxdb/issues/8123): TSM compaction does not remove .tmp on error
- [#8343](https://github.com/influxdata/influxdb/issues/8343): Set the CSV output to an empty string for null values.
- [#8368](https://github.com/influxdata/influxdb/issues/8368): Compaction exhausting disk resources in InfluxDB
- [#8358](https://github.com/influxdata/influxdb/issues/8358): Small edits to the etc/config.sample.toml file.
- [#8392](https://github.com/influxdata/influxdb/issues/8393): Points beyond retention policy scope are dropped silently
## v1.2.3 [unreleased]
### Bugfixes
- [#8190](https://github.com/influxdata/influxdb/issues/8190): History file should redact passwords before saving to history.
- [#8187](https://github.com/influxdata/influxdb/pull/8187): Several statements were missing the DefaultDatabase method
- [#8022](https://github.com/influxdata/influxdb/issues/8022): Segment violation in models.Tags.Get
- [#8155](https://github.com/influxdata/influxdb/pull/8155): Simplify admin user check.
- [#8167](https://github.com/influxdata/influxdb/issues/8167): Fix a regression when math was used with selectors.
- [#8175](https://github.com/influxdata/influxdb/issues/8175): Ensure the input for certain functions in the query engine are ordered.
- [#8254](https://github.com/influxdata/influxdb/pull/8254): Fix delete time fields creating unparseable points
## v1.2.2 [2017-03-14]
......@@ -134,7 +168,6 @@ The stress tool `influx_stress` will be removed in a subsequent release. We reco
### Bugfixes
- [#7845](https://github.com/influxdata/influxdb/issues/7845): Fix race in storage engine.
- [#7838](https://github.com/influxdata/influxdb/issues/7838): Ensure Subscriber service can be disabled.
- [#7822](https://github.com/influxdata/influxdb/issues/7822): Drop database will delete /influxdb/data directory
......@@ -156,6 +189,7 @@ The stress tool `influx_stress` will be removed in a subsequent release. We reco
- [#7563](https://github.com/influxdata/influxdb/issues/7563): RP should not allow `INF` or `0` as a shard duration.
- [#7396](https://github.com/influxdata/influxdb/issues/7396): CLI should use spaces for alignment, not tabs.
- [#6527](https://github.com/influxdata/influxdb/issues/6527): 0.12.2 Influx CLI client PRECISION returns "Unknown precision....
## v1.1.4 [2017-02-27]
### Bugfixes
......
......@@ -32,7 +32,7 @@ curl -G http://localhost:8086/query --data-urlencode 'db=mydb' --data-urlencode
```
**If you don't include a clear test case like this, your issue may not be investigated, and may even be closed**. If writing the data is too difficult, please zip up your data directory and include a link to it in your bug report.
Please note that issues are *not the place to file general questions* such as "how do I use collectd with InfluxDB?" Questions of this nature should be sent to the [Google Group](https://groups.google.com/forum/#!forum/influxdb), not filed as issues. Issues like this will be closed.
Please note that issues are *not the place to file general questions* such as "how do I use collectd with InfluxDB?" Questions of this nature should be sent to the [InfluxData Community](https://community.influxdata.com/), not filed as issues. Issues like this will be closed.
Feature requests
---------------
......@@ -69,7 +69,7 @@ second to sign our CLA, which can be found
Installing Go
-------------
InfluxDB requires Go 1.7.4.
InfluxDB requires Go 1.8.1.
At InfluxDB we find gvm, a Go version manager, useful for installing Go. For instructions
on how to install it see [the gvm page on github](https://github.com/moovweb/gvm).
......@@ -77,8 +77,8 @@ on how to install it see [the gvm page on github](https://github.com/moovweb/gvm
After installing gvm you can install and set the default go version by
running the following:
gvm install go1.7.4
gvm use go1.7.4 --default
gvm install go1.8.1
gvm use go1.8.1 --default
Installing GDM
-------------
......
# Docker Setup
This document describes how to build and run a minimal InfluxDB container under Docker. Currently, it has only been tested for local development and assumes that you have a working Docker environment.
## Building Image
To build a docker image for InfluxDB from your current checkout, run the following:
```
$ ./build-docker.sh
```
This script uses the `golang:1.7.4` image to build a fully static binary of `influxd` and then adds it to a minimal `scratch` image.
To build the image using a different version of go:
```
$ GO_VER=1.7.4 ./build-docker.sh
```
Available versions can be found [here](https://hub.docker.com/_/golang/).
## Single Node Container
This will start an interactive, single-node container that publishes the container's ports `8086` and `8088` to the host's ports `8086` and `8088`, respectively. This is identical to starting `influxd` manually.
```
$ docker run -it -p 8086:8086 -p 8088:8088 influxdb
```
FROM busybox:ubuntu-14.04
MAINTAINER Jason Wilder "<jason@influxdb.com>"
# admin, http, udp, cluster, graphite, opentsdb, collectd
EXPOSE 8083 8086 8086/udp 8088 2003 4242 25826
WORKDIR /app
# copy binary into image
COPY influxd /app/
# Add influxd to the PATH
ENV PATH=/app:$PATH
# Generate a default config
RUN influxd config > /etc/influxdb.toml
# Use /data for all disk storage
RUN sed -i 's/dir = "\/.*influxdb/dir = "\/data/' /etc/influxdb.toml
VOLUME ["/data"]
ENTRYPOINT ["influxd", "--config", "/etc/influxdb.toml"]
......@@ -18,7 +18,7 @@ RUN gem install fpm
# Install go
ENV GOPATH /root/go
ENV GO_VERSION 1.7.4
ENV GO_VERSION 1.8.1
ENV GO_ARCH 386
RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \
tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \
......
......@@ -21,7 +21,7 @@ RUN gem install fpm
# Install go
ENV GOPATH /root/go
ENV GO_VERSION 1.7.4
ENV GO_VERSION 1.8.1
ENV GO_ARCH amd64
RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \
tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \
......
......@@ -26,7 +26,7 @@ VOLUME $PROJECT_DIR
# Install go
ENV GO_VERSION 1.7.4
ENV GO_VERSION 1.8.1
ENV GO_ARCH amd64
RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \
tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \
......
......@@ -3,7 +3,6 @@ github.com/BurntSushi/toml 99064174e013895bbd9b025c31100bd1d9b590ca
github.com/bmizerany/pat c068ca2f0aacee5ac3681d68e4d0a003b7d1fd2c
github.com/boltdb/bolt 4b1ebc1869ad66568b313d0dc410e2be72670dda
github.com/cespare/xxhash 4a94f899c20bc44d4f5f807cb14529e72aca99d6
github.com/clarkduvall/hyperloglog 2d38f733946d0a1f2e810513c71b834cbeba1480
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/dgrijalva/jwt-go 24c63f56522a87ec5339cc3567883f1039378fdb
github.com/dgryski/go-bits 2ad8d707cc05b1815ce6ff2543bb5e8d8f9298ef
......@@ -14,7 +13,6 @@ github.com/influxdata/usage-client 6d3895376368aa52a3a81d2a16e90f0f52371967
github.com/jwilder/encoding 27894731927e49b0a9023f00312be26733744815
github.com/paulbellamy/ratecounter 5a11f585a31379765c190c033b6ad39956584447
github.com/peterh/liner 88609521dc4b6c858fd4c98b628147da928ce4ac
github.com/rakyll/statik e383bbf6b2ec1a2fb8492dfd152d945fb88919b6
github.com/retailnext/hllpp 38a7bb71b483e855d35010808143beaf05b67f9d
github.com/uber-go/atomic 74ca5ec650841aee9f289dce76e928313a37cbc6
github.com/uber-go/zap fbae0281ffd546fa6d1959fec6075ac5da7fb577
......
#!/bin/bash
set -e -x
GO_VER=${GO_VER:-1.7.4}
docker run -it -v "${GOPATH}":/gopath -v "$(pwd)":/app -e "GOPATH=/gopath" -w /app golang:$GO_VER sh -c 'CGO_ENABLED=0 go build -a --installsuffix cgo --ldflags="-s" -o influxd ./cmd/influxd'
docker build -t influxdb .
......@@ -156,22 +156,6 @@ def package_man_files(build_root):
for f in files:
run("gzip -9n {}".format(os.path.join(path, f)))
def run_generate():
"""Run 'go generate' to rebuild any static assets.
"""
logging.info("Running 'go generate'...")
if not check_path_for("statik"):
run("go install github.com/rakyll/statik")
orig_path = None
if os.path.join(os.environ.get("GOPATH"), "bin") not in os.environ["PATH"].split(os.pathsep):
orig_path = os.environ["PATH"].split(os.pathsep)
os.environ["PATH"] = os.environ["PATH"].split(os.pathsep).append(os.path.join(os.environ.get("GOPATH"), "bin"))
run("rm -f ./services/admin/statik/statik.go")
run("go generate ./services/admin")
if orig_path is not None:
os.environ["PATH"] = orig_path
return True
def go_get(branch, update=False, no_uncommitted=False):
"""Retrieve build dependencies or restore pinned dependencies.
"""
......@@ -803,10 +787,6 @@ def main(args):
if not go_get(args.branch, update=args.update, no_uncommitted=args.no_uncommitted):
return 1
if args.generate:
if not run_generate():
return 1
if args.test:
if not run_tests(args.race, args.parallel, args.timeout, args.no_vet, args.junit_report):
return 1
......@@ -977,9 +957,6 @@ if __name__ == '__main__':
type=str,
default=DEFAULT_BUCKET,
help='Destination bucket for uploads')
parser.add_argument('--generate',
action='store_true',
help='Run "go generate" before building')
parser.add_argument('--build-tags',
metavar='<tags>',
help='Optional build tags to use for compilation')
......
......@@ -2,7 +2,7 @@ machine:
services:
- docker
environment:
GODIST: "go1.7.4.linux-amd64.tar.gz"
GODIST: "go1.8.1.linux-amd64.tar.gz"
post:
- mkdir -p download
- test -e download/$GODIST || curl -o download/$GODIST https://storage.googleapis.com/golang/$GODIST
......
......@@ -324,7 +324,7 @@ func (p *Point) PrecisionString(precison string) string {
// Name returns the measurement name of the point.
func (p *Point) Name() string {
return p.pt.Name()
return string(p.pt.Name())
}
// Tags returns the tags associated with the point.
......
......@@ -284,9 +284,9 @@ func (cmd *Command) exportTSMFile(tsmFilePath string, w io.Writer) error {
continue
}
measurement, field := tsm1.SeriesAndFieldFromCompositeKey(key)
field = escape.String(field)
field = escape.Bytes(field)
if err := cmd.writeValues(w, measurement, field, values); err != nil {
if err := cmd.writeValues(w, measurement, string(field), values); err != nil {
// An error from writeValues indicates an IO error, which should be returned.
return err
}
......@@ -348,9 +348,9 @@ func (cmd *Command) exportWALFile(walFilePath string, w io.Writer, warnDelete fu
for key, values := range t.Values {
measurement, field := tsm1.SeriesAndFieldFromCompositeKey([]byte(key))
// measurements are stored escaped, field names are not
field = escape.String(field)
field = escape.Bytes(field)
if err := cmd.writeValues(w, measurement, field, values); err != nil {
if err := cmd.writeValues(w, measurement, string(field), values); err != nil {
// An error from writeValues indicates an IO error, which should be returned.
return err
}
......
......@@ -290,6 +290,9 @@ func writeCorpusToWALFile(c corpus) *os.File {
panic(err)
}
if err := w.Flush(); err != nil {
panic(err)
}
// (*tsm1.WALSegmentWriter).sync isn't exported, but it only Syncs the file anyway.
if err := walFile.Sync(); err != nil {
panic(err)
......
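The flush added above has to precede the sync: bytes still sitting in the buffered writer are invisible to `Sync`. A generic sketch of the same flush-then-sync pattern using only the standard library (illustrative file name, not this codebase):
```
package main

import (
	"bufio"
	"log"
	"os"
)

func main() {
	f, err := os.Create("segment.wal")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	w := bufio.NewWriter(f)
	if _, err := w.WriteString("entry\n"); err != nil {
		log.Fatal(err)
	}

	// Flush first: data still in the bufio buffer would not reach the file.
	if err := w.Flush(); err != nil {
		log.Fatal(err)
	}
	// Then fsync so the bytes survive a crash.
	if err := f.Sync(); err != nil {
		log.Fatal(err)
	}
}
```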
......@@ -8,6 +8,7 @@ import (
"os"
"github.com/influxdata/influxdb/cmd"
"github.com/influxdata/influxdb/cmd/influx_inspect/dumptsi"
"github.com/influxdata/influxdb/cmd/influx_inspect/dumptsm"
"github.com/influxdata/influxdb/cmd/influx_inspect/export"
"github.com/influxdata/influxdb/cmd/influx_inspect/help"
......@@ -53,6 +54,11 @@ func (m *Main) Run(args ...string) error {
if err := help.NewCommand().Run(args...); err != nil {
return fmt.Errorf("help: %s", err)
}
case "dumptsi":
name := dumptsi.NewCommand()
if err := name.Run(args...); err != nil {
return fmt.Errorf("dumptsi: %s", err)
}
case "dumptsmdev":
fmt.Fprintf(m.Stderr, "warning: dumptsmdev is deprecated, use dumptsm instead.\n")
fallthrough
......
......@@ -7,6 +7,7 @@ import (
"io"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"text/tabwriter"
......@@ -67,14 +68,14 @@ func (cmd *Command) Run(args ...string) error {
}
if len(files) == 0 {
return fmt.Errorf("no tsm files at %v\n", cmd.dir)
return fmt.Errorf("no tsm files at %v", cmd.dir)
}
tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0)
fmt.Fprintln(tw, strings.Join([]string{"File", "Series", "Load Time"}, "\t"))
totalSeries := hllpp.New()
tagCardialities := map[string]*hllpp.HLLPP{}
tagCardinalities := map[string]*hllpp.HLLPP{}
measCardinalities := map[string]*hllpp.HLLPP{}
fieldCardinalities := map[string]*hllpp.HLLPP{}
......@@ -118,10 +119,10 @@ func (cmd *Command) Run(args ...string) error {
fieldCount.Add([]byte(field))
for _, t := range tags {
tagCount, ok := tagCardialities[string(t.Key)]
tagCount, ok := tagCardinalities[string(t.Key)]
if !ok {
tagCount = hllpp.New()
tagCardialities[string(t.Key)] = tagCount
tagCardinalities[string(t.Key)] = tagCount
}
tagCount.Add(t.Value)
}
......@@ -140,22 +141,23 @@ func (cmd *Command) Run(args ...string) error {
tw.Flush()
println()
fmt.Printf("Statistics\n")
fmt.Printf(" Series:\n")
fmt.Printf(" Total (est): %d\n", totalSeries.Count())
fmt.Printf("\tSeries:\n")
fmt.Printf("\t\tTotal (est): %d\n", totalSeries.Count())
if cmd.detailed {
fmt.Printf(" Measurements (est):\n")
for t, card := range measCardinalities {
fmt.Printf(" %v: %d (%d%%)\n", t, card.Count(), int((float64(card.Count())/float64(totalSeries.Count()))*100))
fmt.Printf("\tMeasurements (est):\n")
for _, t := range sortKeys(measCardinalities) {
fmt.Printf("\t\t%v: %d (%d%%)\n", t, measCardinalities[t].Count(), int((float64(measCardinalities[t].Count())/float64(totalSeries.Count()))*100))
}
fmt.Printf(" Fields (est):\n")
for t, card := range fieldCardinalities {
fmt.Printf(" %v: %d\n", t, card.Count())
fmt.Printf("\tFields (est):\n")
for _, t := range sortKeys(fieldCardinalities) {
fmt.Printf("\t\t%v: %d\n", t, fieldCardinalities[t].Count())
}
fmt.Printf(" Tags (est):\n")
for t, card := range tagCardialities {
fmt.Printf(" %v: %d\n", t, card.Count())
fmt.Printf("\tTags (est):\n")
for _, t := range sortKeys(tagCardinalities) {
fmt.Printf("\t\t%v: %d\n", t, tagCardinalities[t].Count())
}
}
......@@ -163,6 +165,16 @@ func (cmd *Command) Run(args ...string) error {
return nil
}
// sortKeys is a quick helper to return the sorted set of a map's keys
func sortKeys(vals map[string]*hllpp.HLLPP) (keys []string) {
for k := range vals {
keys = append(keys, k)
}
sort.Strings(keys)
return keys
}
// printUsage prints the usage message to STDERR.
func (cmd *Command) printUsage() {
usage := `Displays shard level report.
......
......@@ -18,7 +18,6 @@ import (
"github.com/influxdata/influxdb/coordinator"
"github.com/influxdata/influxdb/monitor"
"github.com/influxdata/influxdb/monitor/diagnostics"
"github.com/influxdata/influxdb/services/admin"
"github.com/influxdata/influxdb/services/collectd"
"github.com/influxdata/influxdb/services/continuous_querier"
"github.com/influxdata/influxdb/services/graphite"
......@@ -34,7 +33,7 @@ import (
const (
// DefaultBindAddress is the default address for various RPC services.
DefaultBindAddress = ":8088"
DefaultBindAddress = "127.0.0.1:8088"
)
// Config represents the configuration format for the influxd binary.
......@@ -45,7 +44,6 @@ type Config struct {
Retention retention.Config `toml:"retention"`
Precreator precreator.Config `toml:"shard-precreation"`
Admin admin.Config `toml:"admin"`
Monitor monitor.Config `toml:"monitor"`
Subscriber subscriber.Config `toml:"subscriber"`
HTTPD httpd.Config `toml:"http"`
......@@ -71,7 +69,6 @@ func NewConfig() *Config {
c.Coordinator = coordinator.NewConfig()
c.Precreator = precreator.NewConfig()
c.Admin = admin.NewConfig()
c.Monitor = monitor.NewConfig()
c.Subscriber = subscriber.NewConfig()
c.HTTPD = httpd.NewConfig()
......
......@@ -22,9 +22,6 @@ dir = "/tmp/data"
[coordinator]
[admin]
bind-address = ":8083"
[http]
bind-address = ":8087"
......@@ -69,8 +66,6 @@ enabled = true
t.Fatalf("unexpected meta dir: %s", c.Meta.Dir)
} else if c.Data.Dir != "/tmp/data" {
t.Fatalf("unexpected data dir: %s", c.Data.Dir)
} else if c.Admin.BindAddress != ":8083" {
t.Fatalf("unexpected admin bind address: %s", c.Admin.BindAddress)
} else if c.HTTPD.BindAddress != ":8087" {
t.Fatalf("unexpected api bind address: %s", c.HTTPD.BindAddress)
} else if len(c.GraphiteInputs) != 2 {
......
......@@ -16,7 +16,6 @@ import (
"github.com/influxdata/influxdb/influxql"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/monitor"
"github.com/influxdata/influxdb/services/admin"
"github.com/influxdata/influxdb/services/collectd"
"github.com/influxdata/influxdb/services/continuous_querier"
"github.com/influxdata/influxdb/services/graphite"
......@@ -252,15 +251,6 @@ func (s *Server) appendRetentionPolicyService(c retention.Config) {
s.Services = append(s.Services, srv)
}
func (s *Server) appendAdminService(c admin.Config) {
if !c.Enabled {
return
}
c.Version = s.buildInfo.Version
srv := admin.NewService(c)
s.Services = append(s.Services, srv)
}
func (s *Server) appendHTTPDService(c httpd.Config) {
if !c.Enabled {
return
......@@ -374,7 +364,6 @@ func (s *Server) Open() error {
s.appendMonitorService()
s.appendPrecreatorService(s.config.Precreator)
s.appendSnapshotterService()
s.appendAdminService(s.config.Admin)
s.appendContinuousQueryService(s.config.ContinuousQuery)
s.appendHTTPDService(s.config.HTTPD)
s.appendRetentionPolicyService(s.config.Retention)
......
......@@ -98,13 +98,16 @@ func NewPointsWriter() *PointsWriter {
// ShardMapping contains a mapping of shards to points.
type ShardMapping struct {
Points map[uint64][]models.Point // The points associated with a shard ID
Shards map[uint64]*meta.ShardInfo // The shards that have been mapped, keyed by shard ID
n int
Points map[uint64][]models.Point // The points associated with a shard ID
Shards map[uint64]*meta.ShardInfo // The shards that have been mapped, keyed by shard ID
Dropped []models.Point // Points that were dropped
}
// NewShardMapping creates an empty ShardMapping.
func NewShardMapping() *ShardMapping {
func NewShardMapping(n int) *ShardMapping {
return &ShardMapping{
n: n,
Points: map[uint64][]models.Point{},
Shards: map[uint64]*meta.ShardInfo{},
}
......@@ -112,6 +115,9 @@ func NewShardMapping() *ShardMapping {
// MapPoint adds the point to the ShardMapping, associated with the given shardInfo.
func (s *ShardMapping) MapPoint(shardInfo *meta.ShardInfo, p models.Point) {
if cap(s.Points[shardInfo.ID]) < s.n {
s.Points[shardInfo.ID] = make([]models.Point, 0, s.n)
}
s.Points[shardInfo.ID] = append(s.Points[shardInfo.ID], p)
s.Shards[shardInfo.ID] = shardInfo
}
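The capacity check in `MapPoint` preallocates each per-shard slice once, instead of letting `append` grow it repeatedly for large batches. A standalone sketch of the technique (illustrative types, not the coordinator's):
```
package main

import "fmt"

type point struct{ value float64 }

// appendPrealloc grows dst to a known final capacity n once,
// instead of letting append reallocate several times as it fills.
func appendPrealloc(dst []point, p point, n int) []point {
	if cap(dst) < n {
		grown := make([]point, len(dst), n)
		copy(grown, dst)
		dst = grown
	}
	return append(dst, p)
}

func main() {
	var pts []point
	for i := 0; i < 4; i++ {
		pts = append(pts, point{float64(i)}) // may reallocate repeatedly
	}
	var pre []point
	for i := 0; i < 4; i++ {
		pre = appendPrealloc(pre, point{float64(i)}, 1024) // allocates once
	}
	fmt.Println(len(pts), cap(pre))
}
```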
......@@ -218,12 +224,13 @@ func (w *PointsWriter) MapShards(wp *WritePointsRequest) (*ShardMapping, error)
list = list.Append(*sg)
}
mapping := NewShardMapping()
mapping := NewShardMapping(len(wp.Points))
for _, p := range wp.Points {
sg := list.ShardGroupAt(p.Time())
if sg == nil {
// We didn't create a shard group because the point was outside the
// scope of the RP.
mapping.Dropped = append(mapping.Dropped, p)
atomic.AddInt64(&w.stats.WriteDropped, 1)
continue
}
......@@ -319,6 +326,10 @@ func (w *PointsWriter) WritePoints(database, retentionPolicy string, consistency
atomic.AddInt64(&w.stats.SubWriteDrop, 1)
}
if err == nil && len(shardMappings.Dropped) > 0 {
err = tsdb.PartialWriteError{Reason: "points beyond retention policy", Dropped: len(shardMappings.Dropped)}
}
timeout := time.NewTimer(w.WriteTimeout)
defer timeout.Stop()
for range shardMappings.Points {
......@@ -335,7 +346,7 @@ func (w *PointsWriter) WritePoints(database, retentionPolicy string, consistency
}
}
}
return nil
return err
}
// writeToShards writes points to a shard.
......
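With this change, callers of `WritePoints` can tell a total failure apart from a partial write where only out-of-retention points were dropped. A sketch of the caller-side check, assuming the `tsdb.PartialWriteError` fields used in this hunk (the same type assertion appears in the test below):
```
package main

import (
	"log"

	"github.com/influxdata/influxdb/tsdb"
)

// handleWriteErr distinguishes a partial write (some points dropped,
// e.g. beyond the retention policy) from a total failure.
func handleWriteErr(err error) {
	if err == nil {
		return
	}
	if pw, ok := err.(tsdb.PartialWriteError); ok {
		log.Printf("partial write: dropped %d points: %s", pw.Dropped, pw.Reason)
		return
	}
	log.Printf("write failed: %v", err)
}
```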
......@@ -12,6 +12,7 @@ import (
"github.com/influxdata/influxdb/coordinator"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/services/meta"
"github.com/influxdata/influxdb/tsdb"
)
// TODO(benbjohnson): Rewrite tests to use cluster_test.MetaClient.
......@@ -234,8 +235,12 @@ func TestPointsWriter_MapShards_Invalid(t *testing.T) {
t.Fatalf("unexpected an error: %v", err)
}
if exp := 0; len(shardMappings.Points) != exp {
t.Errorf("MapShards() len mismatch. got %v, exp %v", len(shardMappings.Points), exp)
if got, exp := len(shardMappings.Points), 0; got != exp {
t.Errorf("MapShards() len mismatch. got %v, exp %v", got, exp)
}
if got, exp := len(shardMappings.Dropped), 1; got != exp {
t.Fatalf("MapShard() dropped mismatch: got %v, exp %v", got, exp)
}
}
......@@ -286,7 +291,7 @@ func TestPointsWriter_WritePoints(t *testing.T) {
// copy to prevent data race
theTest := test
sm := coordinator.NewShardMapping()
sm := coordinator.NewShardMapping(16)
sm.MapPoint(
&meta.ShardInfo{ID: uint64(1), Owners: []meta.ShardOwner{
{NodeID: 1},
......@@ -365,6 +370,64 @@ func TestPointsWriter_WritePoints(t *testing.T) {
}
}
func TestPointsWriter_WritePoints_Dropped(t *testing.T) {
pr := &coordinator.WritePointsRequest{
Database: "mydb",
RetentionPolicy: "myrp",
}
// Ensure that the test shard groups are created before the points
// are created.
ms := NewPointsWriterMetaClient()
// A single point, written in the past, that the shard mapper will drop.
pr.AddPoint("cpu", 1.0, time.Now().Add(-24*time.Hour), nil)
sm := coordinator.NewShardMapping(16)
// ShardMapper dropped this point
sm.Dropped = append(sm.Dropped, pr.Points[0])
// Local coordinator.Node ShardWriter
// lock on the write increment since these functions get called in parallel
var mu sync.Mutex
store := &fakeStore{
WriteFn: func(shardID uint64, points []models.Point) error {
mu.Lock()
defer mu.Unlock()
return nil
},
}
ms.DatabaseFn = func(database string) *meta.DatabaseInfo {
return nil
}
ms.NodeIDFn = func() uint64 { return 1 }
subPoints := make(chan *coordinator.WritePointsRequest, 1)
sub := Subscriber{}
sub.PointsFn = func() chan<- *coordinator.WritePointsRequest {
return subPoints
}
c := coordinator.NewPointsWriter()
c.MetaClient = ms
c.TSDBStore = store
c.Subscriber = sub
c.Node = &influxdb.Node{ID: 1}
c.Open()
defer c.Close()
err := c.WritePoints(pr.Database, pr.RetentionPolicy, models.ConsistencyLevelOne, pr.Points)
if _, ok := err.(tsdb.PartialWriteError); !ok {
t.Errorf("PointsWriter.WritePoints(): got %v, exp %v", err, tsdb.PartialWriteError{})
}
}
type fakePointsWriter struct {
WritePointsIntoFn func(*coordinator.IntoWriteRequest) error
}
......
......@@ -2,7 +2,7 @@
# The values in this file override the default values used by the system if
# a config option is not specified. The commented out lines are the configuration
# field and the default value used. Uncommentting a line and changing the value
# field and the default value used. Uncommenting a line and changing the value
# will change the value used at runtime when the process is restarted.
# Once every 24 hours InfluxDB will report usage data to usage.influxdata.com
......@@ -11,10 +11,8 @@
# Change this option to true to disable reporting.
# reporting-disabled = false
# we'll try to get the hostname automatically, but if it the os returns something
# that isn't resolvable by other servers in the cluster, use this option to
# manually set the hostname
# hostname = "localhost"
# Bind address to use for the RPC service for backup and restore.
# bind-address = "127.0.0.1:8088"
###
### [meta]
......@@ -89,6 +87,11 @@
# write or delete
# compact-full-write-cold-duration = "4h"
# The maximum number of concurrent full and level compactions that can run at one time. A
# value of 0 results in runtime.GOMAXPROCS(0) being used at runtime. This setting does not apply
# to cache snapshotting.
# max-concurrent-compactions = 0
# The maximum series allowed per database before writes are dropped. This limit can prevent
# high-cardinality issues at the database level. It can be disabled by setting it to
# 0.
......@@ -118,7 +121,7 @@
# can help prevent run away queries. Setting the value to 0 disables the limit.
# query-timeout = "0s"
# The the time threshold when a query will be logged as a slow query. This limit can be set to help
# The time threshold when a query will be logged as a slow query. This limit can be set to help
# discover slow or resource intensive queries. Setting the value to 0 disables the slow query logging.
# log-queries-after = "0s"
......@@ -128,12 +131,9 @@
# The maximum number of series a SELECT can run. A value of 0 will make the maximum series
# count unlimited.
# The maximum number of series a SELECT can run. A value of zero will make the maximum series
# count unlimited.
# max-select-series = 0
# The maxium number of group by time bucket a SELECt can create. A value of zero will max the maximum
# The maximum number of group by time buckets a SELECT can create. A value of zero will make the maximum
# number of buckets unlimited.
# max-select-buckets = 0
......@@ -189,27 +189,6 @@
# store-interval = "10s"
###
### [admin]
###
### Controls the availability of the built-in, web-based admin interface. If HTTPS is
### enabled for the admin interface, HTTPS must also be enabled on the [http] service.
###
### NOTE: This interface is deprecated as of 1.1.0 and will be removed in a future release.
[admin]
# Determines whether the admin service is enabled.
# enabled = false
# The default bind address used by the admin service.
# bind-address = ":8083"
# Whether the admin service should use HTTPS.
# https-enabled = false
# The SSL certificate used when HTTPS is enabled.
# https-certificate = "/etc/ssl/influxdb.pem"
###
### [http]
###
### Controls how the HTTP endpoints are configured. These are the primary
......@@ -223,13 +202,13 @@
# The bind address used by the HTTP service.
# bind-address = ":8086"
# Determines whether HTTP authentication is enabled.
# Determines whether user authentication is enabled over HTTP/HTTPS.
# auth-enabled = false
# The default realm sent back when issuing a basic auth challenge.
# realm = "InfluxDB"
# Determines whether HTTP request logging is enable.d
# Determines whether HTTP request logging is enabled.
# log-enabled = true
# Determines whether detailed write logging is enabled.
......
......@@ -1761,6 +1761,10 @@ func (s *SelectStatement) validate(tr targetRequirement) error {
return err
}
if err := s.validateTopBottom(); err != nil {
return err
}
if err := s.validateAggregates(tr); err != nil {
return err
}
......@@ -2247,7 +2251,7 @@ func (s *SelectStatement) validateDistinct() error {
}
if len(s.Fields) > 1 {
return fmt.Errorf("aggregate function distinct() can not be combined with other functions or fields")
return fmt.Errorf("aggregate function distinct() cannot be combined with other functions or fields")
}
switch c := s.Fields[0].Expr.(type) {
......@@ -2263,6 +2267,19 @@ func (s *SelectStatement) validateDistinct() error {
return nil
}
func (s *SelectStatement) validateTopBottom() error {
// Ensure there are not multiple calls if top/bottom is present.
info := newSelectInfo(s)
if len(info.calls) > 1 {
for call := range info.calls {
if call.Name == "top" || call.Name == "bottom" {
return fmt.Errorf("selector function %s() cannot be combined with other functions", call.Name)
}
}
}
return nil
}
// GroupByInterval extracts the time interval, if specified.
func (s *SelectStatement) GroupByInterval() (time.Duration, error) {
// return if we've already pulled it out
......@@ -4084,7 +4101,7 @@ func TimeRange(expr Expr) (min, max time.Time, err error) {
// TimeRangeAsEpochNano returns the minimum and maximum times, as epoch nano, specified by
// an expression. If there is no lower bound, the minimum time is returned
// for minimum. If there is no higher bound, now is returned for maximum.
// for minimum. If there is no higher bound, the maximum time is returned.
func TimeRangeAsEpochNano(expr Expr) (min, max int64, err error) {
tmin, tmax, err := TimeRange(expr)
if err != nil {
......@@ -4097,7 +4114,7 @@ func TimeRangeAsEpochNano(expr Expr) (min, max int64, err error) {
min = tmin.UnixNano()
}
if tmax.IsZero() {
max = time.Now().UnixNano()
max = time.Unix(0, MaxTime).UnixNano()
} else {
max = tmax.UnixNano()
}
......@@ -4283,7 +4300,15 @@ func Rewrite(r Rewriter, node Node) Node {
n.Fields = Rewrite(r, n.Fields).(Fields)
n.Dimensions = Rewrite(r, n.Dimensions).(Dimensions)
n.Sources = Rewrite(r, n.Sources).(Sources)
n.Condition = Rewrite(r, n.Condition).(Expr)
// Rewrite may return nil. Nil does not satisfy the Expr
// interface. We only assert the rewritten result to be an
// Expr if it is not nil:
if cond := Rewrite(r, n.Condition); cond != nil {
n.Condition = cond.(Expr)
} else {
n.Condition = nil
}
case *SubQuery:
n.Statement = Rewrite(r, n.Statement).(*SelectStatement)
......
package influxql
import (
"bytes"
"container/heap"
"fmt"
"math"
"sort"
......@@ -783,19 +781,17 @@ func IntegerSpreadReduceSlice(a []IntegerPoint) []IntegerPoint {
return []IntegerPoint{{Time: ZeroTime, Value: max - min}}
}
func newTopIterator(input Iterator, opt IteratorOptions, n *IntegerLiteral, tags []int) (Iterator, error) {
func newTopIterator(input Iterator, opt IteratorOptions, n int) (Iterator, error) {
switch input := input.(type) {
case FloatIterator:
aggregateFn := NewFloatTopReduceSliceFunc(int(n.Val), tags, opt.Interval)
createFn := func() (FloatPointAggregator, FloatPointEmitter) {
fn := NewFloatSliceFuncReducer(aggregateFn)
fn := NewFloatTopReducer(n)
return fn, fn
}
return newFloatReduceFloatIterator(input, opt, createFn), nil
case IntegerIterator:
aggregateFn := NewIntegerTopReduceSliceFunc(int(n.Val), tags, opt.Interval)
createFn := func() (IntegerPointAggregator, IntegerPointEmitter) {
fn := NewIntegerSliceFuncReducer(aggregateFn)
fn := NewIntegerTopReducer(n)
return fn, fn
}
return newIntegerReduceIntegerIterator(input, opt, createFn), nil
......@@ -804,111 +800,17 @@ func newTopIterator(input Iterator, opt IteratorOptions, n *IntegerLiteral, tags
}
}
// NewFloatTopReduceSliceFunc returns the top values within a window.
func NewFloatTopReduceSliceFunc(n int, tags []int, interval Interval) FloatReduceSliceFunc {
return func(a []FloatPoint) []FloatPoint {
// Filter by tags if they exist.
if len(tags) > 0 {
a = filterFloatByUniqueTags(a, tags, func(cur, p *FloatPoint) bool {
return p.Value > cur.Value || (p.Value == cur.Value && p.Time < cur.Time)
})
}
// If we ask for more elements than exist, restrict n to be the length of the array.
size := n
if size > len(a) {
size = len(a)
}
// Construct a heap preferring higher values and breaking ties
// based on the earliest time for a point.
h := floatPointsSortBy(a, func(a, b *FloatPoint) bool {
if a.Value != b.Value {
return a.Value > b.Value
}
return a.Time < b.Time
})
heap.Init(h)
// Pop the first n elements and then sort by time.
points := make([]FloatPoint, 0, size)
for i := 0; i < size; i++ {
p := heap.Pop(h).(FloatPoint)
points = append(points, p)
}
// Either zero out all values or sort the points by time
// depending on if a time interval was given or not.
if !interval.IsZero() {
for i := range points {
points[i].Time = ZeroTime
}
} else {
sort.Stable(floatPointsByTime(points))
}
return points
}
}
// NewIntegerTopReduceSliceFunc returns the top values within a window.
func NewIntegerTopReduceSliceFunc(n int, tags []int, interval Interval) IntegerReduceSliceFunc {
return func(a []IntegerPoint) []IntegerPoint {
// Filter by tags if they exist.
if len(tags) > 0 {
a = filterIntegerByUniqueTags(a, tags, func(cur, p *IntegerPoint) bool {
return p.Value > cur.Value || (p.Value == cur.Value && p.Time < cur.Time)
})
}
// If we ask for more elements than exist, restrict n to be the length of the array.
size := n
if size > len(a) {
size = len(a)
}
// Construct a heap preferring higher values and breaking ties
// based on the earliest time for a point.
h := integerPointsSortBy(a, func(a, b *IntegerPoint) bool {
if a.Value != b.Value {
return a.Value > b.Value
}
return a.Time < b.Time
})
heap.Init(h)
// Pop the first n elements and then sort by time.
points := make([]IntegerPoint, 0, size)
for i := 0; i < size; i++ {
p := heap.Pop(h).(IntegerPoint)
points = append(points, p)
}
// Either zero out all values or sort the points by time
// depending on if a time interval was given or not.
if !interval.IsZero() {
for i := range points {
points[i].Time = ZeroTime
}
} else {
sort.Stable(integerPointsByTime(points))
}
return points
}
}
func newBottomIterator(input Iterator, opt IteratorOptions, n *IntegerLiteral, tags []int) (Iterator, error) {
func newBottomIterator(input Iterator, opt IteratorOptions, n int) (Iterator, error) {
switch input := input.(type) {
case FloatIterator:
aggregateFn := NewFloatBottomReduceSliceFunc(int(n.Val), tags, opt.Interval)
createFn := func() (FloatPointAggregator, FloatPointEmitter) {
fn := NewFloatSliceFuncReducer(aggregateFn)
fn := NewFloatBottomReducer(n)
return fn, fn
}
return newFloatReduceFloatIterator(input, opt, createFn), nil
case IntegerIterator:
aggregateFn := NewIntegerBottomReduceSliceFunc(int(n.Val), tags, opt.Interval)
createFn := func() (IntegerPointAggregator, IntegerPointEmitter) {
fn := NewIntegerSliceFuncReducer(aggregateFn)
fn := NewIntegerBottomReducer(n)
return fn, fn
}
return newIntegerReduceIntegerIterator(input, opt, createFn), nil
......@@ -917,158 +819,6 @@ func newBottomIterator(input Iterator, opt IteratorOptions, n *IntegerLiteral, t
}
}
// NewFloatBottomReduceSliceFunc returns the bottom values within a window.
func NewFloatBottomReduceSliceFunc(n int, tags []int, interval Interval) FloatReduceSliceFunc {
return func(a []FloatPoint) []FloatPoint {
// Filter by tags if they exist.
if len(tags) > 0 {
a = filterFloatByUniqueTags(a, tags, func(cur, p *FloatPoint) bool {
return p.Value < cur.Value || (p.Value == cur.Value && p.Time < cur.Time)
})
}
// If we ask for more elements than exist, restrict n to be the length of the array.
size := n
if size > len(a) {
size = len(a)
}
// Construct a heap preferring lower values and breaking ties
// based on the earliest time for a point.
h := floatPointsSortBy(a, func(a, b *FloatPoint) bool {
if a.Value != b.Value {
return a.Value < b.Value
}
return a.Time < b.Time
})
heap.Init(h)
// Pop the first n elements and then sort by time.
points := make([]FloatPoint, 0, size)
for i := 0; i < size; i++ {
p := heap.Pop(h).(FloatPoint)
points = append(points, p)
}
// Either zero out all values or sort the points by time
// depending on if a time interval was given or not.
if !interval.IsZero() {
for i := range points {
points[i].Time = ZeroTime
}
} else {
sort.Stable(floatPointsByTime(points))
}
return points
}
}
// NewIntegerBottomReduceSliceFunc returns the bottom values within a window.
func NewIntegerBottomReduceSliceFunc(n int, tags []int, interval Interval) IntegerReduceSliceFunc {
return func(a []IntegerPoint) []IntegerPoint {
// Filter by tags if they exist.
if len(tags) > 0 {
a = filterIntegerByUniqueTags(a, tags, func(cur, p *IntegerPoint) bool {
return p.Value < cur.Value || (p.Value == cur.Value && p.Time < cur.Time)
})
}
// If we ask for more elements than exist, restrict n to be the length of the array.
size := n
if size > len(a) {
size = len(a)
}
// Construct a heap preferring lower values and breaking ties
// based on the earliest time for a point.
h := integerPointsSortBy(a, func(a, b *IntegerPoint) bool {
if a.Value != b.Value {
return a.Value < b.Value
}
return a.Time < b.Time
})
heap.Init(h)
// Pop the first n elements and then sort by time.
points := make([]IntegerPoint, 0, size)
for i := 0; i < size; i++ {
p := heap.Pop(h).(IntegerPoint)
points = append(points, p)
}
// Either zero out all values or sort the points by time
// depending on if a time interval was given or not.
if !interval.IsZero() {
for i := range points {
points[i].Time = ZeroTime
}
} else {
sort.Stable(integerPointsByTime(points))
}
return points
}
}
func filterFloatByUniqueTags(a []FloatPoint, tags []int, cmpFunc func(cur, p *FloatPoint) bool) []FloatPoint {
pointMap := make(map[string]FloatPoint)
for _, p := range a {
keyBuf := bytes.NewBuffer(nil)
for i, index := range tags {
if i > 0 {
keyBuf.WriteString(",")
}
fmt.Fprintf(keyBuf, "%s", p.Aux[index])
}
key := keyBuf.String()
cur, ok := pointMap[key]
if ok {
if cmpFunc(&cur, &p) {
pointMap[key] = p
}
} else {
pointMap[key] = p
}
}
// Recreate the original array with our new filtered list.
points := make([]FloatPoint, 0, len(pointMap))
for _, p := range pointMap {
points = append(points, p)
}
return points
}
func filterIntegerByUniqueTags(a []IntegerPoint, tags []int, cmpFunc func(cur, p *IntegerPoint) bool) []IntegerPoint {
pointMap := make(map[string]IntegerPoint)
for _, p := range a {
keyBuf := bytes.NewBuffer(nil)
for i, index := range tags {
if i > 0 {
keyBuf.WriteString(",")
}
fmt.Fprintf(keyBuf, "%s", p.Aux[index])
}
key := keyBuf.String()
cur, ok := pointMap[key]
if ok {
if cmpFunc(&cur, &p) {
pointMap[key] = p
}
} else {
pointMap[key] = p
}
}
// Recreate the original array with our new filtered list.
points := make([]IntegerPoint, 0, len(pointMap))
for _, p := range pointMap {
points = append(points, p)
}
return points
}
// newPercentileIterator returns an iterator for operating on a percentile() call.
func newPercentileIterator(input Iterator, opt IteratorOptions, percentile float64) (Iterator, error) {
switch input := input.(type) {
......
package influxql
import (
"container/heap"
"math"
"sort"
"time"
"github.com/influxdata/influxdb/influxql/neldermead"
......@@ -987,3 +989,175 @@ func (r *IntegerIntegralReducer) Close() error {
close(r.ch)
return nil
}
type FloatTopReducer struct {
h *floatPointsByFunc
}
func NewFloatTopReducer(n int) *FloatTopReducer {
return &FloatTopReducer{
h: floatPointsSortBy(make([]FloatPoint, 0, n), func(a, b *FloatPoint) bool {
if a.Value != b.Value {
return a.Value < b.Value
}
return a.Time > b.Time
}),
}
}
func (r *FloatTopReducer) AggregateFloat(p *FloatPoint) {
if r.h.Len() == cap(r.h.points) {
// Compare the minimum point and the aggregated point. If our value is
// larger, replace the current min value.
if !r.h.cmp(&r.h.points[0], p) {
return
}
r.h.points[0] = *p
heap.Fix(r.h, 0)
return
}
heap.Push(r.h, *p)
}
func (r *FloatTopReducer) Emit() []FloatPoint {
// Ensure the points are sorted with the maximum value last. While the
// first point may be the minimum value, the rest is not guaranteed to be
// in any particular order while it is a heap.
points := make([]FloatPoint, len(r.h.points))
for i, p := range r.h.points {
p.Aggregated = 0
points[i] = p
}
h := floatPointsByFunc{points: points, cmp: r.h.cmp}
sort.Sort(sort.Reverse(&h))
return points
}
type IntegerTopReducer struct {
h *integerPointsByFunc
}
func NewIntegerTopReducer(n int) *IntegerTopReducer {
return &IntegerTopReducer{
h: integerPointsSortBy(make([]IntegerPoint, 0, n), func(a, b *IntegerPoint) bool {
if a.Value != b.Value {
return a.Value < b.Value
}
return a.Time > b.Time
}),
}
}
func (r *IntegerTopReducer) AggregateInteger(p *IntegerPoint) {
if r.h.Len() == cap(r.h.points) {
// Compare the minimum point and the aggregated point. If our value is
// larger, replace the current min value.
if !r.h.cmp(&r.h.points[0], p) {
return
}
r.h.points[0] = *p
heap.Fix(r.h, 0)
return
}
heap.Push(r.h, *p)
}
func (r *IntegerTopReducer) Emit() []IntegerPoint {
// Ensure the points are sorted with the maximum value last. While the
// first point may be the minimum value, the rest is not guaranteed to be
// in any particular order while it is a heap.
points := make([]IntegerPoint, len(r.h.points))
for i, p := range r.h.points {
p.Aggregated = 0
points[i] = p
}
h := integerPointsByFunc{points: points, cmp: r.h.cmp}
sort.Sort(sort.Reverse(&h))
return points
}
type FloatBottomReducer struct {
h *floatPointsByFunc
}
func NewFloatBottomReducer(n int) *FloatBottomReducer {
return &FloatBottomReducer{
h: floatPointsSortBy(make([]FloatPoint, 0, n), func(a, b *FloatPoint) bool {
if a.Value != b.Value {
return a.Value > b.Value
}
return a.Time > b.Time
}),
}
}
func (r *FloatBottomReducer) AggregateFloat(p *FloatPoint) {
if r.h.Len() == cap(r.h.points) {
// Compare the maximum point and the aggregated point. If our value is
// smaller, replace the current max value.
if !r.h.cmp(&r.h.points[0], p) {
return
}
r.h.points[0] = *p
heap.Fix(r.h, 0)
return
}
heap.Push(r.h, *p)
}
func (r *FloatBottomReducer) Emit() []FloatPoint {
// Ensure the points are sorted with the maximum value last. While the
// first point may be the minimum value, the rest is not guaranteed to be
// in any particular order while it is a heap.
points := make([]FloatPoint, len(r.h.points))
for i, p := range r.h.points {
p.Aggregated = 0
points[i] = p
}
h := floatPointsByFunc{points: points, cmp: r.h.cmp}
sort.Sort(sort.Reverse(&h))
return points
}
type IntegerBottomReducer struct {
h *integerPointsByFunc
}
func NewIntegerBottomReducer(n int) *IntegerBottomReducer {
return &IntegerBottomReducer{
h: integerPointsSortBy(make([]IntegerPoint, 0, n), func(a, b *IntegerPoint) bool {
if a.Value != b.Value {
return a.Value > b.Value
}
return a.Time > b.Time
}),
}
}
func (r *IntegerBottomReducer) AggregateInteger(p *IntegerPoint) {
if r.h.Len() == cap(r.h.points) {
// Compare the maximum point and the aggregated point. If our value is
// smaller, replace the current max value.
if !r.h.cmp(&r.h.points[0], p) {
return
}
r.h.points[0] = *p
heap.Fix(r.h, 0)
return
}
heap.Push(r.h, *p)
}
func (r *IntegerBottomReducer) Emit() []IntegerPoint {
// Ensure the points are sorted with the maximum value last. While the
// first point may be the minimum value, the rest is not guaranteed to be
// in any particular order while it is a heap.
points := make([]IntegerPoint, len(r.h.points))
for i, p := range r.h.points {
p.Aggregated = 0
points[i] = p
}
h := integerPointsByFunc{points: points, cmp: r.h.cmp}
sort.Sort(sort.Reverse(&h))
return points
}
......
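These reducers replace the old whole-window slice functions with an incremental bounded heap: keep at most n points, with the weakest kept point at the heap root, and swap the root out whenever a stronger point arrives. A self-contained sketch of that technique on plain float64 values (not the influxql point types):
```
package main

import (
	"container/heap"
	"fmt"
	"sort"
)

// minHeap keeps the smallest kept value at index 0, so the weakest
// of the current top-n candidates is always cheap to inspect.
type minHeap []float64

func (h minHeap) Len() int            { return len(h) }
func (h minHeap) Less(i, j int) bool  { return h[i] < h[j] }
func (h minHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *minHeap) Push(x interface{}) { *h = append(*h, x.(float64)) }
func (h *minHeap) Pop() interface{} {
	old := *h
	v := old[len(old)-1]
	*h = old[:len(old)-1]
	return v
}

// topN streams values through a bounded heap: O(len(values) * log n)
// time and O(n) memory, instead of sorting the whole input.
func topN(values []float64, n int) []float64 {
	h := make(minHeap, 0, n)
	for _, v := range values {
		if len(h) < n {
			heap.Push(&h, v)
		} else if v > h[0] {
			h[0] = v        // replace the weakest kept value
			heap.Fix(&h, 0) // restore the heap invariant
		}
	}
	out := append([]float64(nil), h...)
	sort.Sort(sort.Reverse(sort.Float64Slice(out)))
	return out
}

func main() {
	fmt.Println(topN([]float64{3, 9, 1, 7, 4, 8}, 3)) // [9 8 7]
}
```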
......@@ -1063,7 +1063,13 @@ type {{$k.name}}Reduce{{$v.Name}}Point struct {
// The previous value for the dimension is passed to fn.
func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, error) {
// Calculate next window.
var startTime, endTime int64
var (
startTime, endTime int64
window struct {
name string
tags string
}
)
for {
p, err := itr.input.Next()
if err != nil || p == nil {
......@@ -1075,6 +1081,7 @@ func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, e
// Unread the point so it can be processed.
itr.input.unread(p)
startTime, endTime = itr.opt.Window(p.Time)
window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID()
break
}
......@@ -1089,14 +1096,25 @@ func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, e
break
} else if curr.Nil {
continue
} else if curr.Name != window.name {
itr.input.unread(curr)
break
}
tags := curr.Tags.Subset(itr.dims)
id := curr.Name
if len(tags.m) > 0 {
id += "\x00" + tags.ID()
// Ensure this point is within the same final window.
if curr.Name != window.name {
itr.input.unread(curr)
break
} else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags {
itr.input.unread(curr)
break
}
// Retrieve the tags on this point for this level of the query.
// This may be different than the bucket dimensions.
tags := curr.Tags.Subset(itr.dims)
id := tags.ID()
// Retrieve the aggregator for this name/tag combination or create one.
rp := m[id]
if rp == nil {
......@@ -1112,17 +1130,18 @@ func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, e
rp.Aggregator.Aggregate{{$k.Name}}(curr)
}
// Reverse sort points by name & tag.
// Reverse sort points by name & tag if our output is supposed to be ordered.
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
if len(keys) > 1 {
if len(keys) > 1 && itr.opt.Ordered {
sort.Sort(reverseStringSlice(keys))
}
// Assume the points are already sorted until proven otherwise.
sortedByTime := true
// Emit the points for each name & tag combination.
a := make([]{{$v.Name}}Point, 0, len(m))
for _, k := range keys {
rp := m[k]
......@@ -1484,6 +1503,70 @@ type {{$k.name}}DedupeIterator struct {
m map[string]struct{} // lookup of points already sent
}
type {{$k.name}}IteratorMapper struct {
e *Emitter
buf []interface{}
driver IteratorMap // which iterator to use for the primary value, can be nil
fields []IteratorMap // which iterator to use for an aux field
point {{$k.Name}}Point
}
func new{{$k.Name}}IteratorMapper(itrs []Iterator, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *{{$k.name}}IteratorMapper {
e := NewEmitter(itrs, opt.Ascending, 0)
e.OmitTime = true
return &{{$k.name}}IteratorMapper{
e: e,
buf: make([]interface{}, len(itrs)),
driver: driver,
fields: fields,
point: {{$k.Name}}Point{
Aux: make([]interface{}, len(fields)),
},
}
}
func (itr *{{$k.name}}IteratorMapper) Next() (*{{$k.Name}}Point, error) {
t, name, tags, err := itr.e.loadBuf()
if err != nil || t == ZeroTime {
return nil, err
}
itr.point.Time = t
itr.point.Name = name
itr.point.Tags = tags
itr.e.readInto(t, name, tags, itr.buf)
if itr.driver != nil {
if v := itr.driver.Value(tags, itr.buf); v != nil {
if v, ok := v.({{$k.Type}}); ok {
itr.point.Value = v
itr.point.Nil = false
} else {
itr.point.Value = {{$k.Nil}}
itr.point.Nil = true
}
} else {
itr.point.Value = {{$k.Nil}}
itr.point.Nil = true
}
}
for i, f := range itr.fields {
itr.point.Aux[i] = f.Value(tags, itr.buf)
}
return &itr.point, nil
}
func (itr *{{$k.name}}IteratorMapper) Stats() IteratorStats {
stats := IteratorStats{}
for _, itr := range itr.e.itrs {
stats.Add(itr.Stats())
}
return stats
}
func (itr *{{$k.name}}IteratorMapper) Close() error {
return itr.e.Close()
}
type {{$k.name}}FilterIterator struct {
input {{$k.Name}}Iterator
cond Expr
......
......@@ -740,8 +740,9 @@ func newIteratorOptionsStmt(stmt *SelectStatement, sopt *SelectOptions) (opt Ite
}
opt.Interval.Duration = interval
// Determine if the input for this select call must be ordered.
opt.Ordered = stmt.IsRawQuery
// Always request an ordered output for the top level iterators.
// The emitter will always emit points as ordered.
opt.Ordered = true
// Determine dimensions.
opt.GroupBy = make(map[string]struct{}, len(opt.Dimensions))
......@@ -805,17 +806,15 @@ func newIteratorOptionsSubstatement(stmt *SelectStatement, opt IteratorOptions)
subOpt.Fill = NoFill
}
// Determine if the input to this iterator needs to be ordered so it outputs
// the correct order to the outer query.
interval, err := stmt.GroupByInterval()
if err != nil {
return IteratorOptions{}, err
}
subOpt.Ordered = opt.Ordered && (interval == 0 && stmt.HasSelector())
// Inherit the ordering method from the outer query.
subOpt.Ordered = opt.Ordered
// If there is no interval for this subquery, but the outer query has an
// interval, inherit the parent interval.
if interval == 0 {
interval, err := stmt.GroupByInterval()
if err != nil {
return IteratorOptions{}, err
} else if interval == 0 {
subOpt.Interval = opt.Interval
}
return subOpt, nil
......
package influxql
type iteratorMapper struct {
e *Emitter
buf []interface{}
fields []IteratorMap // which iterator to use for an aux field
auxFields []interface{}
}
import "fmt"
type IteratorMap interface {
Value(tags Tags, buf []interface{}) interface{}
......@@ -19,43 +14,31 @@ type TagMap string
func (s TagMap) Value(tags Tags, buf []interface{}) interface{} { return tags.Value(string(s)) }
func NewIteratorMapper(itrs []Iterator, fields []IteratorMap, opt IteratorOptions) Iterator {
e := NewEmitter(itrs, opt.Ascending, 0)
e.OmitTime = true
return &iteratorMapper{
e: e,
buf: make([]interface{}, len(itrs)),
fields: fields,
auxFields: make([]interface{}, len(fields)),
}
}
func (itr *iteratorMapper) Next() (*FloatPoint, error) {
t, name, tags, err := itr.e.loadBuf()
if err != nil || t == ZeroTime {
return nil, err
}
itr.e.readInto(t, name, tags, itr.buf)
for i, f := range itr.fields {
itr.auxFields[i] = f.Value(tags, itr.buf)
type NullMap struct{}
func (NullMap) Value(tags Tags, buf []interface{}) interface{} { return nil }
func NewIteratorMapper(itrs []Iterator, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) Iterator {
if driver != nil {
switch driver := driver.(type) {
case FieldMap:
switch itrs[int(driver)].(type) {
case FloatIterator:
return newFloatIteratorMapper(itrs, driver, fields, opt)
case IntegerIterator:
return newIntegerIteratorMapper(itrs, driver, fields, opt)
case StringIterator:
return newStringIteratorMapper(itrs, driver, fields, opt)
case BooleanIterator:
return newBooleanIteratorMapper(itrs, driver, fields, opt)
default:
panic(fmt.Sprintf("unable to map iterator type: %T", itrs[int(driver)]))
}
case TagMap:
return newStringIteratorMapper(itrs, driver, fields, opt)
default:
panic(fmt.Sprintf("unable to create iterator mapper with driveression type: %T", driver))
}
}
return &FloatPoint{
Name: name,
Tags: tags,
Time: t,
Aux: itr.auxFields,
}, nil
}
func (itr *iteratorMapper) Stats() IteratorStats {
stats := IteratorStats{}
for _, itr := range itr.e.itrs {
stats.Add(itr.Stats())
}
return stats
}
func (itr *iteratorMapper) Close() error {
return itr.e.Close()
return newFloatIteratorMapper(itrs, nil, fields, opt)
}
......
......@@ -31,7 +31,7 @@ func TestIteratorMapper(t *testing.T) {
{Val: "val2", Type: influxql.String},
},
}
itr := influxql.NewIteratorMapper(inputs, []influxql.IteratorMap{
itr := influxql.NewIteratorMapper(inputs, nil, []influxql.IteratorMap{
influxql.FieldMap(0),
influxql.FieldMap(1),
influxql.TagMap("host"),
......
......@@ -2838,6 +2838,14 @@ func ParseDuration(s string) (time.Duration, error) {
// Otherwise just use the last character as the unit of measure.
unit = string(a[i])
switch a[i] {
case 'n':
if i+1 < len(a) && a[i+1] == 's' {
unit = string(a[i : i+2])
d += time.Duration(n)
i += 2
continue
}
return 0, ErrInvalidDuration
case 'u', 'µ':
d += time.Duration(n) * time.Microsecond
case 'm':
......
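The new `case 'n'` accepts only the two-character unit `ns`; a bare `n` stays invalid, as the parser tests below confirm. A minimal sketch of the same lookahead rule in isolation (hypothetical helper, not the influxql parser):
```
package main

import (
	"errors"
	"fmt"
	"time"
)

// parseNanoSuffix applies the same rule as the new case above: after the
// digits, "ns" is a valid unit but a lone "n" is not.
func parseNanoSuffix(n int64, rest string) (time.Duration, error) {
	if len(rest) >= 2 && rest[0] == 'n' && rest[1] == 's' {
		return time.Duration(n), nil // time.Duration counts nanoseconds
	}
	return 0, errors.New("invalid duration")
}

func main() {
	fmt.Println(parseNanoSuffix(10, "ns")) // 10ns <nil>
	fmt.Println(parseNanoSuffix(10, "n"))  // 0s invalid duration
}
```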
......@@ -2606,12 +2606,12 @@ func TestParser_ParseStatement(t *testing.T) {
{s: `SELECT field1 FROM 12`, err: `found 12, expected identifier at line 1, char 20`},
{s: `SELECT 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 FROM myseries`, err: `unable to parse integer at line 1, char 8`},
{s: `SELECT 10.5h FROM myseries`, err: `found h, expected FROM at line 1, char 12`},
{s: `SELECT distinct(field1), sum(field1) FROM myseries`, err: `aggregate function distinct() can not be combined with other functions or fields`},
{s: `SELECT distinct(field1), field2 FROM myseries`, err: `aggregate function distinct() can not be combined with other functions or fields`},
{s: `SELECT distinct(field1), sum(field1) FROM myseries`, err: `aggregate function distinct() cannot be combined with other functions or fields`},
{s: `SELECT distinct(field1), field2 FROM myseries`, err: `aggregate function distinct() cannot be combined with other functions or fields`},
{s: `SELECT distinct(field1, field2) FROM myseries`, err: `distinct function can only have one argument`},
{s: `SELECT distinct() FROM myseries`, err: `distinct function requires at least one argument`},
{s: `SELECT distinct FROM myseries`, err: `found FROM, expected identifier at line 1, char 17`},
{s: `SELECT distinct field1, field2 FROM myseries`, err: `aggregate function distinct() can not be combined with other functions or fields`},
{s: `SELECT distinct field1, field2 FROM myseries`, err: `aggregate function distinct() cannot be combined with other functions or fields`},
{s: `SELECT count(distinct) FROM myseries`, err: `found ), expected (, identifier at line 1, char 22`},
{s: `SELECT count(distinct field1, field2) FROM myseries`, err: `count(distinct <field>) can only have one argument`},
{s: `select count(distinct(too, many, arguments)) from myseries`, err: `count(distinct <field>) can only have one argument`},
......@@ -2680,6 +2680,8 @@ func TestParser_ParseStatement(t *testing.T) {
{s: `SELECT sum(value) + count(foo + sum(bar)) FROM cpu`, err: `binary expressions cannot mix aggregates and raw fields`},
{s: `SELECT mean(value) FROM cpu FILL + value`, err: `fill must be a function call`},
{s: `SELECT sum(mean) FROM (SELECT mean(value) FROM cpu GROUP BY time(1h))`, err: `aggregate functions with GROUP BY time require a WHERE time clause`},
{s: `SELECT top(value, 2), max(value) FROM cpu`, err: `selector function top() cannot be combined with other functions`},
{s: `SELECT bottom(value, 2), max(value) FROM cpu`, err: `selector function bottom() cannot be combined with other functions`},
// See issues https://github.com/influxdata/influxdb/issues/1647
// and https://github.com/influxdata/influxdb/issues/4404
//{s: `DELETE`, err: `found EOF, expected FROM at line 1, char 8`},
......@@ -3250,6 +3252,7 @@ func TestParseDuration(t *testing.T) {
d time.Duration
err string
}{
{s: `10ns`, d: 10},
{s: `10u`, d: 10 * time.Microsecond},
{s: `10µ`, d: 10 * time.Microsecond},
{s: `15ms`, d: 15 * time.Millisecond},
......@@ -3270,6 +3273,7 @@ func TestParseDuration(t *testing.T) {
{s: `ms`, err: "invalid duration"},
{s: `1.2w`, err: "invalid duration"},
{s: `10x`, err: "invalid duration"},
{s: `10n`, err: "invalid duration"},
}
for i, tt := range tests {
......
package influxql
type subqueryBuilder struct {
ic IteratorCreator
stmt *SelectStatement
}
// buildAuxIterator constructs an auxiliary Iterator from a subquery.
func (b *subqueryBuilder) buildAuxIterator(opt IteratorOptions) (Iterator, error) {
// Retrieve a list of fields needed for conditions.
auxFields := opt.Aux
conds := ExprNames(opt.Condition)
if len(conds) > 0 {
auxFields = make([]VarRef, len(opt.Aux)+len(conds))
copy(auxFields, opt.Aux)
copy(auxFields[len(opt.Aux):], conds)
}
// Map the desired auxiliary fields from the substatement.
indexes := b.mapAuxFields(auxFields)
subOpt, err := newIteratorOptionsSubstatement(b.stmt, opt)
if err != nil {
return nil, err
}
subOpt.Aux = auxFields
itrs, err := buildIterators(b.stmt, b.ic, subOpt)
if err != nil {
return nil, err
}
// Construct the iterators for the subquery.
input := NewIteratorMapper(itrs, nil, indexes, subOpt)
// If there is a condition, filter it now.
if opt.Condition != nil {
input = NewFilterIterator(input, opt.Condition, subOpt)
}
return input, nil
}
func (b *subqueryBuilder) mapAuxFields(auxFields []VarRef) []IteratorMap {
indexes := make([]IteratorMap, len(auxFields))
for i, name := range auxFields {
m := b.mapAuxField(&name)
if m == nil {
// If this field doesn't map to anything, use the NullMap so it
// shows up as null.
m = NullMap{}
}
indexes[i] = m
}
return indexes
}
func (b *subqueryBuilder) mapAuxField(name *VarRef) IteratorMap {
offset := 0
for i, f := range b.stmt.Fields {
if f.Name() == name.Val {
return FieldMap(i + offset)
} else if call, ok := f.Expr.(*Call); ok && (call.Name == "top" || call.Name == "bottom") {
// We may match one of the arguments in "top" or "bottom".
if len(call.Args) > 2 {
for j, arg := range call.Args[1 : len(call.Args)-1] {
if arg, ok := arg.(*VarRef); ok && arg.Val == name.Val {
return FieldMap(i + j + 1)
}
}
// Increment the offset so we have the correct index for later fields.
offset += len(call.Args) - 2
}
}
}
// Unable to find this in the list of fields.
// Look within the dimensions and create a field if we find it.
for _, d := range b.stmt.Dimensions {
if d, ok := d.Expr.(*VarRef); ok && name.Val == d.Val {
return TagMap(d.Val)
}
}
// Unable to find any matches.
return nil
}
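The offset bookkeeping in `mapAuxField` below is subtle: a `top(value, host, 3)` call consumes one output column for the field itself plus one per tag argument, shifting the indexes of every later field. A standalone sketch of that arithmetic (hypothetical field list, not the influxql types):
```
package main

import "fmt"

func main() {
	// SELECT top(value, host, 3), foo FROM ...
	// Output columns: 0 = top, 1 = host (pulled out of top()), 2 = foo.
	type field struct {
		name    string
		tagArgs int // extra args between the field and the limit in top()/bottom()
	}
	fields := []field{{"top", 1}, {"foo", 0}}

	offset := 0
	for i, f := range fields {
		// Mirrors FieldMap(i + offset) and offset += len(call.Args) - 2.
		fmt.Printf("%s -> column %d\n", f.name, i+offset)
		offset += f.tagArgs
	}
}
```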
func (b *subqueryBuilder) buildVarRefIterator(expr *VarRef, opt IteratorOptions) (Iterator, error) {
// Look for the field or tag that is driving this query.
driver := b.mapAuxField(expr)
if driver == nil {
// Exit immediately if there is no driver; without a driver there
// are no results.
return nil, nil
}
// Determine necessary auxiliary fields for this query.
auxFields := opt.Aux
conds := ExprNames(opt.Condition)
if len(conds) > 0 && len(opt.Aux) > 0 {
// Combine the auxiliary fields requested with the ones in the condition.
auxFields = make([]VarRef, len(opt.Aux)+len(conds))
copy(auxFields, opt.Aux)
copy(auxFields[len(opt.Aux):], conds)
} else if len(conds) > 0 {
// Set the auxiliary fields to what is in the condition since we have
// requested none in the query itself.
auxFields = conds
}
// Map the auxiliary fields to their index in the subquery.
indexes := b.mapAuxFields(auxFields)
subOpt, err := newIteratorOptionsSubstatement(b.stmt, opt)
if err != nil {
return nil, err
}
subOpt.Aux = auxFields
itrs, err := buildIterators(b.stmt, b.ic, subOpt)
if err != nil {
return nil, err
}
// Construct the iterators for the subquery.
input := NewIteratorMapper(itrs, driver, indexes, subOpt)
// If there is a condition, filter it now.
if opt.Condition != nil {
input = NewFilterIterator(input, opt.Condition, subOpt)
}
return input, nil
}
......@@ -34,6 +34,8 @@ type MetaClientMock struct {
RetentionPolicyFn func(database, name string) (rpi *meta.RetentionPolicyInfo, err error)
AuthenticateFn func(username, password string) (ui *meta.UserInfo, err error)
AdminUserExistsFn func() bool
SetAdminPrivilegeFn func(username string, admin bool) error
SetDataFn func(*meta.Data) error
SetPrivilegeFn func(username, database string, p influxql.Privilege) error
......@@ -43,6 +45,7 @@ type MetaClientMock struct {
UpdateUserFn func(name, password string) error
UserPrivilegeFn func(username, database string) (*influxql.Privilege, error)
UserPrivilegesFn func(username string) (map[string]influxql.Privilege, error)
UserFn func(username string) (*meta.UserInfo, error)
UsersFn func() []meta.UserInfo
}
......@@ -150,7 +153,13 @@ func (c *MetaClientMock) UserPrivileges(username string) (map[string]influxql.Pr
return c.UserPrivilegesFn(username)
}
func (c *MetaClientMock) Users() []meta.UserInfo { return c.UsersFn() }
func (c *MetaClientMock) Authenticate(username, password string) (*meta.UserInfo, error) {
return c.AuthenticateFn(username, password)
}
func (c *MetaClientMock) AdminUserExists() bool { return c.AdminUserExistsFn() }
func (c *MetaClientMock) User(username string) (*meta.UserInfo, error) { return c.UserFn(username) }
func (c *MetaClientMock) Users() []meta.UserInfo { return c.UsersFn() }
func (c *MetaClientMock) Open() error { return c.OpenFn() }
func (c *MetaClientMock) Data() meta.Data { return c.DataFn() }
......
......@@ -46,7 +46,7 @@ const (
// Point defines the values that will be written to the database.
type Point interface {
// Name return the measurement name for the point.
Name() string
Name() []byte
// SetName updates the measurement name for the point.
SetName(string)
......@@ -60,6 +60,9 @@ type Point interface {
// SetTags replaces the tags for the point.
SetTags(tags Tags)
// HasTag returns true if the tag exists for the point.
HasTag(tag []byte) bool
// Fields returns the fields for the point.
Fields() (Fields, error)
......@@ -159,9 +162,6 @@ type FieldIterator interface {
// FloatValue returns the float value of the current field.
FloatValue() (float64, error)
// Delete deletes the current field.
Delete()
// Reset resets the iterator to its initial state.
Reset()
}
......@@ -251,6 +251,20 @@ func ParseKey(buf []byte) (string, Tags, error) {
return string(buf[:i]), tags, nil
}
func ParseTags(buf []byte) (Tags, error) {
return parseTags(buf), nil
}
func ParseName(buf []byte) ([]byte, error) {
// Ignore the error because scanMeasurement returns "missing fields" which we ignore
// when just parsing a key
state, i, _ := scanMeasurement(buf, 0)
if state == tagKeyState {
return buf[:i-1], nil
}
return buf[:i], nil
}
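A minimal sketch of the two new helpers against a hypothetical series key:
```
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/models"
)

func main() {
	key := []byte("cpu,host=serverA,region=uswest")

	name, err := models.ParseName(key)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", name) // cpu

	tags, err := models.ParseTags(key)
	if err != nil {
		panic(err)
	}
	fmt.Println(tags.Map()) // map[host:serverA region:uswest]
}
```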
// ParsePointsWithPrecision is similar to ParsePoints, but allows the
// caller to provide a precision for time.
//
......@@ -1316,13 +1330,8 @@ func (p *point) name() []byte {
return name
}
// Name return the measurement name for the point.
func (p *point) Name() string {
if p.cachedName != "" {
return p.cachedName
}
p.cachedName = string(escape.Unescape(p.name()))
return p.cachedName
func (p *point) Name() []byte {
return escape.Unescape(p.name())
}
// SetName updates the measurement name for the point.
......@@ -1355,21 +1364,36 @@ func (p *point) Tags() Tags {
return p.cachedTags
}
func parseTags(buf []byte) Tags {
func (p *point) HasTag(tag []byte) bool {
if len(p.key) == 0 {
return false
}
var exists bool
walkTags(p.key, func(key, value []byte) bool {
if bytes.Equal(tag, key) {
exists = true
return false
}
return true
})
return exists
}
func walkTags(buf []byte, fn func(key, value []byte) bool) {
if len(buf) == 0 {
return nil
return
}
pos, name := scanTo(buf, 0, ',')
// it's an empty key, so there are no tags
if len(name) == 0 {
return nil
return
}
tags := make(Tags, 0, bytes.Count(buf, []byte(",")))
hasEscape := bytes.IndexByte(buf, '\\') != -1
i := pos + 1
var key, value []byte
for {
......@@ -1384,14 +1408,29 @@ func parseTags(buf []byte) Tags {
}
if hasEscape {
tags = append(tags, NewTag(unescapeTag(key), unescapeTag(value)))
if !fn(unescapeTag(key), unescapeTag(value)) {
return
}
} else {
tags = append(tags, NewTag(key, value))
if !fn(key, value) {
return
}
}
i++
}
}
func parseTags(buf []byte) Tags {
if len(buf) == 0 {
return nil
}
tags := make(Tags, 0, bytes.Count(buf, []byte(",")))
walkTags(buf, func(key, value []byte) bool {
tags = append(tags, NewTag(key, value))
return true
})
return tags
}
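And a sketch of the new `HasTag`, which walks the key without materializing a `Tags` slice (hypothetical point data):
```
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/models"
)

func main() {
	pts, err := models.ParsePointsString(`cpu,host=serverA,region=uswest value=1 1000000000`)
	if err != nil {
		panic(err)
	}
	fmt.Println(pts[0].HasTag([]byte("host")))    // true
	fmt.Println(pts[0].HasTag([]byte("missing"))) // false
}
```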
......@@ -1404,7 +1443,7 @@ func MakeKey(name []byte, tags Tags) []byte {
// SetTags replaces the tags for the point.
func (p *point) SetTags(tags Tags) {
p.key = MakeKey([]byte(p.Name()), tags)
p.key = MakeKey(p.Name(), tags)
p.cachedTags = tags
}
......@@ -1414,7 +1453,7 @@ func (p *point) AddTag(key, value string) {
tags = append(tags, Tag{Key: []byte(key), Value: []byte(value)})
sort.Sort(tags)
p.cachedTags = tags
p.key = MakeKey([]byte(p.Name()), tags)
p.key = MakeKey(p.Name(), tags)
}
// Fields returns the fields for the point.
......@@ -1879,40 +1918,35 @@ func (a Tags) HashKey() []byte {
return nil
}
// Type invariant: Tags are sorted
escaped := make(Tags, 0, len(a))
sz := 0
for _, t := range a {
ek := escapeTag(t.Key)
ev := escapeTag(t.Value)
if len(ev) > 0 {
escaped = append(escaped, Tag{Key: ek, Value: ev})
sz += len(ek) + len(ev)
}
}
// Extract keys and determine final size.
sz := len(escaped) + (len(escaped) * 2) // separators
keys := make([][]byte, len(escaped)+1)
for i, t := range escaped {
keys[i] = t.Key
sz += len(t.Key) + len(t.Value)
}
keys = keys[:len(escaped)]
sort.Sort(byteSlices(keys))
sz += len(escaped) + (len(escaped) * 2) // separators
// Generate marshaled bytes.
b := make([]byte, sz)
buf := b
idx := 0
for i, k := range keys {
for _, k := range escaped {
buf[idx] = ','
idx++
copy(buf[idx:idx+len(k)], k)
idx += len(k)
copy(buf[idx:idx+len(k.Key)], k.Key)
idx += len(k.Key)
buf[idx] = '='
idx++
v := escaped[i].Value
copy(buf[idx:idx+len(v)], v)
idx += len(v)
copy(buf[idx:idx+len(k.Value)], k.Value)
idx += len(k.Value)
}
return b[:idx]
}
......@@ -2051,26 +2085,6 @@ func (p *point) FloatValue() (float64, error) {
return f, nil
}
// Delete deletes the current field.
func (p *point) Delete() {
switch {
case p.it.end == p.it.start:
case p.it.end >= len(p.fields):
// Remove the trailing comma if there is more than one field
p.fields = bytes.TrimSuffix(p.fields[:p.it.start], []byte(","))
case p.it.start == 0:
p.fields = p.fields[p.it.end:]
default:
p.fields = append(p.fields[:p.it.start], p.fields[p.it.end:]...)
}
p.it.end = p.it.start
p.it.key = nil
p.it.valueBuf = nil
p.it.fieldType = Empty
}
// Reset resets the iterator to its initial state.
func (p *point) Reset() {
p.it.fieldType = Empty
......
......@@ -40,6 +40,16 @@ func TestMarshal(t *testing.T) {
}
}
func TestTags_HashKey(t *testing.T) {
tags = models.NewTags(map[string]string{"A FOO": "bar", "APPLE": "orange", "host": "serverA", "region": "uswest"})
got := tags.HashKey()
if exp := ",A\\ FOO=bar,APPLE=orange,host=serverA,region=uswest"; string(got) != exp {
t.Log("got: ", string(got))
t.Log("exp: ", exp)
t.Error("invalid match")
}
}
func BenchmarkMarshal(b *testing.B) {
for i := 0; i < b.N; i++ {
tags.HashKey()
......@@ -761,7 +771,7 @@ func TestParsePointWhitespace(t *testing.T) {
t.Fatalf("[Example %d] got %d points, expected %d", i, got, exp)
}
if got, exp := pts[0].Name(), expPoint.Name(); got != exp {
if got, exp := string(pts[0].Name()), string(expPoint.Name()); got != exp {
t.Fatalf("[Example %d] got %v measurement, expected %v", i, got, exp)
}
......@@ -2183,130 +2193,6 @@ m a=2i,b=3i,c=true,d="stuff",e=-0.23,f=123.456
}
}
func TestPoint_FieldIterator_Delete_Begin(t *testing.T) {
points, err := models.ParsePointsString(`m a=1,b=2,c=3`)
if err != nil || len(points) != 1 {
t.Fatal("failed parsing point")
}
fi := points[0].FieldIterator()
fi.Next() // a
fi.Delete()
fi.Reset()
got := toFields(fi)
exp := models.Fields{"b": float64(2), "c": float64(3)}
if !reflect.DeepEqual(got, exp) {
t.Fatalf("Delete failed, got %#v, exp %#v", got, exp)
}
if _, err = models.ParsePointsString(points[0].String()); err != nil {
t.Fatalf("Failed to parse point: %v", err)
}
}
func TestPoint_FieldIterator_Delete_Middle(t *testing.T) {
points, err := models.ParsePointsString(`m a=1,b=2,c=3`)
if err != nil || len(points) != 1 {
t.Fatal("failed parsing point")
}
fi := points[0].FieldIterator()
fi.Next() // a
fi.Next() // b
fi.Delete()
fi.Reset()
got := toFields(fi)
exp := models.Fields{"a": float64(1), "c": float64(3)}
if !reflect.DeepEqual(got, exp) {
t.Fatalf("Delete failed, got %#v, exp %#v", got, exp)
}
if _, err = models.ParsePointsString(points[0].String()); err != nil {
t.Fatalf("Failed to parse point: %v", err)
}
}
func TestPoint_FieldIterator_Delete_End(t *testing.T) {
points, err := models.ParsePointsString(`m a=1,b=2,c=3`)
if err != nil || len(points) != 1 {
t.Fatal("failed parsing point")
}
fi := points[0].FieldIterator()
fi.Next() // a
fi.Next() // b
fi.Next() // c
fi.Delete()
fi.Reset()
got := toFields(fi)
exp := models.Fields{"a": float64(1), "b": float64(2)}
if !reflect.DeepEqual(got, exp) {
t.Fatalf("Delete failed, got %#v, exp %#v", got, exp)
}
if _, err = models.ParsePointsString(points[0].String()); err != nil {
t.Fatalf("Failed to parse point: %v", err)
}
}
func TestPoint_FieldIterator_Delete_Nothing(t *testing.T) {
points, err := models.ParsePointsString(`m a=1,b=2,c=3`)
if err != nil || len(points) != 1 {
t.Fatal("failed parsing point")
}
fi := points[0].FieldIterator()
fi.Delete()
fi.Reset()
got := toFields(fi)
exp := models.Fields{"a": float64(1), "b": float64(2), "c": float64(3)}
if !reflect.DeepEqual(got, exp) {
t.Fatalf("Delete failed, got %#v, exp %#v", got, exp)
}
if _, err = models.ParsePointsString(points[0].String()); err != nil {
t.Fatalf("Failed to parse point: %v", err)
}
}
func TestPoint_FieldIterator_Delete_Twice(t *testing.T) {
points, err := models.ParsePointsString(`m a=1,b=2,c=3`)
if err != nil || len(points) != 1 {
t.Fatal("failed parsing point")
}
fi := points[0].FieldIterator()
fi.Next() // a
fi.Next() // b
fi.Delete()
fi.Delete() // no-op
fi.Reset()
got := toFields(fi)
exp := models.Fields{"a": float64(1), "c": float64(3)}
if !reflect.DeepEqual(got, exp) {
t.Fatalf("Delete failed, got %#v, exp %#v", got, exp)
}
if _, err = models.ParsePointsString(points[0].String()); err != nil {
t.Fatalf("Failed to parse point: %v", err)
}
}
func TestEscapeStringField(t *testing.T) {
cases := []struct {
in string
......
......@@ -82,16 +82,16 @@ func (v *compressedList) MarshalBinary() (data []byte, err error) {
func (v *compressedList) UnmarshalBinary(data []byte) error {
// Set the count.
v.count = binary.BigEndian.Uint32(data[:4])
v.count, data = binary.BigEndian.Uint32(data[:4]), data[4:]
// Set the last value.
v.last = binary.BigEndian.Uint32(data[4:8])
v.last, data = binary.BigEndian.Uint32(data[:4]), data[4:]
// Set the list.
sz := int(data[8]) | int(data[9]) | int(data[10]) | int(data[11])
v.b = make([]uint8, 0, sz)
for i := 12; i < sz+12; i++ {
v.b = append(v.b, uint8(data[i]))
sz, data := binary.BigEndian.Uint32(data[:4]), data[4:]
v.b = make([]uint8, sz)
for i := uint32(0); i < sz; i++ {
v.b[i] = uint8(data[i])
}
return nil
}
......
......@@ -41,3 +41,60 @@ func (p *Bytes) Put(c []byte) {
default:
}
}
// LimitedBytes is a pool of reusable byte slices. Slices held by the pool
// are not garbage collected while pooled. The pool holds at most a fixed
// number of byte slices, each up to a maximum size. If the pool is empty,
// Get returns a newly allocated byte slice. Byte slices returned to the
// pool that meet or exceed the maximum size are dropped.
type LimitedBytes struct {
allocated int64
maxSize int
pool chan []byte
}
// NewLimitedBytes returns a LimitedBytes pool that holds up to capacity
// byte slices, dropping any slice returned to it at or above maxSize.
func NewLimitedBytes(capacity int, maxSize int) *LimitedBytes {
return &LimitedBytes{
pool: make(chan []byte, capacity),
maxSize: maxSize,
}
}
// Get returns a byte slice with at least sz capacity. Items returned
// may not be in the zero state and should be reset by the caller.
func (p *LimitedBytes) Get(sz int) []byte {
var c []byte
// Try to take a slice from the pool; if none is available,
// allocate a new one.
select {
case c = <-p.pool:
default:
return make([]byte, sz)
}
if cap(c) < sz {
return make([]byte, sz)
}
return c[:sz]
}
// Put returns a slice to the pool. If the pool is full, or if the byte
// slice is at or above the configured max size, it is discarded.
func (p *LimitedBytes) Put(c []byte) {
// Drop buffers at or above the max size
if cap(c) >= p.maxSize {
return
}
select {
case p.pool <- c:
default:
}
}
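A minimal usage sketch of the new pool (the capacity and sizes below are arbitrary):
```
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/pkg/pool"
)

func main() {
	// Hold at most 4 slices; any slice of 64KB or more is dropped on Put.
	bp := pool.NewLimitedBytes(4, 64*1024)

	buf := bp.Get(1024) // contents may be stale; reset before use if needed
	copy(buf, "scratch")

	bp.Put(buf) // pooled for reuse; surplus or oversized slices are dropped
	fmt.Println(cap(buf) >= 1024) // true
}
```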
......
package pool_test
import (
"testing"
"github.com/influxdata/influxdb/pkg/pool"
)
func TestLimitedBytePool_Put_MaxSize(t *testing.T) {
bp := pool.NewLimitedBytes(1, 10)
bp.Put(make([]byte, 1024)) // should be dropped
if got, exp := cap(bp.Get(10)), 10; got != exp {
t.Fatalf("max cap size exceeded: got %v, exp %v", got, exp)
}
}
# InfluxDB Admin Interface
This is the built-in admin interface that ships with InfluxDB. The service is intended to add little overhead and require minimal preprocessing.
## How it works
Static assets, located in the `assets` directory, are embedded in the `influxd` binary and served from memory using a simple fileserver.
The admin UI itself uses [React](https://github.com/facebook/react) for the user interface to interact directly with the InfluxDB API, usually running on port `8086`.
## Building
The only step required to bundle the admin UI with InfluxDB is to create a compressed file system using `statik` as follows:
```
go get github.com/rakyll/statik # make sure $GOPATH/bin is listed in your PATH
cd $GOPATH/src/github.com/influxdata/influxdb
go generate github.com/influxdata/influxdb/services/admin
```
The `go generate ./...` command will run `statik` to generate the `statik/statik.go` file. The generated `go` file will embed the admin interface assets into the InfluxDB binary.
This step should be run before submitting any pull requests which include modifications to admin interface assets.
package admin // import "github.com/influxdata/influxdb/services/admin"
//go:generate statik -src=assets
//go:generate go fmt statik/statik.go
Please note that these files are embedded into the `influxd` binary using the
[statik](https://github.com/rakyll/statik) tool. `go generate` needs to be run
whenever there are changes made to files in this directory. See the admin
interface readme for more information.
body {
padding-top: 70px;
/* Required padding for .navbar-fixed-top. Remove if using .navbar-static-top. Change if height of navigation changes. */
}
html,
body {
height: 100%;
/* The html and body elements cannot have any padding or margin. */
}
code {
display: block;
}
#settings {
display: none;
}
#settings form > div {
margin-right: 20px;
}
#settings form input#port {
width: 80px;
}
#settings form label {
padding-right: 5px;
}
div#content {
margin-bottom: -10px;
}
div#table h2 {
color: #999;
margin-top: -8px;
font-size: 16px
}
textarea#content-data {
font-family: "Courier New";
height: 200px;
}
div#query-alerts {
margin-top: 30px;
}
div#modal-error, div#modal-success, div#query-error, div#query-success {
display: none;
}
/* Wrapper for page content to push down footer */
#wrap {
min-height: 100%;
height: auto !important;
height: 100%;
/* Negatively indent the footer by its height */
margin: 0 auto -60px;
}
/* Set the fixed height of the footer here */
#push,
#footer {
height: 60px;
}
#footer {
background-color: #f5f5f5;
border-top: 1px solid #dfdfdf;
}
#footer p {
margin: 20px 0;
}
/* Lastly, apply responsive CSS fixes as necessary */
@media (max-width: 767px) {
#footer {
margin-left: -20px;
margin-right: -20px;
padding-left: 20px;
padding-right: 20px;
}
}
.dropdown-menu > li > label {
display: block;
padding: 3px 20px;
clear: both;
font-weight: normal;
line-height: 1.42857143;
color: #333333;
white-space: nowrap;
}
.dropdown-menu > li > label:hover,
.dropdown-menu > li > label:focus {
text-decoration: none;
color: #262626;
background-color: #f5f5f5;
}
.dropdown-menu > li > input:checked ~ label,
.dropdown-menu > li > input:checked ~ label:hover,
.dropdown-menu > li > input:checked ~ label:focus,
.dropdown-menu > .active > label,
.dropdown-menu > .active > label:hover,
.dropdown-menu > .active > label:focus {
color: #ffffff;
text-decoration: none;
outline: 0;
background-color: #428bca;
}
.dropdown-menu > li > input[disabled] ~ label,
.dropdown-menu > li > input[disabled] ~ label:hover,
.dropdown-menu > li > input[disabled] ~ label:focus,
.dropdown-menu > .disabled > label,
.dropdown-menu > .disabled > label:hover,
.dropdown-menu > .disabled > label:focus {
color: #999999;
}
.dropdown-menu > li > input[disabled] ~ label:hover,
.dropdown-menu > li > input[disabled] ~ label:focus,
.dropdown-menu > .disabled > label:hover,
.dropdown-menu > .disabled > label:focus {
text-decoration: none;
background-color: transparent;
background-image: none;
filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
cursor: not-allowed;
}
.dropdown-menu > li > label {
margin-bottom: 0;
cursor: pointer;
}
.dropdown-menu > li > input[type="radio"],
.dropdown-menu > li > input[type="checkbox"] {
display: none;
position: absolute;
top: -9999em;
left: -9999em;
}
.dropdown-menu > li > label:focus,
.dropdown-menu > li > input:focus ~ label {
outline: thin dotted;
outline: 5px auto -webkit-focus-ring-color;
outline-offset: -2px;
}
.dropdown-menu.pull-right {
right: 0;
left: auto;
}
.dropdown-menu.pull-top {
bottom: 100%;
top: auto;
margin: 0 0 2px;
-webkit-box-shadow: 0 -6px 12px rgba(0, 0, 0, 0.175);
box-shadow: 0 -6px 12px rgba(0, 0, 0, 0.175);
}
.dropdown-menu.pull-center {
right: 50%;
left: auto;
}
.dropdown-menu.pull-middle {
right: 100%;
margin: 0 2px 0 0;
box-shadow: -5px 0 10px rgba(0, 0, 0, 0.2);
left: auto;
}
.dropdown-menu.pull-middle.pull-right {
right: auto;
left: 100%;
margin: 0 0 0 2px;
box-shadow: 5px 0 10px rgba(0, 0, 0, 0.2);
}
.dropdown-menu.pull-middle.pull-center {
right: 50%;
margin: 0;
box-shadow: 0 0 10px rgba(0, 0, 0, 0.2);
}
.dropdown-menu.bullet {
margin-top: 8px;
}
.dropdown-menu.bullet:before {
width: 0;
height: 0;
content: '';
display: inline-block;
position: absolute;
border-color: transparent;
border-style: solid;
-webkit-transform: rotate(360deg);
border-width: 0 7px 7px;
border-bottom-color: #cccccc;
border-bottom-color: rgba(0, 0, 0, 0.15);
top: -7px;
left: 9px;
}
.dropdown-menu.bullet:after {
width: 0;
height: 0;
content: '';
display: inline-block;
position: absolute;
border-color: transparent;
border-style: solid;
-webkit-transform: rotate(360deg);
border-width: 0 6px 6px;
border-bottom-color: #ffffff;
top: -6px;
left: 10px;
}
.dropdown-menu.bullet.pull-right:before {
left: auto;
right: 9px;
}
.dropdown-menu.bullet.pull-right:after {
left: auto;
right: 10px;
}
.dropdown-menu.bullet.pull-top {
margin-top: 0;
margin-bottom: 8px;
}
.dropdown-menu.bullet.pull-top:before {
top: auto;
bottom: -7px;
border-bottom-width: 0;
border-top-width: 7px;
border-top-color: #cccccc;
border-top-color: rgba(0, 0, 0, 0.15);
}
.dropdown-menu.bullet.pull-top:after {
top: auto;
bottom: -6px;
border-bottom: none;
border-top-width: 6px;
border-top-color: #ffffff;
}
.dropdown-menu.bullet.pull-center:before {
left: auto;
right: 50%;
margin-right: -7px;
}
.dropdown-menu.bullet.pull-center:after {
left: auto;
right: 50%;
margin-right: -6px;
}
.dropdown-menu.bullet.pull-middle {
margin-right: 8px;
}
.dropdown-menu.bullet.pull-middle:before {
top: 50%;
left: 100%;
right: auto;
margin-top: -7px;
border-right-width: 0;
border-bottom-color: transparent;
border-top-width: 7px;
border-left-color: #cccccc;
border-left-color: rgba(0, 0, 0, 0.15);
}
.dropdown-menu.bullet.pull-middle:after {
top: 50%;
left: 100%;
right: auto;
margin-top: -6px;
border-right-width: 0;
border-bottom-color: transparent;
border-top-width: 6px;
border-left-color: #ffffff;
}
.dropdown-menu.bullet.pull-middle.pull-right {
margin-right: 0;
margin-left: 8px;
}
.dropdown-menu.bullet.pull-middle.pull-right:before {
left: -7px;
border-left-width: 0;
border-right-width: 7px;
border-right-color: #cccccc;
border-right-color: rgba(0, 0, 0, 0.15);
}
.dropdown-menu.bullet.pull-middle.pull-right:after {
left: -6px;
border-left-width: 0;
border-right-width: 6px;
border-right-color: #ffffff;
}
.dropdown-menu.bullet.pull-middle.pull-center {
margin-left: 0;
margin-right: 0;
}
.dropdown-menu.bullet.pull-middle.pull-center:before {
border: none;
display: none;
}
.dropdown-menu.bullet.pull-middle.pull-center:after {
border: none;
display: none;
}
.dropdown-submenu {
position: relative;
}
.dropdown-submenu > .dropdown-menu {
top: 0;
left: 100%;
margin-top: -6px;
margin-left: -1px;
border-top-left-radius: 0;
}
.dropdown-submenu > a:before {
display: block;
float: right;
width: 0;
height: 0;
content: "";
margin-top: 6px;
margin-right: -8px;
border-width: 4px 0 4px 4px;
border-style: solid;
border-left-style: dashed;
border-top-color: transparent;
border-bottom-color: transparent;
}
@media (max-width: 767px) {
.navbar-nav .dropdown-submenu > a:before {
margin-top: 8px;
border-color: inherit;
border-style: solid;
border-width: 4px 4px 0;
border-left-color: transparent;
border-right-color: transparent;
}
.navbar-nav .dropdown-submenu > a {
padding-left: 40px;
}
.navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > a,
.navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > label {
padding-left: 35px;
}
.navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > a,
.navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > label {
padding-left: 45px;
}
.navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > a,
.navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > label {
padding-left: 55px;
}
.navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > a,
.navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > label {
padding-left: 65px;
}
.navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > a,
.navbar-nav > .open > .dropdown-menu > .dropdown-submenu > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > .dropdown-menu > li > label {
padding-left: 75px;
}
}
.navbar-default .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a,
.navbar-default .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a:hover,
.navbar-default .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a:focus {
background-color: #e7e7e7;
color: #555555;
}
@media (max-width: 767px) {
.navbar-default .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a:before {
border-top-color: #555555;
}
}
.navbar-inverse .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a,
.navbar-inverse .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a:hover,
.navbar-inverse .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a:focus {
background-color: #080808;
color: #ffffff;
}
@media (max-width: 767px) {
.navbar-inverse .navbar-nav .open > .dropdown-menu > .dropdown-submenu.open > a:before {
border-top-color: #ffffff;
}
}
package admin
const (
// DefaultBindAddress is the default bind address for the HTTP server.
DefaultBindAddress = ":8083"
)
// Config represents the configuration for the admin service.
type Config struct {
Enabled bool `toml:"enabled"`
BindAddress string `toml:"bind-address"`
HTTPSEnabled bool `toml:"https-enabled"`
HTTPSCertificate string `toml:"https-certificate"`
Version string `toml:"-"`
}
// NewConfig returns an instance of Config with defaults.
func NewConfig() Config {
return Config{
BindAddress: DefaultBindAddress,
HTTPSEnabled: false,
HTTPSCertificate: "/etc/ssl/influxdb.pem",
}
}
package admin_test
import (
"testing"
"github.com/BurntSushi/toml"
"github.com/influxdata/influxdb/services/admin"
)
func TestConfig_Parse(t *testing.T) {
// Parse configuration.
var c admin.Config
if _, err := toml.Decode(`
enabled = true
bind-address = ":8083"
https-enabled = true
https-certificate = "/dev/null"
`, &c); err != nil {
t.Fatal(err)
}
// Validate configuration.
if c.Enabled != true {
t.Fatalf("unexpected enabled: %v", c.Enabled)
} else if c.BindAddress != ":8083" {
t.Fatalf("unexpected bind address: %s", c.BindAddress)
} else if c.HTTPSEnabled != true {
t.Fatalf("unexpected https enabled: %v", c.HTTPSEnabled)
} else if c.HTTPSCertificate != "/dev/null" {
t.Fatalf("unexpected https certificate: %v", c.HTTPSCertificate)
}
}
package admin // import "github.com/influxdata/influxdb/services/admin"
import (
"crypto/tls"
"fmt"
"net"
"net/http"
"strings"
// Register static assets via statik.
_ "github.com/influxdata/influxdb/services/admin/statik"
"github.com/rakyll/statik/fs"
"github.com/uber-go/zap"
)
// Service manages the listener for an admin endpoint.
type Service struct {
listener net.Listener
addr string
https bool
cert string
err chan error
version string
logger zap.Logger
}
// NewService returns a new instance of Service.
func NewService(c Config) *Service {
return &Service{
addr: c.BindAddress,
https: c.HTTPSEnabled,
cert: c.HTTPSCertificate,
err: make(chan error),
version: c.Version,
logger: zap.New(zap.NullEncoder()),
}
}
// Open starts the service
func (s *Service) Open() error {
s.logger.Info("Starting admin service")
s.logger.Info("DEPRECATED: This plugin is deprecated as of 1.1.0 and will be removed in a future release")
// Open listener.
if s.https {
cert, err := tls.LoadX509KeyPair(s.cert, s.cert)
if err != nil {
return err
}
listener, err := tls.Listen("tcp", s.addr, &tls.Config{
Certificates: []tls.Certificate{cert},
})
if err != nil {
return err
}
s.logger.Info(fmt.Sprint("Listening on HTTPS: ", listener.Addr().String()))
s.listener = listener
} else {
listener, err := net.Listen("tcp", s.addr)
if err != nil {
return err
}
s.logger.Info(fmt.Sprint("Listening on HTTP: ", listener.Addr().String()))
s.listener = listener
}
// Begin listening for requests in a separate goroutine.
go s.serve()
return nil
}
// Close closes the underlying listener.
func (s *Service) Close() error {
if s.listener != nil {
return s.listener.Close()
}
return nil
}
// WithLogger sets the logger for the service
func (s *Service) WithLogger(log zap.Logger) {
s.logger = log.With(zap.String("service", "admin"))
}
// Err returns a channel for fatal errors that occur on the listener.
func (s *Service) Err() <-chan error { return s.err }
// Addr returns the listener's address. Returns nil if listener is closed.
func (s *Service) Addr() net.Addr {
if s.listener != nil {
return s.listener.Addr()
}
return nil
}
// serve serves the handler from the listener.
func (s *Service) serve() {
addVersionHeaderThenServe := func(h http.Handler) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Add("X-InfluxDB-Version", s.version)
h.ServeHTTP(w, r)
}
}
// Instantiate file system from embedded admin.
statikFS, err := fs.New()
if err != nil {
panic(err)
}
// Run file system handler on listener.
err = http.Serve(s.listener, addVersionHeaderThenServe(http.FileServer(statikFS)))
if err != nil && !strings.Contains(err.Error(), "closed") {
s.err <- fmt.Errorf("listener error: addr=%s, err=%s", s.Addr(), err)
}
}
package admin_test
import (
"io/ioutil"
"net/http"
"testing"
"github.com/influxdata/influxdb/services/admin"
)
// Ensure service can serve the root index page of the admin.
func TestService_Index(t *testing.T) {
// Start service on random port.
s := admin.NewService(admin.Config{BindAddress: "127.0.0.1:0"})
if err := s.Open(); err != nil {
t.Fatal(err)
}
defer s.Close()
// Request root index page.
resp, err := http.Get("http://" + s.Addr().String())
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
// Validate status code and body.
if resp.StatusCode != http.StatusOK {
t.Fatalf("unexpected status: %d", resp.StatusCode)
} else if _, err := ioutil.ReadAll(resp.Body); err != nil {
t.Fatalf("unable to read body: %s", err)
}
}
Please note that this file is automatically generated by the
[statik](https://github.com/rakyll/statik) tool, and should not be
updated directly. See the Admin UI readme for more information.
......@@ -222,8 +222,8 @@ func TestParse(t *testing.T) {
// If we erred out, it was intentional and the following tests won't work
continue
}
if point.Name() != test.measurement {
t.Fatalf("name parse failer. expected %v, got %v", test.measurement, point.Name())
if string(point.Name()) != test.measurement {
t.Fatalf("name parse failer. expected %v, got %v", test.measurement, string(point.Name()))
}
if len(point.Tags()) != len(test.tags) {
t.Fatalf("tags len mismatch. expected %d, got %d", len(test.tags), len(point.Tags()))
......
......@@ -10,7 +10,6 @@ import (
"io"
"log"
"net/http"
"net/http/pprof"
"os"
"runtime/debug"
"strconv"
......@@ -38,6 +37,10 @@ const (
//
// This has no relation to the number of bytes that are returned.
DefaultChunkSize = 10000
DefaultDebugRequestsInterval = 10 * time.Second
MaxDebugRequestsInterval = 6 * time.Hour
)
// AuthenticationMethod defines the type of authentication used.
......@@ -71,6 +74,7 @@ type Handler struct {
MetaClient interface {
Database(name string) *meta.DatabaseInfo
Databases() []meta.DatabaseInfo
Authenticate(username, password string) (ui *meta.UserInfo, err error)
User(username string) (*meta.UserInfo, error)
AdminUserExists() bool
......@@ -99,16 +103,19 @@ type Handler struct {
Logger zap.Logger
CLFLogger *log.Logger
stats *Statistics
requestTracker *RequestTracker
}
// NewHandler returns a new instance of handler with routes.
func NewHandler(c Config) *Handler {
h := &Handler{
mux: pat.New(),
Config: &c,
Logger: zap.New(zap.NullEncoder()),
CLFLogger: log.New(os.Stderr, "[httpd] ", 0),
stats: &Statistics{},
mux: pat.New(),
Config: &c,
Logger: zap.New(zap.NullEncoder()),
CLFLogger: log.New(os.Stderr, "[httpd] ", 0),
stats: &Statistics{},
requestTracker: NewRequestTracker(),
}
h.AddRoutes([]Route{
......@@ -245,18 +252,11 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Header().Add("X-Influxdb-Version", h.Version)
if strings.HasPrefix(r.URL.Path, "/debug/pprof") && h.Config.PprofEnabled {
switch r.URL.Path {
case "/debug/pprof/cmdline":
pprof.Cmdline(w, r)
case "/debug/pprof/profile":
pprof.Profile(w, r)
case "/debug/pprof/symbol":
pprof.Symbol(w, r)
default:
pprof.Index(w, r)
}
h.handleProfiles(w, r)
} else if strings.HasPrefix(r.URL.Path, "/debug/vars") {
h.serveExpvar(w, r)
} else if strings.HasPrefix(r.URL.Path, "/debug/requests") {
h.serveDebugRequests(w, r)
} else {
h.mux.ServeHTTP(w, r)
}
......@@ -282,6 +282,7 @@ func (h *Handler) serveQuery(w http.ResponseWriter, r *http.Request, user *meta.
defer func(start time.Time) {
atomic.AddInt64(&h.stats.QueryRequestDuration, time.Since(start).Nanoseconds())
}(time.Now())
h.requestTracker.Add(r, user)
// Retrieve the underlying ResponseWriter or initialize our own.
rw, ok := w.(ResponseWriter)
......@@ -584,6 +585,7 @@ func (h *Handler) serveWrite(w http.ResponseWriter, r *http.Request, user *meta.
atomic.AddInt64(&h.stats.ActiveWriteRequests, -1)
atomic.AddInt64(&h.stats.WriteRequestDuration, time.Since(start).Nanoseconds())
}(time.Now())
h.requestTracker.Add(r, user)
database := r.URL.Query().Get("db")
if database == "" {
......@@ -675,7 +677,7 @@ func (h *Handler) serveWrite(w http.ResponseWriter, r *http.Request, user *meta.
} else if werr, ok := err.(tsdb.PartialWriteError); ok {
atomic.AddInt64(&h.stats.PointsWrittenOK, int64(len(points)-werr.Dropped))
atomic.AddInt64(&h.stats.PointsWrittenDropped, int64(werr.Dropped))
h.httpError(w, fmt.Sprintf("partial write: %v", werr), http.StatusBadRequest)
h.httpError(w, werr.Error(), http.StatusBadRequest)
return
} else if err != nil {
atomic.AddInt64(&h.stats.PointsWrittenFail, int64(len(points)))
......@@ -686,7 +688,7 @@ func (h *Handler) serveWrite(w http.ResponseWriter, r *http.Request, user *meta.
atomic.AddInt64(&h.stats.PointsWrittenOK, int64(len(points)))
// The other points failed to parse which means the client sent invalid line protocol. We return a 400
// response code as well as the lines that failed to parse.
h.httpError(w, fmt.Sprintf("partial write:\n%v", parseError), http.StatusBadRequest)
h.httpError(w, tsdb.PartialWriteError{Reason: parseError.Error()}.Error(), http.StatusBadRequest)
return
}
......@@ -834,6 +836,64 @@ func (h *Handler) serveExpvar(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "\n}")
}
// serveDebugRequests will track requests for a period of time.
func (h *Handler) serveDebugRequests(w http.ResponseWriter, r *http.Request) {
var d time.Duration
if s := r.URL.Query().Get("seconds"); s == "" {
d = DefaultDebugRequestsInterval
} else if seconds, err := strconv.ParseInt(s, 10, 64); err != nil {
h.httpError(w, err.Error(), http.StatusBadRequest)
return
} else {
d = time.Duration(seconds) * time.Second
if d > MaxDebugRequestsInterval {
h.httpError(w, fmt.Sprintf("exceeded maximum interval time: %s > %s",
influxql.FormatDuration(d),
influxql.FormatDuration(MaxDebugRequestsInterval)),
http.StatusBadRequest)
return
}
}
var closing <-chan bool
if notifier, ok := w.(http.CloseNotifier); ok {
closing = notifier.CloseNotify()
}
profile := h.requestTracker.TrackRequests()
timer := time.NewTimer(d)
select {
case <-timer.C:
profile.Stop()
case <-closing:
// Connection was closed early.
profile.Stop()
timer.Stop()
return
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
w.Header().Add("Connection", "close")
fmt.Fprintln(w, "{")
first := true
for req, st := range profile.Requests {
val, err := json.Marshal(st)
if err != nil {
continue
}
if !first {
fmt.Fprintln(w, ",")
}
first = false
fmt.Fprintf(w, "%q: ", req.String())
w.Write(bytes.TrimSpace(val))
}
fmt.Fprintln(w, "\n}")
}
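For manual testing, a sketch of a client for the new endpoint (assuming a local instance on the default port; the handler blocks for the sampling window before responding):
```
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Sample request activity for 10 seconds (also the default window).
	resp, err := http.Get("http://localhost:8086/debug/requests?seconds=10")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// Prints per-client counts, e.g. {"user:127.0.0.1": {"writes":..,"queries":..}}
	fmt.Println(string(body))
}
```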
// parseSystemDiagnostics converts the system diagnostics into an appropriate
// format for marshaling to JSON in the /debug/vars format.
func parseSystemDiagnostics(d *diagnostics.Diagnostics) (map[string]interface{}, error) {
......
......@@ -14,6 +14,8 @@ import (
"testing"
"time"
"github.com/influxdata/influxdb/internal"
"github.com/dgrijalva/jwt-go"
"github.com/influxdata/influxdb/influxql"
"github.com/influxdata/influxdb/models"
......@@ -605,7 +607,7 @@ func TestHandler_XForwardedFor(t *testing.T) {
// NewHandler represents a test wrapper for httpd.Handler.
type Handler struct {
*httpd.Handler
MetaClient HandlerMetaStore
MetaClient *internal.MetaClientMock
StatementExecutor HandlerStatementExecutor
QueryAuthorizer HandlerQueryAuthorizer
}
......@@ -619,7 +621,10 @@ func NewHandler(requireAuthentication bool) *Handler {
h := &Handler{
Handler: httpd.NewHandler(config),
}
h.Handler.MetaClient = &h.MetaClient
h.MetaClient = &internal.MetaClientMock{}
h.Handler.MetaClient = h.MetaClient
h.Handler.QueryExecutor = influxql.NewQueryExecutor()
h.Handler.QueryExecutor.StatementExecutor = &h.StatementExecutor
h.Handler.QueryAuthorizer = &h.QueryAuthorizer
......@@ -627,39 +632,6 @@ func NewHandler(requireAuthentication bool) *Handler {
return h
}
// HandlerMetaStore is a mock implementation of Handler.MetaClient.
type HandlerMetaStore struct {
PingFn func(d time.Duration) error
DatabaseFn func(name string) *meta.DatabaseInfo
AuthenticateFn func(username, password string) (ui *meta.UserInfo, err error)
UserFn func(username string) (*meta.UserInfo, error)
AdminUserExistsFn func() bool
}
func (s *HandlerMetaStore) Ping(b bool) error {
if s.PingFn == nil {
// Default behaviour is to assume there is a leader.
return nil
}
return s.Ping(b)
}
func (s *HandlerMetaStore) Database(name string) *meta.DatabaseInfo {
return s.DatabaseFn(name)
}
func (s *HandlerMetaStore) Authenticate(username, password string) (ui *meta.UserInfo, err error) {
return s.AuthenticateFn(username, password)
}
func (s *HandlerMetaStore) AdminUserExists() bool {
return s.AdminUserExistsFn()
}
func (s *HandlerMetaStore) User(username string) (*meta.UserInfo, error) {
return s.UserFn(username)
}
// HandlerStatementExecutor is a mock implementation of Handler.StatementExecutor.
type HandlerStatementExecutor struct {
ExecuteStatementFn func(stmt influxql.Statement, ctx influxql.ExecutionContext) error
......
package httpd
import (
"archive/tar"
"bytes"
"compress/gzip"
"fmt"
"io"
"net/http"
httppprof "net/http/pprof"
"runtime/pprof"
"sort"
"strconv"
"text/tabwriter"
"time"
"github.com/influxdata/influxdb/models"
)
// handleProfiles determines which profile to return to the requester.
func (h *Handler) handleProfiles(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/debug/pprof/cmdline":
httppprof.Cmdline(w, r)
case "/debug/pprof/profile":
httppprof.Profile(w, r)
case "/debug/pprof/symbol":
httppprof.Symbol(w, r)
case "/debug/pprof/all":
h.archiveProfilesAndQueries(w, r)
default:
httppprof.Index(w, r)
}
}
// prof describes a profile name and a debug value, or in the case of a CPU
// profile, the number of seconds to collect the profile for.
type prof struct {
Name string
Debug int64
}
// archiveProfilesAndQueries collects the following profiles:
// - goroutine profile
// - heap profile
// - blocking profile
// - (optionally) CPU profile
//
// It also collects the following query results:
//
// - SHOW SHARDS
// - SHOW STATS
// - SHOW DIAGNOSTICS
//
// All information is added to a tar archive and then compressed, before being
// returned to the requester as an archive file. Where profiles support debug
// parameters, the profile is collected with debug=1. To optionally include a
// CPU profile, the requester should provide a `cpu` query parameter, and can
// also provide a `seconds` parameter to specify a non-default profile
// collection time. The default CPU profile collection time is 30 seconds.
//
// Example request including CPU profile:
//
// http://localhost:8086/debug/pprof/all?cpu=true&seconds=45
//
// The actual value of the `cpu` query parameter is ignored; its presence
// alone is what enables the CPU profile.
//
func (h *Handler) archiveProfilesAndQueries(w http.ResponseWriter, r *http.Request) {
var allProfs = []*prof{
{Name: "goroutine", Debug: 1},
{Name: "block", Debug: 1},
{Name: "heap", Debug: 1},
}
// Capture a CPU profile?
if r.FormValue("cpu") != "" {
profile := &prof{Name: "cpu"}
// For a CPU profile we'll use the Debug field to indicate the number of
// seconds to capture the profile for.
profile.Debug, _ = strconv.ParseInt(r.FormValue("seconds"), 10, 64)
if profile.Debug <= 0 {
profile.Debug = 30
}
allProfs = append([]*prof{profile}, allProfs...) // CPU profile first.
}
var (
resp bytes.Buffer // Temporary buffer for entire archive.
buf bytes.Buffer // Temporary buffer for each profile/query result.
)
gz := gzip.NewWriter(&resp)
tw := tar.NewWriter(gz)
// Collect and write out profiles.
for _, profile := range allProfs {
if profile.Name == "cpu" {
if err := pprof.StartCPUProfile(&buf); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
sleep(w, time.Duration(profile.Debug)*time.Second)
pprof.StopCPUProfile()
} else {
prof := pprof.Lookup(profile.Name)
if prof == nil {
http.Error(w, "unable to find profile "+profile.Name, http.StatusInternalServerError)
return
}
if err := prof.WriteTo(&buf, int(profile.Debug)); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
// Write the profile file's header.
err := tw.WriteHeader(&tar.Header{
Name: profile.Name + ".txt",
Mode: 0600,
Size: int64(buf.Len()),
})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
// Write the profile file's data.
if _, err := tw.Write(buf.Bytes()); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
// Reset the buffer for the next profile.
buf.Reset()
}
// Collect and write out the queries.
var allQueries = []struct {
name string
fn func() ([]*models.Row, error)
}{
{"shards", h.showShards},
{"stats", h.showStats},
{"diagnostics", h.showDiagnostics},
}
tabW := tabwriter.NewWriter(&buf, 8, 8, 1, '\t', 0)
for _, query := range allQueries {
rows, err := query.fn()
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
for i, row := range rows {
var out []byte
// Write the columns
for _, col := range row.Columns {
out = append(out, []byte(col+"\t")...)
}
out = append(out, '\n')
if _, err := tabW.Write(out); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
// Write all the values
for _, val := range row.Values {
out = out[:0]
for _, v := range val {
out = append(out, []byte(fmt.Sprintf("%v\t", v))...)
}
out = append(out, '\n')
if _, err := tabW.Write(out); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
// Write a final newline
if i < len(rows)-1 {
if _, err := tabW.Write([]byte("\n")); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
}
if err := tabW.Flush(); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
err = tw.WriteHeader(&tar.Header{
Name: query.name + ".txt",
Mode: 0600,
Size: int64(buf.Len()),
})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
// Write the query file's data.
if _, err := tw.Write(buf.Bytes()); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
// Reset the buffer for the next query.
buf.Reset()
}
// Close the tar writer.
if err := tw.Close(); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
// Close the gzip writer.
if err := gz.Close(); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
// Return the gzipped archive.
w.Header().Set("Content-Disposition", "attachment; filename=profiles.tar.gz")
w.Header().Set("Content-Type", "application/gzip")
io.Copy(w, &resp) // Nothing we can really do about an error at this point.
}
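Likewise, a sketch of fetching the new profile archive (assuming a local instance; with `cpu=true` the request blocks for the CPU profile's collection time):
```
package main

import (
	"io"
	"net/http"
	"os"
)

func main() {
	resp, err := http.Get("http://localhost:8086/debug/pprof/all?cpu=true")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Save the gzipped tar archive exactly as served.
	f, err := os.Create("profiles.tar.gz")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if _, err := io.Copy(f, resp.Body); err != nil {
		panic(err)
	}
}
```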
// showShards generates the same values that a StatementExecutor would if a
// SHOW SHARDS query was executed.
func (h *Handler) showShards() ([]*models.Row, error) {
dis := h.MetaClient.Databases()
rows := []*models.Row{}
for _, di := range dis {
row := &models.Row{Columns: []string{"id", "database", "retention_policy", "shard_group", "start_time", "end_time", "expiry_time", "owners"}, Name: di.Name}
for _, rpi := range di.RetentionPolicies {
for _, sgi := range rpi.ShardGroups {
// Shards associated with deleted shard groups are effectively deleted.
// Don't list them.
if sgi.Deleted() {
continue
}
for _, si := range sgi.Shards {
ownerIDs := make([]uint64, len(si.Owners))
for i, owner := range si.Owners {
ownerIDs[i] = owner.NodeID
}
row.Values = append(row.Values, []interface{}{
si.ID,
di.Name,
rpi.Name,
sgi.ID,
sgi.StartTime.UTC().Format(time.RFC3339),
sgi.EndTime.UTC().Format(time.RFC3339),
sgi.EndTime.Add(rpi.Duration).UTC().Format(time.RFC3339),
joinUint64(ownerIDs),
})
}
}
}
rows = append(rows, row)
}
return rows, nil
}
// showDiagnostics generates the same values that a StatementExecutor would if a
// SHOW DIAGNOSTICS query was executed.
func (h *Handler) showDiagnostics() ([]*models.Row, error) {
diags, err := h.Monitor.Diagnostics()
if err != nil {
return nil, err
}
// Get a sorted list of diagnostics keys.
sortedKeys := make([]string, 0, len(diags))
for k := range diags {
sortedKeys = append(sortedKeys, k)
}
sort.Strings(sortedKeys)
rows := make([]*models.Row, 0, len(diags))
for _, k := range sortedKeys {
row := &models.Row{Name: k}
row.Columns = diags[k].Columns
row.Values = diags[k].Rows
rows = append(rows, row)
}
return rows, nil
}
// showStats generates the same values that a StatementExecutor would if a
// SHOW STATS query was executed.
func (h *Handler) showStats() ([]*models.Row, error) {
stats, err := h.Monitor.Statistics(nil)
if err != nil {
return nil, err
}
var rows []*models.Row
for _, stat := range stats {
row := &models.Row{Name: stat.Name, Tags: stat.Tags}
values := make([]interface{}, 0, len(stat.Values))
for _, k := range stat.ValueNames() {
row.Columns = append(row.Columns, k)
values = append(values, stat.Values[k])
}
row.Values = [][]interface{}{values}
rows = append(rows, row)
}
return rows, nil
}
// joinUint64 returns a comma-delimited string of uint64 numbers.
func joinUint64(a []uint64) string {
var buf []byte // Could take a guess at initial size here.
for i, x := range a {
if i != 0 {
buf = append(buf, ',')
}
buf = strconv.AppendUint(buf, x, 10)
}
return string(buf)
}
// Taken from net/http/pprof/pprof.go
func sleep(w http.ResponseWriter, d time.Duration) {
var clientGone <-chan bool
if cn, ok := w.(http.CloseNotifier); ok {
clientGone = cn.CloseNotify()
}
select {
case <-time.After(d):
case <-clientGone:
}
}
package httpd
import (
"container/list"
"fmt"
"net"
"net/http"
"sync"
"sync/atomic"
"github.com/influxdata/influxdb/services/meta"
)
type RequestInfo struct {
IPAddr string
Username string
}
type RequestStats struct {
Writes int64 `json:"writes"`
Queries int64 `json:"queries"`
}
func (r *RequestInfo) String() string {
if r.Username != "" {
return fmt.Sprintf("%s:%s", r.Username, r.IPAddr)
}
return r.IPAddr
}
type RequestProfile struct {
tracker *RequestTracker
elem *list.Element
mu sync.RWMutex
Requests map[RequestInfo]*RequestStats
}
func (p *RequestProfile) AddWrite(info RequestInfo) {
p.add(info, p.addWrite)
}
func (p *RequestProfile) AddQuery(info RequestInfo) {
p.add(info, p.addQuery)
}
func (p *RequestProfile) add(info RequestInfo, fn func(*RequestStats)) {
// Look for a request entry for this request.
p.mu.RLock()
st, ok := p.Requests[info]
p.mu.RUnlock()
if ok {
fn(st)
return
}
// There is no entry in the request tracker. Create one.
p.mu.Lock()
if st, ok := p.Requests[info]; ok {
// Something else created this entry while we were waiting for the lock.
p.mu.Unlock()
fn(st)
return
}
st = &RequestStats{}
p.Requests[info] = st
p.mu.Unlock()
fn(st)
}
func (p *RequestProfile) addWrite(st *RequestStats) {
atomic.AddInt64(&st.Writes, 1)
}
func (p *RequestProfile) addQuery(st *RequestStats) {
atomic.AddInt64(&st.Queries, 1)
}
// Stop informs the RequestTracker to stop collecting statistics for this
// profile.
func (p *RequestProfile) Stop() {
p.tracker.mu.Lock()
p.tracker.profiles.Remove(p.elem)
p.tracker.mu.Unlock()
}
type RequestTracker struct {
mu sync.RWMutex
profiles *list.List
}
func NewRequestTracker() *RequestTracker {
return &RequestTracker{
profiles: list.New(),
}
}
func (rt *RequestTracker) TrackRequests() *RequestProfile {
// Perform the memory allocation outside of the lock.
profile := &RequestProfile{
Requests: make(map[RequestInfo]*RequestStats),
tracker: rt,
}
rt.mu.Lock()
profile.elem = rt.profiles.PushBack(profile)
rt.mu.Unlock()
return profile
}
func (rt *RequestTracker) Add(req *http.Request, user *meta.UserInfo) {
rt.mu.RLock()
if rt.profiles.Len() == 0 {
rt.mu.RUnlock()
return
}
defer rt.mu.RUnlock()
var info RequestInfo
host, _, err := net.SplitHostPort(req.RemoteAddr)
if err != nil {
return
}
info.IPAddr = host
if user != nil {
info.Username = user.Name
}
// Add the request info to the profiles.
for p := rt.profiles.Front(); p != nil; p = p.Next() {
profile := p.Value.(*RequestProfile)
if req.URL.Path == "/query" {
profile.AddQuery(info)
} else if req.URL.Path == "/write" {
profile.AddWrite(info)
}
}
}
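A sketch of driving the tracker directly (in the server, `serveDebugRequests` opens the window and the `/query` and `/write` handlers call `Add`; the sleep below stands in for live traffic):
```
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/influxdb/services/httpd"
)

func main() {
	rt := httpd.NewRequestTracker()

	profile := rt.TrackRequests() // start collecting per-client counts
	time.Sleep(10 * time.Second)  // rt.Add(req, user) would be called here
	profile.Stop()                // detach the profile from the tracker

	for info, stats := range profile.Requests {
		fmt.Printf("%s writes=%d queries=%d\n", info.String(), stats.Writes, stats.Queries)
	}
}
```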
......@@ -145,6 +145,11 @@ func (w *csvFormatter) WriteResponse(resp Response) (n int, err error) {
}
for _, values := range row.Values {
for i, value := range values {
if value == nil {
w.columns[i+2] = ""
continue
}
switch v := value.(type) {
case float64:
w.columns[i+2] = strconv.FormatFloat(v, 'f', -1, 64)
......@@ -160,6 +165,8 @@ func (w *csvFormatter) WriteResponse(resp Response) (n int, err error) {
}
case time.Time:
w.columns[i+2] = strconv.FormatInt(v.UnixNano(), 10)
case *float64, *int64, *string, *bool:
w.columns[i+2] = ""
}
}
csv.Write(w.columns)
......
package httpd_test
import (
"net/http"
"net/http/httptest"
"net/url"
"testing"
"time"
"github.com/influxdata/influxdb/influxql"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/services/httpd"
)
func TestResponseWriter_CSV(t *testing.T) {
header := make(http.Header)
header.Set("Accept", "text/csv")
r := &http.Request{
Header: header,
URL: &url.URL{},
}
w := httptest.NewRecorder()
writer := httpd.NewResponseWriter(w, r)
writer.WriteResponse(httpd.Response{
Results: []*influxql.Result{
{
StatementID: 0,
Series: []*models.Row{
{
Name: "cpu",
Tags: map[string]string{
"host": "server01",
"region": "uswest",
},
Columns: []string{"time", "value"},
Values: [][]interface{}{
{time.Unix(0, 10), float64(2.5)},
{time.Unix(0, 20), int64(5)},
{time.Unix(0, 30), nil},
{time.Unix(0, 40), "foobar"},
{time.Unix(0, 50), true},
{time.Unix(0, 60), false},
},
},
},
},
},
})
if got, want := w.Body.String(), `name,tags,time,value
cpu,"host=server01,region=uswest",10,2.5
cpu,"host=server01,region=uswest",20,5
cpu,"host=server01,region=uswest",30,
cpu,"host=server01,region=uswest",40,foobar
cpu,"host=server01,region=uswest",50,true
cpu,"host=server01,region=uswest",60,false
`; got != want {
t.Errorf("unexpected output:\n\ngot=%v\nwant=%s", got, want)
}
}
......@@ -147,7 +147,7 @@ func setMapValues(m map[string]interface{}, p models.Point) {
panic(err)
}
m["%f"] = getRandomFieldKey(fields)
m["%m"] = p.Name()
m["%m"] = string(p.Name())
m["%t"] = getRandomTagPair(p.Tags())
m["%a"] = p.UnixNano()
}
......
......@@ -9,8 +9,8 @@ func TestCommunePoint(t *testing.T) {
pt := "write,tag=tagVal fooField=5 1460912595"
comm.ch <- pt
point := comm.point("s")
if point.Name() != "write" {
t.Errorf("expected: write\ngot: %v", point.Name())
if string(point.Name()) != "write" {
t.Errorf("expected: write\ngot: %v", string(point.Name()))
}
if point.Tags().GetString("tag") != "tagVal" {
t.Errorf("expected: tagVal\ngot: %v", point.Tags().GetString("tag"))
......@@ -25,8 +25,8 @@ func TestCommunePoint(t *testing.T) {
// Make sure commune returns the prev point
comm.ch <- ""
point = comm.point("s")
if point.Name() != "write" {
t.Errorf("expected: write\ngot: %v", point.Name())
if string(point.Name()) != "write" {
t.Errorf("expected: write\ngot: %v", string(point.Name()))
}
if point.Tags().GetString("tag") != "tagVal" {
t.Errorf("expected: tagVal\ngot: %v", point.Tags().GetString("tag"))
......@@ -41,8 +41,8 @@ func TestSetCommune(t *testing.T) {
ch := sf.SetCommune("foo_name")
ch <- "write,tag=tagVal fooField=5 1460912595"
pt := sf.GetPoint("foo_name", "s")
if pt.Name() != "write" {
t.Errorf("expected: write\ngot: %v", pt.Name())
if string(pt.Name()) != "write" {
t.Errorf("expected: write\ngot: %v", string(pt.Name()))
}
if pt.Tags().GetString("tag") != "tagVal" {
t.Errorf("expected: tagVal\ngot: %v", pt.Tags().GetString("tag"))
......
......@@ -131,18 +131,18 @@ fi
case $ENVIRONMENT_INDEX in
0)
# 64 bit tests
run_test_docker Dockerfile_build_ubuntu64 test_64bit --generate --test --junit-report
run_test_docker Dockerfile_build_ubuntu64 test_64bit --test --junit-report
rc=$?
;;
1)
# 64 bit race tests
GORACE="halt_on_error=1"
run_test_docker Dockerfile_build_ubuntu64 test_64bit_race --generate --test --junit-report --race
run_test_docker Dockerfile_build_ubuntu64 test_64bit_race --test --junit-report --race
rc=$?
;;
2)
# 32 bit tests
run_test_docker Dockerfile_build_ubuntu32 test_32bit --generate --test --junit-report --arch=i386
run_test_docker Dockerfile_build_ubuntu32 test_32bit --test --junit-report --arch=i386
rc=$?
;;
"save")
......
......@@ -26,7 +26,6 @@ func TestMain(m *testing.M) {
c.Retention.Enabled = false
c.Monitor.StoreEnabled = false
c.Meta.LoggingEnabled = false
c.Admin.Enabled = false
c.Subscriber.Enabled = false
c.ContinuousQuery.Enabled = false
c.Data.MaxValuesPerTag = 1000000 // 1M
......@@ -4266,13 +4265,13 @@ func TestServer_Query_AggregateSelectors(t *testing.T) {
name: "distinct - time",
params: url.Values{"db": []string{"db0"}},
command: `SELECT time, distinct(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,
exp: `{"error":"error parsing query: aggregate function distinct() can not be combined with other functions or fields"}`,
exp: `{"error":"error parsing query: aggregate function distinct() cannot be combined with other functions or fields"}`,
},
&Query{
name: "distinct - tx",
params: url.Values{"db": []string{"db0"}},
command: `SELECT tx, distinct(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`,
exp: `{"error":"error parsing query: aggregate function distinct() can not be combined with other functions or fields"}`,
exp: `{"error":"error parsing query: aggregate function distinct() cannot be combined with other functions or fields"}`,
},
&Query{
name: "mean - baseline 30s",
......@@ -4631,19 +4630,19 @@ func TestServer_Query_TopInt(t *testing.T) {
name: "top - cpu - hourly",
params: url.Values{"db": []string{"db0"}},
command: `SELECT TOP(value, 1) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`,
exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T01:00:00Z",7],["2000-01-01T02:00:00Z",9]]}]}]}`,
exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:10Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`,
},
&Query{
name: "top - cpu - 2 values hourly",
params: url.Values{"db": []string{"db0"}},
command: `SELECT TOP(value, 2) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`,
exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:00Z",3],["2000-01-01T01:00:00Z",7],["2000-01-01T01:00:00Z",6],["2000-01-01T02:00:00Z",9],["2000-01-01T02:00:00Z",7]]}]}]}`,
exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:10Z",3],["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:10Z",7],["2000-01-01T01:00:20Z",6],["2000-01-01T02:00:00Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`,
},
&Query{
name: "top - cpu - 3 values hourly - validates that a bucket can have less than limit if no values exist in that time bucket",
params: url.Values{"db": []string{"db0"}},
command: `SELECT TOP(value, 3) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`,
exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:00Z",2],["2000-01-01T01:00:00Z",7],["2000-01-01T01:00:00Z",6],["2000-01-01T01:00:00Z",5],["2000-01-01T02:00:00Z",9],["2000-01-01T02:00:00Z",7]]}]}]}`,
exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:10Z",3],["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:00Z",5],["2000-01-01T01:00:10Z",7],["2000-01-01T01:00:20Z",6],["2000-01-01T02:00:00Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`,
},
&Query{
name: "top - memory - 2 values, two tags",
......@@ -4986,6 +4985,11 @@ func TestServer_Query_Subqueries(t *testing.T) {
},
&Query{
params: url.Values{"db": []string{"db0"}},
command: `SELECT host FROM (SELECT min(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z' GROUP BY host`,
exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","host"],"values":[["2000-01-01T00:00:20Z","server01"]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","host"],"values":[["2000-01-01T00:00:00Z","server02"]]}]}]}`,
},
&Query{
params: url.Values{"db": []string{"db0"}},
command: `SELECT mean(min) FROM (SELECT min(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`,
exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",17]]}]}]}`,
},
......@@ -5007,7 +5011,7 @@ func TestServer_Query_Subqueries(t *testing.T) {
&Query{
params: url.Values{"db": []string{"db0"}},
command: `SELECT host FROM (SELECT mean(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`,
exp: `{"results":[{"statement_id":0}]}`,
exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","host"],"values":[["2000-01-01T00:00:00Z","server01"],["2000-01-01T00:00:00Z","server02"]]}]}]}`,
},
&Query{
params: url.Values{"db": []string{"db0"}},
......@@ -5064,6 +5068,21 @@ func TestServer_Query_Subqueries(t *testing.T) {
command: `SELECT value FROM (SELECT max(usage_user), usage_user - usage_system AS value FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z' AND value > 0`,
exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:00Z",40]]}]}]}`,
},
&Query{
params: url.Values{"db": []string{"db0"}},
command: `SELECT max FROM (SELECT max(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z' AND host = 'server01'`,
exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",70]]}]}]}`,
},
&Query{
params: url.Values{"db": []string{"db0"}},
command: `SELECT mean(value) FROM (SELECT max(usage_user), usage_user - usage_system AS value FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z' AND value > 0`,
exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",40]]}]}]}`,
},
&Query{
params: url.Values{"db": []string{"db0"}},
command: `SELECT mean(value) FROM (SELECT max(usage_user), usage_user - usage_system AS value FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z' AND host =~ /server/`,
exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",-2]]}]}]}`,
},
}...)
for i, query := range test.queries {
......@@ -7930,6 +7949,64 @@ func TestServer_Query_Sample_Wildcard(t *testing.T) {
}
}
func TestServer_Query_Sample_LimitOffset(t *testing.T) {
t.Parallel()
s := OpenServer(NewConfig())
defer s.Close()
if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
t.Fatal(err)
}
writes := []string{
fmt.Sprintf(`cpu float=1,int=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()),
fmt.Sprintf(`cpu float=2,int=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()),
fmt.Sprintf(`cpu float=3,int=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:02:00Z").UnixNano()),
}
test := NewTest("db0", "rp0")
test.writes = Writes{
&Write{data: strings.Join(writes, "\n")},
}
test.addQueries([]*Query{
&Query{
name: "sample() with limit 1",
params: url.Values{"db": []string{"db0"}},
command: `SELECT sample(float, 3), int FROM cpu LIMIT 1`,
exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","sample","int"],"values":[["2000-01-01T00:00:00Z",1,1]]}]}]}`,
},
&Query{
name: "sample() with offset 1",
params: url.Values{"db": []string{"db0"}},
command: `SELECT sample(float, 3), int FROM cpu OFFSET 1`,
exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","sample","int"],"values":[["2000-01-01T00:01:00Z",2,2],["2000-01-01T00:02:00Z",3,3]]}]}]}`,
},
&Query{
name: "sample() with limit 1 offset 1",
params: url.Values{"db": []string{"db0"}},
command: `SELECT sample(float, 3), int FROM cpu LIMIT 1 OFFSET 1`,
exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","sample","int"],"values":[["2000-01-01T00:01:00Z",2,2]]}]}]}`,
},
}...)
if err := test.init(s); err != nil {
t.Fatalf("test init failed: %s", err)
}
for _, query := range test.queries {
if query.skip {
t.Logf("SKIP:: %s", query.name)
continue
}
if err := query.Execute(s); err != nil {
t.Error(query.Error(err))
} else if !query.success() {
t.Error(query.failureMessage())
}
}
}
// Validate that nested aggregates don't panic
func TestServer_NestedAggregateWithMathPanics(t *testing.T) {
t.Parallel()
......
......@@ -47,6 +47,10 @@ const (
// DefaultMaxValuesPerTag is the maximum number of values a tag can have within a measurement.
DefaultMaxValuesPerTag = 100000
// DefaultMaxConcurrentCompactions is the maximum number of concurrent full and level compactions
// that can run at one time. A value of 0 results in runtime.GOMAXPROCS(0) being used at runtime.
DefaultMaxConcurrentCompactions = 0
)
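
Reviewer note: for reference, a small sketch of how a zero value resolves to the number of usable CPUs, per the comment above (the helper name is assumed, not part of this change):

```
package main

import (
	"fmt"
	"runtime"
)

// resolveCompactionLimit treats 0 as "use runtime.GOMAXPROCS(0)",
// matching DefaultMaxConcurrentCompactions above. Illustrative only;
// the engine's actual wiring may differ.
func resolveCompactionLimit(configured int) int {
	if configured == 0 {
		return runtime.GOMAXPROCS(0)
	}
	return configured
}

func main() {
	fmt.Println(resolveCompactionLimit(0)) // number of usable CPUs
	fmt.Println(resolveCompactionLimit(4)) // 4
}
```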
// Config holds the configuration for the tsdb package.
......@@ -84,6 +88,12 @@ type Config struct {
// A value of 0 disables the limit.
MaxValuesPerTag int `toml:"max-values-per-tag"`
// MaxConcurrentCompactions is the maximum number of concurrent level and full compactions
// that can be running at one time across all shards. Compactions scheduled to run when the
// limit is reached are blocked until a running compaction completes. Snapshot compactions are
// not affected by this limit. A value of 0 limits compactions to runtime.GOMAXPROCS(0).
MaxConcurrentCompactions int `toml:"max-concurrent-compactions"`
TraceLoggingEnabled bool `toml:"trace-logging-enabled"`
}
......@@ -100,8 +110,9 @@ func NewConfig() Config {
CacheSnapshotWriteColdDuration: toml.Duration(DefaultCacheSnapshotWriteColdDuration),
CompactFullWriteColdDuration: toml.Duration(DefaultCompactFullWriteColdDuration),
MaxSeriesPerDatabase: DefaultMaxSeriesPerDatabase,
MaxValuesPerTag: DefaultMaxValuesPerTag,
MaxSeriesPerDatabase: DefaultMaxSeriesPerDatabase,
MaxValuesPerTag: DefaultMaxValuesPerTag,
MaxConcurrentCompactions: DefaultMaxConcurrentCompactions,
TraceLoggingEnabled: false,
}
......@@ -115,6 +126,10 @@ func (c *Config) Validate() error {
return errors.New("Data.WALDir must be specified")
}
if c.MaxConcurrentCompactions < 0 {
return errors.New("max-concurrent-compactions must be greater than 0")
}
valid := false
for _, e := range RegisteredEngines() {
if e == c.Engine {
......@@ -152,5 +167,6 @@ func (c Config) Diagnostics() (*diagnostics.Diagnostics, error) {
"compact-full-write-cold-duration": c.CompactFullWriteColdDuration,
"max-series-per-database": c.MaxSeriesPerDatabase,
"max-values-per-tag": c.MaxValuesPerTag,
"max-concurrent-compactions": c.MaxConcurrentCompactions,
}), nil
}
......
......@@ -12,6 +12,7 @@ import (
"github.com/influxdata/influxdb/influxql"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/influxdb/pkg/estimator"
"github.com/influxdata/influxdb/pkg/limiter"
"github.com/uber-go/zap"
)
......@@ -30,6 +31,8 @@ type Engine interface {
Open() error
Close() error
SetEnabled(enabled bool)
SetCompactionsEnabled(enabled bool)
WithLogger(zap.Logger)
LoadMetadataIndex(shardID uint64, index Index) error
......@@ -37,6 +40,7 @@ type Engine interface {
CreateSnapshot() (string, error)
Backup(w io.Writer, basePath string, since time.Time) error
Restore(r io.Reader, basePath string) error
Import(r io.Reader, basePath string) error
CreateIterator(measurement string, opt influxql.IteratorOptions) (influxql.Iterator, error)
WritePoints(points []models.Point) error
......@@ -52,7 +56,7 @@ type Engine interface {
MeasurementExists(name []byte) (bool, error)
MeasurementNamesByExpr(expr influxql.Expr) ([][]byte, error)
MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error)
MeasurementFields(measurement string) *MeasurementFields
MeasurementFields(measurement []byte) *MeasurementFields
ForEachMeasurementName(fn func(name []byte) error) error
DeleteMeasurement(name []byte) error
......@@ -70,6 +74,8 @@ type Engine interface {
// Statistics will return statistics relevant to this engine.
Statistics(tags map[string]string) []models.Statistic
LastModified() time.Time
DiskSize() int64
IsIdle() bool
io.WriterTo
}
......@@ -135,10 +141,11 @@ func NewEngine(id uint64, i Index, path string, walPath string, options EngineOp
// EngineOptions represents the options used to initialize the engine.
type EngineOptions struct {
EngineVersion string
IndexVersion string
ShardID uint64
InmemIndex interface{} // shared in-memory index
EngineVersion string
IndexVersion string
ShardID uint64
InmemIndex interface{} // shared in-memory index
CompactionLimiter limiter.Fixed
Config Config
}
......
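
Reviewer note: the new `CompactionLimiter limiter.Fixed` field suggests a fixed-capacity counting semaphore shared across shards. A minimal sketch of that style of limiter, assuming a Take/Release API (the real `pkg/limiter` implementation may differ in detail):

```
package main

import "fmt"

// Fixed is a counting semaphore in the style suggested by the
// CompactionLimiter field above; capacity bounds concurrency.
type Fixed chan struct{}

func NewFixed(limit int) Fixed { return make(Fixed, limit) }

func (f Fixed) Take()    { f <- struct{}{} }
func (f Fixed) Release() { <-f }

func main() {
	limit := NewFixed(2) // allow at most two concurrent compactions
	done := make(chan struct{})
	for i := 0; i < 4; i++ {
		go func() {
			limit.Take()
			defer limit.Release()
			// ... one compaction would run here ...
			done <- struct{}{}
		}()
	}
	for i := 0; i < 4; i++ {
		<-done
	}
	fmt.Println("all compactions finished")
}
```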
......@@ -260,7 +260,7 @@ func (c *Cache) Write(key string, values []Value) error {
// Enough room in the cache?
limit := c.maxSize
n := c.Size() + atomic.LoadUint64(&c.snapshotSize) + addedSize
n := c.Size() + addedSize
if limit > 0 && n > limit {
atomic.AddInt64(&c.stats.WriteErr, 1)
......@@ -293,7 +293,7 @@ func (c *Cache) WriteMulti(values map[string][]Value) error {
// Enough room in the cache?
limit := c.maxSize // maxSize is safe for reading without a lock.
n := c.Size() + atomic.LoadUint64(&c.snapshotSize) + addedSize
n := c.Size() + addedSize
if limit > 0 && n > limit {
atomic.AddInt64(&c.stats.WriteErr, 1)
return ErrCacheMemorySizeLimitExceeded(n, limit)
......@@ -416,7 +416,7 @@ func (c *Cache) ClearSnapshot(success bool) {
// Size returns the number of point-calculated bytes the cache currently uses, including bytes held by an in-progress snapshot.
func (c *Cache) Size() uint64 {
return atomic.LoadUint64(&c.size)
return atomic.LoadUint64(&c.size) + atomic.LoadUint64(&c.snapshotSize)
}
// increaseSize increases size by delta.
......
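
Reviewer note: the cache change above folds snapshot bytes into `Size()` itself, which is why the write paths no longer add `snapshotSize` separately and why the test below now expects `uint64(16)` after a snapshot. A self-contained sketch of the revised accounting (field names mirror the diff; the rest is illustrative):

```
package main

import (
	"fmt"
	"sync/atomic"
)

// cache sketches the revised accounting: Size reports live bytes plus
// bytes parked in an in-progress snapshot, so the limit check compares
// a single number.
type cache struct {
	size         uint64
	snapshotSize uint64
	maxSize      uint64
}

func (c *cache) Size() uint64 {
	return atomic.LoadUint64(&c.size) + atomic.LoadUint64(&c.snapshotSize)
}

// canWrite reports whether addedSize more bytes would fit under maxSize.
func (c *cache) canWrite(addedSize uint64) bool {
	n := c.Size() + addedSize
	return c.maxSize == 0 || n <= c.maxSize
}

func main() {
	c := &cache{size: 10, snapshotSize: 16, maxSize: 32}
	fmt.Println(c.canWrite(4)) // true: 10+16+4 <= 32
	fmt.Println(c.canWrite(8)) // false: 10+16+8 > 32
}
```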
......@@ -448,7 +448,7 @@ func TestCache_Snapshot_Stats(t *testing.T) {
}
// Store size should have been reset.
if got, exp := c.Size(), uint64(0); got != exp {
if got, exp := c.Size(), uint64(16); got != exp {
t.Fatalf("got %v, expected %v", got, exp)
}
......@@ -579,6 +579,10 @@ func TestCacheLoader_LoadSingle(t *testing.T) {
t.Fatal("write points", err)
}
if err := w.Flush(); err != nil {
t.Fatalf("flush error: %v", err)
}
// Load the cache using the segment.
cache := NewCache(1024, "")
loader := NewCacheLoader([]string{f.Name()})
......@@ -643,6 +647,9 @@ func TestCacheLoader_LoadDouble(t *testing.T) {
if err := w1.Write(mustMarshalEntry(entry)); err != nil {
t.Fatal("write points", err)
}
if err := w1.Flush(); err != nil {
t.Fatalf("flush error: %v", err)
}
}
values := map[string][]Value{
......@@ -707,6 +714,10 @@ func TestCacheLoader_LoadDeleted(t *testing.T) {
t.Fatal("write points", err)
}
if err := w.Flush(); err != nil {
t.Fatalf("flush error: %v", err)
}
dentry := &DeleteRangeWALEntry{
Keys: []string{"foo"},
Min: 2,
......@@ -717,6 +728,10 @@ func TestCacheLoader_LoadDeleted(t *testing.T) {
t.Fatal("write points", err)
}
if err := w.Flush(); err != nil {
t.Fatalf("flush error: %v", err)
}
// Load the cache using the segment.
cache := NewCache(1024, "")
loader := NewCacheLoader([]string{f.Name()})
......
......@@ -53,6 +53,8 @@ type CompactionPlanner interface {
Plan(lastWrite time.Time) []CompactionGroup
PlanLevel(level int) []CompactionGroup
PlanOptimize() []CompactionGroup
Release(group []CompactionGroup)
FullyCompacted() bool
}
// DefaultPlanner implements CompactionPlanner using a strategy to roll up
......@@ -60,17 +62,13 @@ type CompactionPlanner interface {
// to minimize the number of TSM files on disk while rolling up a bounded number
// of files.
type DefaultPlanner struct {
FileStore interface {
Stats() []FileStat
LastModified() time.Time
BlockCount(path string, idx int) int
}
FileStore fileStore
// CompactFullWriteColdDuration specifies the length of time after
// compactFullWriteColdDuration specifies the length of time after
// which, if no writes have been committed to the WAL, the engine will
// do a full compaction of the TSM files in this shard. This duration
// should always be greater than the CacheFlushWriteColdDuration.
CompactFullWriteColdDuration time.Duration
compactFullWriteColdDuration time.Duration
// lastPlanCheck is the last time Plan was called
lastPlanCheck time.Time
......@@ -81,6 +79,24 @@ type DefaultPlanner struct {
// lastGenerations is the last set of generations found by findGenerations
lastGenerations tsmGenerations
// filesInUse is the set of files that have been returned as part of a plan and may
// currently be undergoing compaction. Two plans should not return the same file at any given time.
filesInUse map[string]struct{}
}
type fileStore interface {
Stats() []FileStat
LastModified() time.Time
BlockCount(path string, idx int) int
}
func NewDefaultPlanner(fs fileStore, writeColdDuration time.Duration) *DefaultPlanner {
return &DefaultPlanner{
FileStore: fs,
compactFullWriteColdDuration: writeColdDuration,
filesInUse: make(map[string]struct{}),
}
}
// tsmGeneration represents the TSM files within a generation.
......@@ -106,7 +122,7 @@ func (t *tsmGeneration) level() int {
// 1 file with a sequence num of 1. Level 2 is generated by compacting multiple
// level 1 files. Level 3 is generated by compacting multiple level 2 files. Level
// 4 is for anything else.
_, seq, _ := ParseTSMFileName(t.files[0].Path)
_, seq, _ := ParseTSMFileName(t.files[len(t.files)-1].Path)
if seq < 4 {
return seq
}
......@@ -129,6 +145,12 @@ func (t *tsmGeneration) hasTombstones() bool {
return false
}
// FullyCompacted returns true if the shard is fully compacted.
func (c *DefaultPlanner) FullyCompacted() bool {
gens := c.findGenerations()
return len(gens) <= 1 && !gens.hasTombstones()
}
// PlanLevel returns a set of TSM files to rewrite for a specific level.
func (c *DefaultPlanner) PlanLevel(level int) []CompactionGroup {
// Determine the generations from all files on disk. We need to treat
......@@ -149,7 +171,7 @@ func (c *DefaultPlanner) PlanLevel(level int) []CompactionGroup {
for i := 0; i < len(generations); i++ {
cur := generations[i]
if len(currentGen) == 0 || currentGen[0].level() == cur.level() {
if len(currentGen) == 0 || currentGen.level() == cur.level() {
currentGen = append(currentGen, cur)
continue
}
......@@ -166,7 +188,7 @@ func (c *DefaultPlanner) PlanLevel(level int) []CompactionGroup {
// Remove any groups in the wrong level
var levelGroups []tsmGenerations
for _, cur := range groups {
if cur[0].level() == level {
if cur.level() == level {
levelGroups = append(levelGroups, cur)
}
}
......@@ -205,6 +227,10 @@ func (c *DefaultPlanner) PlanLevel(level int) []CompactionGroup {
}
}
if !c.acquire(cGroups) {
return nil
}
return cGroups
}
......@@ -230,7 +256,7 @@ func (c *DefaultPlanner) PlanOptimize() []CompactionGroup {
for i := 0; i < len(generations); i++ {
cur := generations[i]
if len(currentGen) == 0 || currentGen[0].level() == cur.level() {
if len(currentGen) == 0 || currentGen.level() == cur.level() {
currentGen = append(currentGen, cur)
continue
}
......@@ -248,7 +274,7 @@ func (c *DefaultPlanner) PlanOptimize() []CompactionGroup {
// with the level planners
var levelGroups []tsmGenerations
for _, cur := range groups {
if cur[0].level() == 4 {
if cur.level() == 4 {
levelGroups = append(levelGroups, cur)
}
}
......@@ -270,6 +296,10 @@ func (c *DefaultPlanner) PlanOptimize() []CompactionGroup {
cGroups = append(cGroups, cGroup)
}
if !c.acquire(cGroups) {
return nil
}
return cGroups
}
......@@ -279,7 +309,7 @@ func (c *DefaultPlanner) Plan(lastWrite time.Time) []CompactionGroup {
generations := c.findGenerations()
// first check if we should be doing a full compaction because nothing has been written in a long time
if c.CompactFullWriteColdDuration > 0 && time.Since(lastWrite) > c.CompactFullWriteColdDuration && len(generations) > 1 {
if c.compactFullWriteColdDuration > 0 && time.Since(lastWrite) > c.compactFullWriteColdDuration && len(generations) > 1 {
var tsmFiles []string
var genCount int
for i, group := range generations {
......@@ -316,7 +346,11 @@ func (c *DefaultPlanner) Plan(lastWrite time.Time) []CompactionGroup {
return nil
}
return []CompactionGroup{tsmFiles}
group := []CompactionGroup{tsmFiles}
if !c.acquire(group) {
return nil
}
return group
}
// don't plan if nothing has changed in the filestore
......@@ -449,6 +483,9 @@ func (c *DefaultPlanner) Plan(lastWrite time.Time) []CompactionGroup {
tsmFiles = append(tsmFiles, cGroup)
}
if !c.acquire(tsmFiles) {
return nil
}
return tsmFiles
}
......@@ -496,6 +533,40 @@ func (c *DefaultPlanner) findGenerations() tsmGenerations {
return orderedGenerations
}
func (c *DefaultPlanner) acquire(groups []CompactionGroup) bool {
c.mu.Lock()
defer c.mu.Unlock()
// See if the new files are already in use
for _, g := range groups {
for _, f := range g {
if _, ok := c.filesInUse[f]; ok {
return false
}
}
}
// Mark all the new files in use
for _, g := range groups {
for _, f := range g {
c.filesInUse[f] = struct{}{}
}
}
return true
}
// Release removes the file references in each compaction group, allowing new plans
// to use them.
func (c *DefaultPlanner) Release(groups []CompactionGroup) {
c.mu.Lock()
defer c.mu.Unlock()
for _, g := range groups {
for _, f := range g {
delete(c.filesInUse, f)
}
}
}
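
Reviewer note: taken together, `acquire` and `Release` give each plan exclusive use of its files. A hedged sketch of the intended call pattern (the interface here is narrowed for illustration and not part of this diff):

```
package main

import "fmt"

// CompactionGroup mirrors the type used above: one group of TSM file paths.
type CompactionGroup []string

// levelPlanner is a narrowed, assumed view of the CompactionPlanner
// interface, just enough to show the acquire/Release contract.
type levelPlanner interface {
	PlanLevel(level int) []CompactionGroup
	Release(groups []CompactionGroup)
}

// runLevelCompactions sketches the intended call pattern: groups
// returned by a Plan* method are implicitly acquired, so the caller
// must release them when compaction finishes or is abandoned.
func runLevelCompactions(p levelPlanner, level int) {
	groups := p.PlanLevel(level)
	if len(groups) == 0 {
		return // nothing to do, or another plan already holds these files
	}
	defer p.Release(groups)
	for _, g := range groups {
		fmt.Println("compacting", g) // placeholder for the real work
	}
}

func main() {}
```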
// Compactor merges multiple TSM files into new files or
// writes a Cache into 1 or more TSM files.
type Compactor struct {
......@@ -1249,6 +1320,17 @@ func (a tsmGenerations) hasTombstones() bool {
return false
}
func (a tsmGenerations) level() int {
var level int
for _, g := range a {
lev := g.level()
if lev > level {
level = lev
}
}
return level
}
func (a tsmGenerations) chunk(size int) []tsmGenerations {
var chunks []tsmGenerations
for len(a) > 0 {
......
......@@ -8,6 +8,7 @@ import (
"testing"
"time"
"github.com/influxdata/influxdb/tsdb"
"github.com/influxdata/influxdb/tsdb/engine/tsm1"
)
......@@ -1090,8 +1091,8 @@ func TestCacheKeyIterator_Chunked(t *testing.T) {
}
func TestDefaultPlanner_Plan_Min(t *testing.T) {
cp := &tsm1.DefaultPlanner{
FileStore: &fakeFileStore{
cp := tsm1.NewDefaultPlanner(
&fakeFileStore{
PathsFn: func() []tsm1.FileStat {
return []tsm1.FileStat{
tsm1.FileStat{
......@@ -1108,8 +1109,8 @@ func TestDefaultPlanner_Plan_Min(t *testing.T) {
},
}
},
},
}
}, tsdb.DefaultCompactFullWriteColdDuration,
)
tsm := cp.Plan(time.Now())
if exp, got := 0, len(tsm); got != exp {
......@@ -1151,13 +1152,13 @@ func TestDefaultPlanner_Plan_CombineSequence(t *testing.T) {
},
}
cp := &tsm1.DefaultPlanner{
FileStore: &fakeFileStore{
cp := tsm1.NewDefaultPlanner(
&fakeFileStore{
PathsFn: func() []tsm1.FileStat {
return data
},
},
}
}, tsdb.DefaultCompactFullWriteColdDuration,
)
expFiles := []tsm1.FileStat{data[0], data[1], data[2], data[3]}
tsm := cp.Plan(time.Now())
......@@ -1213,13 +1214,11 @@ func TestDefaultPlanner_Plan_MultipleGroups(t *testing.T) {
},
}
cp := &tsm1.DefaultPlanner{
FileStore: &fakeFileStore{
PathsFn: func() []tsm1.FileStat {
return data
},
cp := tsm1.NewDefaultPlanner(&fakeFileStore{
PathsFn: func() []tsm1.FileStat {
return data
},
}
}, tsdb.DefaultCompactFullWriteColdDuration)
expFiles := []tsm1.FileStat{data[0], data[1], data[2], data[3],
data[4], data[5], data[6], data[7]}
......@@ -1280,13 +1279,13 @@ func TestDefaultPlanner_PlanLevel_SmallestCompactionStep(t *testing.T) {
},
}
cp := &tsm1.DefaultPlanner{
FileStore: &fakeFileStore{
cp := tsm1.NewDefaultPlanner(
&fakeFileStore{
PathsFn: func() []tsm1.FileStat {
return data
},
},
}
}, tsdb.DefaultCompactFullWriteColdDuration,
)
expFiles := []tsm1.FileStat{data[4], data[5]}
tsm := cp.PlanLevel(1)
......@@ -1312,11 +1311,11 @@ func TestDefaultPlanner_PlanLevel_SplitFile(t *testing.T) {
Size: 1 * 1024 * 1024,
},
tsm1.FileStat{
Path: "03-03.tsm1",
Path: "03-02.tsm1",
Size: 2 * 1024 * 1024 * 1024,
},
tsm1.FileStat{
Path: "03-04.tsm1",
Path: "03-03.tsm1",
Size: 10 * 1024 * 1024,
},
tsm1.FileStat{
......@@ -1333,13 +1332,13 @@ func TestDefaultPlanner_PlanLevel_SplitFile(t *testing.T) {
},
}
cp := &tsm1.DefaultPlanner{
FileStore: &fakeFileStore{
cp := tsm1.NewDefaultPlanner(
&fakeFileStore{
PathsFn: func() []tsm1.FileStat {
return data
},
},
}
}, tsdb.DefaultCompactFullWriteColdDuration,
)
expFiles := []tsm1.FileStat{data[0], data[1], data[2], data[3], data[4]}
tsm := cp.PlanLevel(3)
......@@ -1382,13 +1381,13 @@ func TestDefaultPlanner_PlanLevel_IsolatedLowLevel(t *testing.T) {
},
}
cp := &tsm1.DefaultPlanner{
FileStore: &fakeFileStore{
cp := tsm1.NewDefaultPlanner(
&fakeFileStore{
PathsFn: func() []tsm1.FileStat {
return data
},
},
}
}, tsdb.DefaultCompactFullWriteColdDuration,
)
expFiles := []tsm1.FileStat{data[2], data[3]}
tsm := cp.PlanLevel(1)
......@@ -1435,13 +1434,13 @@ func TestDefaultPlanner_PlanLevel_IsolatedHighLevel(t *testing.T) {
},
}
cp := &tsm1.DefaultPlanner{
FileStore: &fakeFileStore{
cp := tsm1.NewDefaultPlanner(
&fakeFileStore{
PathsFn: func() []tsm1.FileStat {
return data
},
},
}
}, tsdb.DefaultCompactFullWriteColdDuration,
)
expFiles := []tsm1.FileStat{}
tsm := cp.PlanLevel(3)
......@@ -1478,13 +1477,13 @@ func TestDefaultPlanner_PlanLevel3_MinFiles(t *testing.T) {
},
}
cp := &tsm1.DefaultPlanner{
FileStore: &fakeFileStore{
cp := tsm1.NewDefaultPlanner(
&fakeFileStore{
PathsFn: func() []tsm1.FileStat {
return data
},
},
}
}, tsdb.DefaultCompactFullWriteColdDuration,
)
expFiles := []tsm1.FileStat{}
tsm := cp.PlanLevel(3)
......@@ -1510,13 +1509,13 @@ func TestDefaultPlanner_PlanLevel2_MinFiles(t *testing.T) {
},
}
cp := &tsm1.DefaultPlanner{
FileStore: &fakeFileStore{
cp := tsm1.NewDefaultPlanner(
&fakeFileStore{
PathsFn: func() []tsm1.FileStat {
return data
},
},
}
}, tsdb.DefaultCompactFullWriteColdDuration,
)
expFiles := []tsm1.FileStat{}
tsm := cp.PlanLevel(2)
......@@ -1554,13 +1553,13 @@ func TestDefaultPlanner_PlanLevel_Tombstone(t *testing.T) {
},
}
cp := &tsm1.DefaultPlanner{
FileStore: &fakeFileStore{
cp := tsm1.NewDefaultPlanner(
&fakeFileStore{
PathsFn: func() []tsm1.FileStat {
return data
},
},
}
}, tsdb.DefaultCompactFullWriteColdDuration,
)
expFiles := []tsm1.FileStat{data[0], data[1]}
tsm := cp.PlanLevel(3)
......@@ -1603,13 +1602,13 @@ func TestDefaultPlanner_PlanLevel_Multiple(t *testing.T) {
},
}
cp := &tsm1.DefaultPlanner{
FileStore: &fakeFileStore{
cp := tsm1.NewDefaultPlanner(
&fakeFileStore{
PathsFn: func() []tsm1.FileStat {
return data
},
},
}
}, tsdb.DefaultCompactFullWriteColdDuration,
)
expFiles1 := []tsm1.FileStat{data[0], data[1], data[2], data[3]}
expFiles2 := []tsm1.FileStat{data[4], data[5]}
......@@ -1652,13 +1651,13 @@ func TestDefaultPlanner_PlanOptimize_NoLevel4(t *testing.T) {
},
}
cp := &tsm1.DefaultPlanner{
FileStore: &fakeFileStore{
cp := tsm1.NewDefaultPlanner(
&fakeFileStore{
PathsFn: func() []tsm1.FileStat {
return data
},
},
}
}, tsdb.DefaultCompactFullWriteColdDuration,
)
expFiles := []tsm1.FileStat{}
tsm := cp.PlanOptimize()
......@@ -1695,13 +1694,13 @@ func TestDefaultPlanner_PlanOptimize_Level4(t *testing.T) {
},
}
cp := &tsm1.DefaultPlanner{
FileStore: &fakeFileStore{
cp := tsm1.NewDefaultPlanner(
&fakeFileStore{
PathsFn: func() []tsm1.FileStat {
return data
},
},
}
}, tsdb.DefaultCompactFullWriteColdDuration,
)
expFiles1 := []tsm1.FileStat{data[0], data[1], data[2], data[3]}
tsm := cp.PlanOptimize()
......@@ -1760,13 +1759,13 @@ func TestDefaultPlanner_PlanOptimize_Multiple(t *testing.T) {
},
}
cp := &tsm1.DefaultPlanner{
FileStore: &fakeFileStore{
cp := tsm1.NewDefaultPlanner(
&fakeFileStore{
PathsFn: func() []tsm1.FileStat {
return data
},
},
}
}, tsdb.DefaultCompactFullWriteColdDuration,
)
expFiles1 := []tsm1.FileStat{data[0], data[1], data[2], data[3]}
expFiles2 := []tsm1.FileStat{data[5], data[6], data[7], data[8]}
......@@ -1813,13 +1812,13 @@ func TestDefaultPlanner_PlanOptimize_Optimized(t *testing.T) {
},
}
cp := &tsm1.DefaultPlanner{
FileStore: &fakeFileStore{
cp := tsm1.NewDefaultPlanner(
&fakeFileStore{
PathsFn: func() []tsm1.FileStat {
return data
},
},
}
}, tsdb.DefaultCompactFullWriteColdDuration,
)
expFiles := []tsm1.FileStat{}
tsm := cp.PlanOptimize()
......@@ -1845,13 +1844,13 @@ func TestDefaultPlanner_PlanOptimize_Tombstones(t *testing.T) {
},
}
cp := &tsm1.DefaultPlanner{
FileStore: &fakeFileStore{
cp := tsm1.NewDefaultPlanner(
&fakeFileStore{
PathsFn: func() []tsm1.FileStat {
return data
},
},
}
}, tsdb.DefaultCompactFullWriteColdDuration,
)
expFiles := []tsm1.FileStat{data[0], data[1], data[2]}
tsm := cp.PlanOptimize()
......@@ -1897,14 +1896,14 @@ func TestDefaultPlanner_Plan_FullOnCold(t *testing.T) {
},
}
cp := &tsm1.DefaultPlanner{
FileStore: &fakeFileStore{
cp := tsm1.NewDefaultPlanner(
&fakeFileStore{
PathsFn: func() []tsm1.FileStat {
return data
},
},
CompactFullWriteColdDuration: time.Nanosecond,
}
time.Nanosecond,
)
tsm := cp.Plan(time.Now().Add(-time.Second))
if exp, got := len(data), len(tsm[0]); got != exp {
......@@ -1932,13 +1931,13 @@ func TestDefaultPlanner_Plan_SkipMaxSizeFiles(t *testing.T) {
},
}
cp := &tsm1.DefaultPlanner{
FileStore: &fakeFileStore{
cp := tsm1.NewDefaultPlanner(
&fakeFileStore{
PathsFn: func() []tsm1.FileStat {
return data
},
},
}
}, tsdb.DefaultCompactFullWriteColdDuration,
)
tsm := cp.Plan(time.Now())
if exp, got := 0, len(tsm); got != exp {
......@@ -1975,15 +1974,13 @@ func TestDefaultPlanner_Plan_SkipPlanningAfterFull(t *testing.T) {
blockCount: 1000,
}
cp := &tsm1.DefaultPlanner{
FileStore: fs,
CompactFullWriteColdDuration: time.Nanosecond,
}
cp := tsm1.NewDefaultPlanner(fs, time.Nanosecond)
plan := cp.Plan(time.Now().Add(-time.Second))
// first verify that our test set would return files
if exp, got := 4, len(cp.Plan(time.Now().Add(-time.Second))[0]); got != exp {
if exp, got := 4, len(plan[0]); got != exp {
t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp)
}
cp.Release(plan)
// skip planning if all files are over the limit
over := []tsm1.FileStat{
......@@ -2017,14 +2014,18 @@ func TestDefaultPlanner_Plan_SkipPlanningAfterFull(t *testing.T) {
}
cp.FileStore = overFs
if exp, got := 0, len(cp.Plan(time.Now().Add(-time.Second))); got != exp {
plan = cp.Plan(time.Now().Add(-time.Second))
if exp, got := 0, len(plan); got != exp {
t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp)
}
cp.Release(plan)
plan = cp.PlanOptimize()
// ensure the optimize planner would pick this up
if exp, got := 1, len(cp.PlanOptimize()); got != exp {
if exp, got := 1, len(plan); got != exp {
t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp)
}
cp.Release(plan)
cp.FileStore = fs
// ensure that it will plan if last modified has changed
......@@ -2082,15 +2083,14 @@ func TestDefaultPlanner_Plan_TwoGenLevel3(t *testing.T) {
},
}
cp := &tsm1.DefaultPlanner{
FileStore: &fakeFileStore{
cp := tsm1.NewDefaultPlanner(
&fakeFileStore{
blockCount: 1000,
PathsFn: func() []tsm1.FileStat {
return data
},
},
CompactFullWriteColdDuration: time.Hour,
}
time.Hour)
tsm := cp.Plan(time.Now().Add(-24 * time.Hour))
if exp, got := 1, len(tsm); got != exp {
......@@ -2127,15 +2127,17 @@ func TestDefaultPlanner_Plan_NotFullOverMaxsize(t *testing.T) {
blockCount: 100,
}
cp := &tsm1.DefaultPlanner{
FileStore: fs,
CompactFullWriteColdDuration: time.Nanosecond,
}
cp := tsm1.NewDefaultPlanner(
fs,
time.Nanosecond,
)
plan := cp.Plan(time.Now().Add(-time.Second))
// first verify that our test set would return files
if exp, got := 4, len(cp.Plan(time.Now().Add(-time.Second))[0]); got != exp {
if exp, got := 4, len(plan[0]); got != exp {
t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp)
}
cp.Release(plan)
// skip planning if all files are over the limit
over := []tsm1.FileStat{
......@@ -2188,13 +2190,13 @@ func TestDefaultPlanner_Plan_CompactsMiddleSteps(t *testing.T) {
},
}
cp := &tsm1.DefaultPlanner{
FileStore: &fakeFileStore{
cp := tsm1.NewDefaultPlanner(
&fakeFileStore{
PathsFn: func() []tsm1.FileStat {
return data
},
},
}
}, tsdb.DefaultCompactFullWriteColdDuration,
)
expFiles := []tsm1.FileStat{data[0], data[1], data[2], data[3]}
tsm := cp.Plan(time.Now())
......@@ -2210,8 +2212,8 @@ func TestDefaultPlanner_Plan_CompactsMiddleSteps(t *testing.T) {
}
func TestDefaultPlanner_Plan_LargeSets(t *testing.T) {
cp := &tsm1.DefaultPlanner{
FileStore: &fakeFileStore{
cp := tsm1.NewDefaultPlanner(
&fakeFileStore{
PathsFn: func() []tsm1.FileStat {
return []tsm1.FileStat{
tsm1.FileStat{
......@@ -2236,8 +2238,8 @@ func TestDefaultPlanner_Plan_LargeSets(t *testing.T) {
},
}
},
},
}
}, tsdb.DefaultCompactFullWriteColdDuration,
)
tsm := cp.Plan(time.Now())
if exp, got := 0, len(tsm); got != exp {
......@@ -2246,8 +2248,8 @@ func TestDefaultPlanner_Plan_LargeSets(t *testing.T) {
}
func TestDefaultPlanner_Plan_LargeGeneration(t *testing.T) {
cp := &tsm1.DefaultPlanner{
FileStore: &fakeFileStore{
cp := tsm1.NewDefaultPlanner(
&fakeFileStore{
PathsFn: func() []tsm1.FileStat {
return []tsm1.FileStat{
tsm1.FileStat{
......@@ -2272,8 +2274,8 @@ func TestDefaultPlanner_Plan_LargeGeneration(t *testing.T) {
},
}
},
},
}
}, tsdb.DefaultCompactFullWriteColdDuration,
)
tsm := cp.Plan(time.Now())
if exp, got := 0, len(tsm); got != exp {
......