Unverified commit d9613e5c, authored by Bartlomiej Plotka, committed via GitHub

Merge pull request #6565 from prometheus/fixes-for-2.15.2

Cut release 2.15.2; cherry-picked the Windows compaction fix and support for reading indexes with unsorted postings offset tables.
......@@ -4,6 +4,7 @@ version: 2.1
orbs:
prometheus: prometheus/prometheus@0.3.0
go: circleci/go@0.2.0
win: circleci/windows@2.3.0
executors:
# Whenever the Go version is updated here, .promu.yml
......@@ -49,6 +50,13 @@ jobs:
key: v1-npm-deps-{{ checksum "web/ui/react-app/yarn.lock" }}
paths:
- web/ui/react-app/node_modules
test_windows:
executor: win/default
working_directory: /go/src/github.com/prometheus/prometheus
steps:
- checkout
# TSDB is where the most risk is Windows wise, so only test there for now.
- run: go test ./tsdb/...
fuzzit_regression:
executor: fuzzit
working_directory: /go/src/github.com/prometheus/prometheus
......@@ -78,6 +86,10 @@ workflows:
filters:
tags:
only: /.*/
- test_windows:
filters:
tags:
only: /.*/
- fuzzit_regression:
filters:
tags:
......
## 2.15.2 / 2020-01-06
* [BUGFIX] TSDB: Fixed support for TSDB blocks built with Prometheus before 2.1.0. #6564
* [BUGFIX] TSDB: Fixed block compaction issues on Windows. #6547
## 2.15.1 / 2019-12-25
* [BUGFIX] Fixed race on concurrent queries against same data. #6512
* [BUGFIX] TSDB: Fixed race on concurrent queries against same data. #6512
## 2.15.0 / 2019-12-23
......
......@@ -197,9 +197,11 @@ func TestCorruptedChunk(t *testing.T) {
testutil.Equals(t, test.openErr.Error(), err.Error())
return
}
defer func() { testutil.Ok(t, b.Close()) }()
querier, err := NewBlockQuerier(b, 0, 1)
testutil.Ok(t, err)
defer func() { testutil.Ok(t, querier.Close()) }()
set, err := querier.Select(labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
testutil.Ok(t, err)
......@@ -265,16 +267,20 @@ func TestBlockSize(t *testing.T) {
}
func TestReadIndexFormatV1(t *testing.T) {
/* The block here was produced at commit
07ef80820ef1250db82f9544f3fcf7f0f63ccee0 with:
db, _ := Open("v1db", nil, nil, nil)
app := db.Appender()
app.Add(labels.FromStrings("foo", "bar"), 1, 2)
app.Add(labels.FromStrings("foo", "baz"), 3, 4)
app.Add(labels.FromStrings("foo", "meh"), 1000*3600*4, 4) // Not in the block.
app.Commit()
db.compact()
db.Close()
/* The block here was produced at the commit
706602daed1487f7849990678b4ece4599745905 used in 2.0.0 with:
db, _ := Open("v1db", nil, nil, nil)
app := db.Appender()
app.Add(labels.FromStrings("foo", "bar"), 1, 2)
app.Add(labels.FromStrings("foo", "baz"), 3, 4)
app.Add(labels.FromStrings("foo", "meh"), 1000*3600*4, 4) // Not in the block.
// Make sure we've enough values for the lack of sorting of postings offsets to show up.
for i := 0; i < 100; i++ {
app.Add(labels.FromStrings("bar", strconv.FormatInt(int64(i), 10)), 0, 0)
}
app.Commit()
db.compact()
db.Close()
*/
blockDir := filepath.Join("testdata", "index_format_v1")
......@@ -288,7 +294,7 @@ func TestReadIndexFormatV1(t *testing.T) {
q, err = NewBlockQuerier(block, 0, 1000)
testutil.Ok(t, err)
testutil.Equals(t, query(t, q, labels.MustNewMatcher(labels.MatchNotRegexp, "foo", "^.$")),
testutil.Equals(t, query(t, q, labels.MustNewMatcher(labels.MatchNotRegexp, "foo", "^.?$")),
map[string][]tsdbutil.Sample{
`{foo="bar"}`: []tsdbutil.Sample{sample{t: 1, v: 2}},
`{foo="baz"}`: []tsdbutil.Sample{sample{t: 3, v: 4}},
......
......@@ -2655,6 +2655,7 @@ func TestChunkWriter_ReadAfterWrite(t *testing.T) {
// Check the content of the chunks.
r, err := chunks.NewDirReader(tempDir, nil)
testutil.Ok(t, err)
defer func() { testutil.Ok(t, r.Close()) }()
for _, chks := range test.chks {
for _, chkExp := range chks {
......@@ -2705,4 +2706,5 @@ func TestChunkReader_ConcurrentReads(t *testing.T) {
}
wg.Wait()
}
testutil.Ok(t, r.Close())
}
......@@ -511,11 +511,11 @@ func (w *Writer) finishSymbols() error {
return err
}
var err error
w.symbolFile, err = fileutil.OpenMmapFile(w.f.name)
sf, err := fileutil.OpenMmapFile(w.f.name)
if err != nil {
return err
}
w.symbolFile = sf
hash := crc32.Checksum(w.symbolFile.Bytes()[w.toc.Symbols+4:hashPos], castagnoliTable)
w.buf1.Reset()
w.buf1.PutBE32(hash)
......@@ -700,7 +700,11 @@ func (w *Writer) writePostingsOffsetTable() error {
if err != nil {
return err
}
defer f.Close()
defer func() {
if f != nil {
f.Close()
}
}()
d := encoding.NewDecbufRaw(realByteSlice(f.Bytes()), int(w.fPO.pos))
cnt := w.cntPO
for d.Err() == nil && cnt > 0 {
......@@ -720,6 +724,10 @@ func (w *Writer) writePostingsOffsetTable() error {
}
// Cleanup temporary file.
if err := f.Close(); err != nil {
return err
}
f = nil
if err := w.fPO.close(); err != nil {
return err
}
......@@ -962,9 +970,9 @@ type labelIndexHashEntry struct {
}
func (w *Writer) Close() error {
if err := w.ensureStage(idxStageDone); err != nil {
return err
}
// Even if this fails, we need to close all the files.
ensureErr := w.ensureStage(idxStageDone)
if w.symbolFile != nil {
if err := w.symbolFile.Close(); err != nil {
return err
......@@ -980,7 +988,10 @@ func (w *Writer) Close() error {
return err
}
}
return w.f.close()
if err := w.f.close(); err != nil {
return err
}
return ensureErr
}
// StringTuples provides access to a sorted list of string tuples.
......@@ -1013,6 +1024,8 @@ type Reader struct {
// Map of LabelName to a list of some LabelValues's position in the offset table.
// The first and last values for each name are always present.
postings map[string][]postingOffset
// For the v1 format, labelname -> labelvalue -> offset.
postingsV1 map[string]map[string]uint64
symbols *Symbols
nameSymbols map[uint32]string // Cache of the label name symbol lookups,
......@@ -1102,45 +1115,64 @@ func newReader(b ByteSlice, c io.Closer) (*Reader, error) {
return nil, errors.Wrap(err, "read symbols")
}
var lastKey []string
lastOff := 0
valueCount := 0
// For the postings offset table we keep every label name but only every nth
// label value (plus the first and last one), to save memory.
if err := ReadOffsetTable(r.b, r.toc.PostingsTable, func(key []string, _ uint64, off int) error {
if len(key) != 2 {
return errors.Errorf("unexpected key length for posting table %d", len(key))
if r.version == FormatV1 {
// Earlier V1 formats don't have a sorted postings offset table, so
// load the whole offset table into memory.
r.postingsV1 = map[string]map[string]uint64{}
if err := ReadOffsetTable(r.b, r.toc.PostingsTable, func(key []string, off uint64, _ int) error {
if len(key) != 2 {
return errors.Errorf("unexpected key length for posting table %d", len(key))
}
if _, ok := r.postingsV1[key[0]]; !ok {
r.postingsV1[key[0]] = map[string]uint64{}
r.postings[key[0]] = nil // Used to get a list of labelnames in places.
}
r.postingsV1[key[0]][key[1]] = off
return nil
}); err != nil {
return nil, errors.Wrap(err, "read postings table")
}
if _, ok := r.postings[key[0]]; !ok {
// Next label name.
r.postings[key[0]] = []postingOffset{}
if lastKey != nil {
// Always include last value for each label name.
r.postings[lastKey[0]] = append(r.postings[lastKey[0]], postingOffset{value: lastKey[1], off: lastOff})
} else {
var lastKey []string
lastOff := 0
valueCount := 0
// For the postings offset table we keep every label name but only every nth
// label value (plus the first and last one), to save memory.
if err := ReadOffsetTable(r.b, r.toc.PostingsTable, func(key []string, _ uint64, off int) error {
if len(key) != 2 {
return errors.Errorf("unexpected key length for posting table %d", len(key))
}
if _, ok := r.postings[key[0]]; !ok {
// Next label name.
r.postings[key[0]] = []postingOffset{}
if lastKey != nil {
// Always include last value for each label name.
r.postings[lastKey[0]] = append(r.postings[lastKey[0]], postingOffset{value: lastKey[1], off: lastOff})
}
lastKey = nil
valueCount = 0
}
if valueCount%32 == 0 {
r.postings[key[0]] = append(r.postings[key[0]], postingOffset{value: key[1], off: off})
lastKey = nil
} else {
lastKey = key
lastOff = off
}
lastKey = nil
valueCount = 0
valueCount++
return nil
}); err != nil {
return nil, errors.Wrap(err, "read postings table")
}
if valueCount%32 == 0 {
r.postings[key[0]] = append(r.postings[key[0]], postingOffset{value: key[1], off: off})
lastKey = nil
} else {
lastKey = key
lastOff = off
if lastKey != nil {
r.postings[lastKey[0]] = append(r.postings[lastKey[0]], postingOffset{value: lastKey[1], off: lastOff})
}
// Trim any extra space in the slices.
for k, v := range r.postings {
l := make([]postingOffset, len(v))
copy(l, v)
r.postings[k] = l
}
valueCount++
return nil
}); err != nil {
return nil, errors.Wrap(err, "read postings table")
}
if lastKey != nil {
r.postings[lastKey[0]] = append(r.postings[lastKey[0]], postingOffset{value: lastKey[1], off: lastOff})
}
// Trim any extra space in the slices.
for k, v := range r.postings {
l := make([]postingOffset, len(v))
copy(l, v)
r.postings[k] = l
}
r.nameSymbols = make(map[uint32]string, len(r.postings))
......@@ -1397,6 +1429,19 @@ func (r *Reader) LabelValues(names ...string) (StringTuples, error) {
if len(names) != 1 {
return nil, errors.Errorf("only one label name supported")
}
if r.version == FormatV1 {
e, ok := r.postingsV1[names[0]]
if !ok {
return emptyStringTuples{}, nil
}
values := make([]string, 0, len(e))
for k := range e {
values = append(values, k)
}
sort.Strings(values)
return NewStringTuples(values, 1)
}
e, ok := r.postings[names[0]]
if !ok {
return emptyStringTuples{}, nil
......@@ -1456,6 +1501,28 @@ func (r *Reader) Series(id uint64, lbls *labels.Labels, chks *[]chunks.Meta) err
}
func (r *Reader) Postings(name string, values ...string) (Postings, error) {
if r.version == FormatV1 {
e, ok := r.postingsV1[name]
if !ok {
return EmptyPostings(), nil
}
res := make([]Postings, 0, len(values))
for _, v := range values {
postingsOff, ok := e[v]
if !ok {
continue
}
// Read from the postings table.
d := encoding.NewDecbufAt(r.b, int(postingsOff), castagnoliTable)
_, p, err := r.dec.Postings(d.Get())
if err != nil {
return nil, errors.Wrap(err, "decode postings")
}
res = append(res, p)
}
return Merge(res...), nil
}
e, ok := r.postings[name]
if !ok {
return EmptyPostings(), nil
......
......@@ -280,6 +280,7 @@ func TestPostingsMany(t *testing.T) {
ir, err := NewFileReader(fn)
testutil.Ok(t, err)
defer func() { testutil.Ok(t, ir.Close()) }()
cases := []struct {
in []string
......
{
"version": 1,
"ulid": "01DVZX4CHY2EGZ6JQVS80AB9CF",
"ulid": "01DXXFZDYD1MQW6079WK0K6EDQ",
"minTime": 0,
"maxTime": 7200000,
"stats": {
"numSamples": 2,
"numSeries": 2,
"numChunks": 2
"numSamples": 102,
"numSeries": 102,
"numChunks": 102
},
"compaction": {
"level": 1,
"sources": [
"01DVZX4CHY2EGZ6JQVS80AB9CF"
"01DXXFZDYD1MQW6079WK0K6EDQ"
]
}
}
......@@ -427,6 +427,7 @@ func TestReadCheckpointMultipleSegments(t *testing.T) {
}
}
}
testutil.Ok(t, w.Close())
// At this point we should have at least 6 segments, lets create a checkpoint dir of the first 5.
checkpointDir := dir + "/wal/checkpoint.000004"
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册