refactor: use the built-in max/min to simplify the code by carrychair · Pull Request #16617 · prometheus/prometheus · GitHub
[go: up one dir, main page]
More Web Proxy on the site http://driver.im/
Skip to content

refactor: use the built-in max/min to simplify the code #16617

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
May 27, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 1 addition & 4 deletions tsdb/agent/db.go
Original file line number Diff line number Diff line change
Expand Up @@ -613,10 +613,7 @@ Loop:
//
// Subtracting a duration from ts will add a buffer for when series are
// considered inactive and safe for deletion.
ts := db.rs.LowestSentTimestamp() - db.opts.MinWALTime
if ts < 0 {
ts = 0
}
ts := max(db.rs.LowestSentTimestamp()-db.opts.MinWALTime, 0)

// Network issues can prevent the result of getRemoteWriteTimestamp from
// changing. We don't want data in the WAL to grow forever, so we set a cap
Expand Down
5 changes: 1 addition & 4 deletions tsdb/chunks/chunk_write_queue.go
Original file line number Diff line number Diff line change
Expand Up @@ -88,10 +88,7 @@ func newChunkWriteQueue(reg prometheus.Registerer, size int, writeChunk writeChu
[]string{"operation"},
)

segmentSize := size
if segmentSize > maxChunkQueueSegmentSize {
segmentSize = maxChunkQueueSegmentSize
}
segmentSize := min(size, maxChunkQueueSegmentSize)

q := &chunkWriteQueue{
jobs: newWriteJobQueue(size, segmentSize),
Expand Down
5 changes: 1 addition & 4 deletions tsdb/db.go
Original file line number Diff line number Diff line change
Expand Up @@ -979,10 +979,7 @@ func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rn

// Register metrics after assigning the head block.
db.metrics = newDBMetrics(db, r)
maxBytes := opts.MaxBytes
if maxBytes < 0 {
maxBytes = 0
}
maxBytes := max(opts.MaxBytes, 0)
db.metrics.maxBytes.Set(float64(maxBytes))
db.metrics.retentionDuration.Set((time.Duration(opts.RetentionDuration) * time.Millisecond).Seconds())

Expand Down
5 changes: 1 addition & 4 deletions tsdb/exemplar.go
Original file line number Diff line number Diff line change
Expand Up @@ -296,10 +296,7 @@ func (ce *CircularExemplarStorage) Resize(l int64) int {
ce.nextIndex = 0

// Replay as many entries as needed, starting with oldest first.
count := int64(len(oldBuffer))
if l < count {
count = l
}
count := min(l, int64(len(oldBuffer)))

migrated := 0

Expand Down
6 changes: 2 additions & 4 deletions tsdb/head_read.go
Original file line number Diff line number Diff line change
Expand Up @@ -568,10 +568,8 @@ func (s *memSeries) iterator(id chunks.HeadChunkID, c chunkenc.Chunk, isoState *
continue
}
}
stopAfter = numSamples - (appendIDsToConsider - index)
if stopAfter < 0 {
stopAfter = 0 // Stopped in a previous chunk.
}
// Stopped in a previous chunk.
stopAfter = max(numSamples-(appendIDsToConsider-index), 0)
break
}
}
Expand Down
30 changes: 6 additions & 24 deletions tsdb/head_wal.go
Original file line number Diff line number Diff line change
Expand Up @@ -281,10 +281,7 @@ Outer:
// cause thousands of very large in flight buffers occupying large amounts
// of unused memory.
for len(samples) > 0 {
m := 5000
if len(samples) < m {
m = len(samples)
}
m := min(len(samples), 5000)
for i := 0; i < concurrency; i++ {
if shards[i] == nil {
shards[i] = processors[i].reuseBuf()
Expand Down Expand Up @@ -346,10 +343,7 @@ Outer:
// cause thousands of very large in flight buffers occupying large amounts
// of unused memory.
for len(samples) > 0 {
m := 5000
if len(samples) < m {
m = len(samples)
}
m := min(len(samples), 5000)
for i := 0; i < concurrency; i++ {
if histogramShards[i] == nil {
histogramShards[i] = processors[i].reuseHistogramBuf()
Expand Down Expand Up @@ -382,10 +376,7 @@ Outer:
// cause thousands of very large in flight buffers occupying large amounts
// of unused memory.
for len(samples) > 0 {
m := 5000
if len(samples) < m {
m = len(samples)
}
m := min(len(samples), 5000)
for i := 0; i < concurrency; i++ {
if histogramShards[i] == nil {
histogramShards[i] = processors[i].reuseHistogramBuf()
Expand Down Expand Up @@ -813,10 +804,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
// cause thousands of very large in flight buffers occupying large amounts
// of unused memory.
for len(samples) > 0 {
m := 5000
if len(samples) < m {
m = len(samples)
}
m := min(len(samples), 5000)
for i := 0; i < concurrency; i++ {
if shards[i] == nil {
shards[i] = processors[i].reuseBuf()
Expand Down Expand Up @@ -869,10 +857,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
// cause thousands of very large in flight buffers occupying large amounts
// of unused memory.
for len(samples) > 0 {
m := 5000
if len(samples) < m {
m = len(samples)
}
m := min(len(samples), 5000)
for i := 0; i < concurrency; i++ {
if histogramShards[i] == nil {
histogramShards[i] = processors[i].reuseHistogramBuf()
Expand Down Expand Up @@ -901,10 +886,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
// cause thousands of very large in flight buffers occupying large amounts
// of unused memory.
for len(samples) > 0 {
m := 5000
if len(samples) < m {
m = len(samples)
}
m := min(len(samples), 5000)
for i := 0; i < concurrency; i++ {
if histogramShards[i] == nil {
histogramShards[i] = processors[i].reuseHistogramBuf()
Expand Down
5 changes: 1 addition & 4 deletions tsdb/tombstones/tombstones.go
Original file line number Diff line number Diff line change
Expand Up @@ -377,9 +377,6 @@ func (in Intervals) Add(n Interval) Intervals {
if n.Mint < in[mini].Mint {
in[mini].Mint = n.Mint
}
in[mini].Maxt = in[maxi+mini-1].Maxt
if n.Maxt > in[mini].Maxt {
in[mini].Maxt = n.Maxt
}
in[mini].Maxt = max(n.Maxt, in[maxi+mini-1].Maxt)
return append(in[:mini+1], in[maxi+mini:]...)
}
Loading
0