TUN-9467: bump coredns to solve CVE

* TUN-9467: bump coredns to solve CVE
Author: João Oliveirinha
Date: 2025-06-12 10:46:10 +00:00
Committed by: João "Pisco" Fernandes
Parent: f8d12c9d39
Commit: a408612f26
459 changed files with 30077 additions and 16165 deletions

View File

@@ -201,10 +201,9 @@ func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error {
}
}
wait := make(chan error)
wait := make(chan error, 1)
go func() {
wait <- bsp.exportSpans(ctx)
close(wait)
}()
// Wait until the export is finished or the context is cancelled/timed out
select {
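
The switch to wait := make(chan error, 1) is about goroutine hygiene: the select below can return early on ctx.Done(), and with an unbuffered channel the exporting goroutine would then block forever on its send. A minimal, self-contained sketch of the pattern follows; doWork and flush are placeholders invented for the example, not SDK code.

package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

// doWork stands in for bsp.exportSpans: a call that may take longer than the
// caller is willing to wait. It is a hypothetical helper, not part of the SDK.
func doWork(ctx context.Context) error {
    time.Sleep(200 * time.Millisecond)
    return nil
}

// flush runs doWork in a goroutine and waits for either its result or ctx.
// The buffer of 1 lets the goroutine deliver its result and exit even if the
// caller has already returned with ctx.Err(); an unbuffered send would block
// forever and leak the goroutine.
func flush(ctx context.Context) error {
    wait := make(chan error, 1)
    go func() {
        wait <- doWork(ctx)
        close(wait)
    }()
    select {
    case err := <-wait:
        return err
    case <-ctx.Done():
        return ctx.Err()
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
    defer cancel()
    err := flush(ctx)
    // The caller gives up after 50ms; the worker still finishes and exits cleanly.
    fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
}
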
@@ -280,6 +279,7 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error {
//
// It is up to the exporter to implement any type of retry logic if a batch is failing
// to be exported, since it is specific to the protocol and backend being sent to.
clear(bsp.batch) // Erase elements to let GC collect objects
bsp.batch = bsp.batch[:0]
if err != nil {
@@ -316,7 +316,11 @@ func (bsp *batchSpanProcessor) processQueue() {
bsp.batchMutex.Unlock()
if shouldExport {
if !bsp.timer.Stop() {
<-bsp.timer.C
// Handle both GODEBUG=asynctimerchan=[0|1] properly.
select {
case <-bsp.timer.C:
default:
}
}
if err := bsp.exportSpans(ctx); err != nil {
otel.Handle(err)
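
The non-blocking drain replaces a plain <-bsp.timer.C, which assumed the buffered timer channel of Go versions before 1.23: there, a timer that already fired leaves its value in the channel. Under the Go 1.23 semantics (GODEBUG=asynctimerchan=0), Stop guarantees that no stale value is delivered afterwards, so the unconditional receive could block forever. A small sketch of the pattern, with an arbitrary 10ms period:

package main

import (
    "fmt"
    "time"
)

func main() {
    timer := time.NewTimer(10 * time.Millisecond)
    defer timer.Stop()

    time.Sleep(20 * time.Millisecond) // let the timer fire so Stop returns false

    if !timer.Stop() {
        // Drain without blocking: with the pre-1.23 implementation the fired
        // value is sitting in timer.C; with the new implementation the channel
        // may deliver nothing after Stop, so a plain receive could hang.
        select {
        case <-timer.C:
        default:
        }
    }
    timer.Reset(10 * time.Millisecond)
    fmt.Println("timer safely reset, fired at:", <-timer.C)
}
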
@@ -381,7 +385,7 @@ func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd R
}
}
func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool {
func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) bool {
if !sd.SpanContext().IsSampled() {
return false
}

View File

@@ -3,23 +3,44 @@
package trace // import "go.opentelemetry.io/otel/sdk/trace"
import (
"slices"
"sync"
"go.opentelemetry.io/otel/internal/global"
)
// evictedQueue is a FIFO queue with a configurable capacity.
type evictedQueue struct {
queue []interface{}
capacity int
droppedCount int
type evictedQueue[T any] struct {
queue []T
capacity int
droppedCount int
logDroppedMsg string
logDroppedOnce sync.Once
}
func newEvictedQueue(capacity int) evictedQueue {
func newEvictedQueueEvent(capacity int) evictedQueue[Event] {
// Do not pre-allocate queue, do this lazily.
return evictedQueue{capacity: capacity}
return evictedQueue[Event]{
capacity: capacity,
logDroppedMsg: "limit reached: dropping trace trace.Event",
}
}
func newEvictedQueueLink(capacity int) evictedQueue[Link] {
// Do not pre-allocate queue, do this lazily.
return evictedQueue[Link]{
capacity: capacity,
logDroppedMsg: "limit reached: dropping trace trace.Link",
}
}
// add adds value to the evictedQueue eq. If eq is at capacity, the oldest
// queued value will be discarded and the drop count incremented.
func (eq *evictedQueue) add(value interface{}) {
func (eq *evictedQueue[T]) add(value T) {
if eq.capacity == 0 {
eq.droppedCount++
eq.logDropped()
return
}
@@ -28,6 +49,16 @@ func (eq *evictedQueue) add(value interface{}) {
copy(eq.queue[:eq.capacity-1], eq.queue[1:])
eq.queue = eq.queue[:eq.capacity-1]
eq.droppedCount++
eq.logDropped()
}
eq.queue = append(eq.queue, value)
}
func (eq *evictedQueue[T]) logDropped() {
eq.logDroppedOnce.Do(func() { global.Warn(eq.logDroppedMsg) })
}
// copy returns a copy of the evictedQueue.
func (eq *evictedQueue[T]) copy() []T {
return slices.Clone(eq.queue)
}
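
Two things in the rewritten queue generalize well: the type parameter removes the interface{} boxing and the per-element type assertions that the old []interface{} queue needed, and the sync.Once keeps the "dropping data" warning from flooding logs. A condensed, freestanding sketch of the same shape; boundedQueue and its log.Println warning are illustrative stand-ins for evictedQueue and global.Warn.

package main

import (
    "fmt"
    "log"
    "slices"
    "sync"
)

// boundedQueue is a FIFO queue that evicts the oldest element once capacity is
// reached, counting what it dropped and warning only once.
type boundedQueue[T any] struct {
    queue    []T
    capacity int
    dropped  int
    warnOnce sync.Once
}

func (q *boundedQueue[T]) add(v T) {
    warn := func() { log.Println("limit reached: dropping element") }
    if q.capacity == 0 {
        q.dropped++
        q.warnOnce.Do(warn)
        return
    }
    if len(q.queue) == q.capacity {
        // Shift left to evict the oldest element, reusing the backing array.
        copy(q.queue[:q.capacity-1], q.queue[1:])
        q.queue = q.queue[:q.capacity-1]
        q.dropped++
        q.warnOnce.Do(warn)
    }
    q.queue = append(q.queue, v)
}

// snapshot returns an independent copy, like evictedQueue.copy.
func (q *boundedQueue[T]) snapshot() []T { return slices.Clone(q.queue) }

func main() {
    q := boundedQueue[int]{capacity: 3}
    for i := 1; i <= 5; i++ {
        q.add(i)
    }
    fmt.Println(q.snapshot(), "dropped:", q.dropped) // [3 4 5] dropped: 2
}
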

View File

@@ -41,7 +41,12 @@ func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.Trace
gen.Lock()
defer gen.Unlock()
sid := trace.SpanID{}
_, _ = gen.randSource.Read(sid[:])
for {
_, _ = gen.randSource.Read(sid[:])
if sid.IsValid() {
break
}
}
return sid
}
@@ -51,9 +56,19 @@ func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.
gen.Lock()
defer gen.Unlock()
tid := trace.TraceID{}
_, _ = gen.randSource.Read(tid[:])
sid := trace.SpanID{}
_, _ = gen.randSource.Read(sid[:])
for {
_, _ = gen.randSource.Read(tid[:])
if tid.IsValid() {
break
}
}
for {
_, _ = gen.randSource.Read(sid[:])
if sid.IsValid() {
break
}
}
return tid, sid
}
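
The new loops guard against the rare case where the random source yields an all-zero ID, which the W3C trace-context rules treat as invalid and which consumers may silently discard. The same idea in isolation, using crypto/rand for brevity (the SDK draws from its own locked randSource instead):

package main

import (
    "crypto/rand"
    "fmt"

    "go.opentelemetry.io/otel/trace"
)

// newSpanID keeps drawing random bytes until the result is a valid, non-zero
// span ID, mirroring the loop added to randomIDGenerator.
func newSpanID() trace.SpanID {
    var sid trace.SpanID
    for {
        _, _ = rand.Read(sid[:])
        if sid.IsValid() {
            return sid
        }
    }
}

func main() {
    fmt.Println(newSpanID())
}
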

View File

@@ -139,9 +139,10 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
name = defaultTracerName
}
is := instrumentation.Scope{
Name: name,
Version: c.InstrumentationVersion(),
SchemaURL: c.SchemaURL(),
Name: name,
Version: c.InstrumentationVersion(),
SchemaURL: c.SchemaURL(),
Attributes: c.InstrumentationAttributes(),
}
t, ok := func() (trace.Tracer, bool) {
@@ -168,7 +169,7 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
// slowing down all tracing consumers.
// - Logging code may be instrumented with tracing and deadlock because it could try
// acquiring the same non-reentrant mutex.
global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL)
global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL, "attributes", is.Attributes)
}
return t
}
@@ -291,7 +292,7 @@ func (p *TracerProvider) Shutdown(ctx context.Context) error {
retErr = err
} else {
// Poor man's list of errors
retErr = fmt.Errorf("%v; %v", retErr, err)
retErr = fmt.Errorf("%w; %w", retErr, err)
}
}
}
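
Replacing %v with %w in the "poor man's list of errors" is a behavioural fix, not a style one: since Go 1.20, fmt.Errorf with multiple %w verbs wraps every operand, so errors.Is and errors.As can still find each processor's failure inside the combined shutdown error. A quick illustration with two invented sentinel errors:

package main

import (
    "errors"
    "fmt"
)

var (
    errExporterDown = errors.New("exporter down") // hypothetical sentinel
    errQueueFull    = errors.New("queue full")    // hypothetical sentinel
)

func main() {
    // Accumulate a second failure onto the first, as Shutdown now does.
    var retErr error = errExporterDown
    retErr = fmt.Errorf("%w; %w", retErr, errQueueFull)

    fmt.Println(retErr)                          // exporter down; queue full
    fmt.Println(errors.Is(retErr, errQueueFull)) // true with %w; false with the old %v
}
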

View File

@@ -5,7 +5,6 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace"
import (
"errors"
"fmt"
"os"
"strconv"
"strings"
@@ -26,7 +25,7 @@ const (
type errUnsupportedSampler string
func (e errUnsupportedSampler) Error() string {
return fmt.Sprintf("unsupported sampler: %s", string(e))
return "unsupported sampler: " + string(e)
}
var (
@@ -39,7 +38,7 @@ type samplerArgParseError struct {
}
func (e samplerArgParseError) Error() string {
return fmt.Sprintf("parsing sampler argument: %s", e.parseErr.Error())
return "parsing sampler argument: " + e.parseErr.Error()
}
func (e samplerArgParseError) Unwrap() error {

View File

@@ -47,12 +47,12 @@ const (
// Drop will not record the span and all attributes/events will be dropped.
Drop SamplingDecision = iota
// Record indicates the span's `IsRecording() == true`, but `Sampled` flag
// *must not* be set.
// RecordOnly indicates the span's IsRecording method returns true, but trace.FlagsSampled flag
// must not be set.
RecordOnly
// RecordAndSample has span's `IsRecording() == true` and `Sampled` flag
// *must* be set.
// RecordAndSample indicates the span's IsRecording method returns true and trace.FlagsSampled flag
// must be set.
RecordAndSample
)
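
The reworded comments make the contract explicit: RecordOnly spans have IsRecording() == true but must not carry trace.FlagsSampled, while RecordAndSample sets both. A sketch of a custom Sampler returning those decisions; the type name and the "last trace-ID byte is zero" rule are invented purely for illustration.

package main

import (
    sdktrace "go.opentelemetry.io/otel/sdk/trace"
    "go.opentelemetry.io/otel/trace"
)

// recordOnlySampler records every span locally but only marks a span as
// sampled (exported and propagated with trace.FlagsSampled) when the last
// byte of the trace ID is zero.
type recordOnlySampler struct{}

func (recordOnlySampler) ShouldSample(p sdktrace.SamplingParameters) sdktrace.SamplingResult {
    psc := trace.SpanContextFromContext(p.ParentContext)
    if p.TraceID[len(p.TraceID)-1] == 0 {
        return sdktrace.SamplingResult{
            Decision:   sdktrace.RecordAndSample, // IsRecording() == true, FlagsSampled set
            Tracestate: psc.TraceState(),
        }
    }
    return sdktrace.SamplingResult{
        Decision:   sdktrace.RecordOnly, // IsRecording() == true, FlagsSampled must not be set
        Tracestate: psc.TraceState(),
    }
}

func (recordOnlySampler) Description() string { return "recordOnlySampler" }

func main() {
    _ = sdktrace.NewTracerProvider(sdktrace.WithSampler(recordOnlySampler{}))
}
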

View File

@@ -58,7 +58,7 @@ func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error {
var err error
ssp.stopOnce.Do(func() {
stopFunc := func(exp SpanExporter) (<-chan error, func()) {
done := make(chan error)
done := make(chan error, 1)
return done, func() { done <- exp.Shutdown(ctx) }
}

View File

@@ -99,7 +99,7 @@ func (s snapshot) InstrumentationScope() instrumentation.Scope {
// InstrumentationLibrary returns information about the instrumentation
// library that created the span.
func (s snapshot) InstrumentationLibrary() instrumentation.Library {
func (s snapshot) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be define for backwards compatibility
return s.instrumentationScope
}

View File

@@ -17,10 +17,10 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/internal/global"
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/internal"
"go.opentelemetry.io/otel/sdk/resource"
semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
)
@@ -62,7 +62,7 @@ type ReadOnlySpan interface {
// InstrumentationLibrary returns information about the instrumentation
// library that created the span.
// Deprecated: please use InstrumentationScope instead.
InstrumentationLibrary() instrumentation.Library
InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be define for backwards compatibility
// Resource returns information about the entity that produced the span.
Resource() *resource.Resource
// DroppedAttributes returns the number of attributes dropped by the span
@@ -137,12 +137,13 @@ type recordingSpan struct {
// ReadOnlySpan exported when the span ends.
attributes []attribute.KeyValue
droppedAttributes int
logDropAttrsOnce sync.Once
// events are stored in FIFO queue capped by configured limit.
events evictedQueue
events evictedQueue[Event]
// links are stored in FIFO queue capped by configured limit.
links evictedQueue
links evictedQueue[Link]
// executionTracerTaskEnd ends the execution tracer span.
executionTracerTaskEnd func()
@@ -173,6 +174,17 @@ func (s *recordingSpan) IsRecording() bool {
s.mu.Lock()
defer s.mu.Unlock()
return s.isRecording()
}
// isRecording returns if this span is being recorded. If this span has ended
// this will return false.
//
// This method assumes s.mu.Lock is held by the caller.
func (s *recordingSpan) isRecording() bool {
if s == nil {
return false
}
return s.endTime.IsZero()
}
@@ -181,11 +193,15 @@ func (s *recordingSpan) IsRecording() bool {
// included in the set status when the code is for an error. If this span is
// not being recorded than this method does nothing.
func (s *recordingSpan) SetStatus(code codes.Code, description string) {
if !s.IsRecording() {
if s == nil {
return
}
s.mu.Lock()
defer s.mu.Unlock()
if !s.isRecording() {
return
}
if s.status.Code > code {
return
}
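
The pattern repeated throughout this file (nil check, lock, then the unexported isRecording under the same lock) closes a race in the old code: IsRecording took and released the mutex, so End could land between that check and the mutation that followed. A freestanding sketch of the idea, using an invented record type rather than SDK internals:

package main

import (
    "fmt"
    "sync"
)

// record is a stand-in for recordingSpan: it accepts mutations until ended.
type record struct {
    mu     sync.Mutex
    ended  bool
    status string
}

// setStatus performs the "still recording?" check and the write inside one
// critical section, so a concurrent end() cannot slip in between them.
func (r *record) setStatus(s string) {
    r.mu.Lock()
    defer r.mu.Unlock()
    if r.ended {
        return
    }
    r.status = s
}

func (r *record) end() {
    r.mu.Lock()
    defer r.mu.Unlock()
    r.ended = true
}

func main() {
    r := &record{}
    r.setStatus("ok")
    r.end()
    r.setStatus("late") // ignored: the record has already ended
    fmt.Println(r.status)
}
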
@@ -209,17 +225,20 @@ func (s *recordingSpan) SetStatus(code codes.Code, description string) {
// attributes the span is configured to have, the last added attributes will
// be dropped.
func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
if !s.IsRecording() {
if s == nil || len(attributes) == 0 {
return
}
s.mu.Lock()
defer s.mu.Unlock()
if !s.isRecording() {
return
}
limit := s.tracer.provider.spanLimits.AttributeCountLimit
if limit == 0 {
// No attributes allowed.
s.droppedAttributes += len(attributes)
s.addDroppedAttr(len(attributes))
return
}
@@ -232,11 +251,11 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
// Otherwise, add without deduplication. When attributes are read they
// will be deduplicated, optimizing the operation.
s.attributes = slices.Grow(s.attributes, len(s.attributes)+len(attributes))
s.attributes = slices.Grow(s.attributes, len(attributes))
for _, a := range attributes {
if !a.Valid() {
// Drop all invalid attributes.
s.droppedAttributes++
s.addDroppedAttr(1)
continue
}
a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
@@ -244,6 +263,22 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
}
}
// Declared as a var so tests can override.
var logDropAttrs = func() {
global.Warn("limit reached: dropping trace Span attributes")
}
// addDroppedAttr adds incr to the count of dropped attributes.
//
// The first, and only the first, time this method is called a warning will be
// logged.
//
// This method assumes s.mu.Lock is held by the caller.
func (s *recordingSpan) addDroppedAttr(incr int) {
s.droppedAttributes += incr
s.logDropAttrsOnce.Do(logDropAttrs)
}
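
Declaring the warning hook as a package-level var (rather than calling global.Warn inline) is what gives the tests a seam: they can swap the function out and assert it fired exactly once. A sketch of that seam with invented names; warnDropped and attrCounter are not SDK identifiers.

package main

import (
    "fmt"
    "log"
    "sync"
)

// warnDropped is declared as a var so tests (or this demo) can override it.
var warnDropped = func() {
    log.Println("limit reached: dropping attributes")
}

type attrCounter struct {
    dropped  int
    warnOnce sync.Once
}

// drop records n dropped attributes and fires the warning hook only once.
func (c *attrCounter) drop(n int) {
    c.dropped += n
    c.warnOnce.Do(warnDropped)
}

func main() {
    // Override the hook the way a test would, counting calls instead of logging.
    calls := 0
    warnDropped = func() { calls++ }

    c := &attrCounter{}
    c.drop(1)
    c.drop(2)
    fmt.Println("dropped:", c.dropped, "warnings:", calls) // dropped: 3 warnings: 1
}
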
// addOverCapAttrs adds the attributes attrs to the span s while
// de-duplicating the attributes of s and attrs and dropping attributes that
// exceed the limit.
@@ -263,22 +298,27 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) {
// Do not set a capacity when creating this map. Benchmark testing has
// showed this to only add unused memory allocations in general use.
exists := make(map[attribute.Key]int)
s.dedupeAttrsFromRecord(&exists)
exists := make(map[attribute.Key]int, len(s.attributes))
s.dedupeAttrsFromRecord(exists)
// Now that s.attributes is deduplicated, adding unique attributes up to
// the capacity of s will not over allocate s.attributes.
sum := len(attrs) + len(s.attributes)
s.attributes = slices.Grow(s.attributes, min(sum, limit))
// max size = limit
maxCap := min(len(attrs)+len(s.attributes), limit)
if cap(s.attributes) < maxCap {
s.attributes = slices.Grow(s.attributes, maxCap-cap(s.attributes))
}
for _, a := range attrs {
if !a.Valid() {
// Drop all invalid attributes.
s.droppedAttributes++
s.addDroppedAttr(1)
continue
}
if idx, ok := exists[a.Key]; ok {
// Perform all updates before dropping, even when at capacity.
a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
s.attributes[idx] = a
continue
}
@@ -286,7 +326,7 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) {
if len(s.attributes) >= limit {
// Do not just drop all of the remaining attributes, make sure
// updates are checked and performed.
s.droppedAttributes++
s.addDroppedAttr(1)
} else {
a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
s.attributes = append(s.attributes, a)
@@ -307,54 +347,99 @@ func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue {
}
switch attr.Value.Type() {
case attribute.STRING:
if v := attr.Value.AsString(); len(v) > limit {
return attr.Key.String(safeTruncate(v, limit))
}
v := attr.Value.AsString()
return attr.Key.String(truncate(limit, v))
case attribute.STRINGSLICE:
v := attr.Value.AsStringSlice()
for i := range v {
if len(v[i]) > limit {
v[i] = safeTruncate(v[i], limit)
}
v[i] = truncate(limit, v[i])
}
return attr.Key.StringSlice(v)
}
return attr
}
// safeTruncate truncates the string and guarantees valid UTF-8 is returned.
func safeTruncate(input string, limit int) string {
if trunc, ok := safeTruncateValidUTF8(input, limit); ok {
return trunc
// truncate returns a truncated version of s such that it contains less than
// the limit number of characters. Truncation is applied by returning the limit
// number of valid characters contained in s.
//
// If limit is negative, it returns the original string.
//
// UTF-8 is supported. When truncating, all invalid characters are dropped
// before applying truncation.
//
// If s already contains less than the limit number of bytes, it is returned
// unchanged. No invalid characters are removed.
func truncate(limit int, s string) string {
// This prioritize performance in the following order based on the most
// common expected use-cases.
//
// - Short values less than the default limit (128).
// - Strings with valid encodings that exceed the limit.
// - No limit.
// - Strings with invalid encodings that exceed the limit.
if limit < 0 || len(s) <= limit {
return s
}
trunc, _ := safeTruncateValidUTF8(strings.ToValidUTF8(input, ""), limit)
return trunc
}
// safeTruncateValidUTF8 returns a copy of the input string safely truncated to
// limit. The truncation is ensured to occur at the bounds of complete UTF-8
// characters. If invalid encoding of UTF-8 is encountered, input is returned
// with false, otherwise, the truncated input will be returned with true.
func safeTruncateValidUTF8(input string, limit int) (string, bool) {
for cnt := 0; cnt <= limit; {
r, size := utf8.DecodeRuneInString(input[cnt:])
if r == utf8.RuneError {
return input, false
// Optimistically, assume all valid UTF-8.
var b strings.Builder
count := 0
for i, c := range s {
if c != utf8.RuneError {
count++
if count > limit {
return s[:i]
}
continue
}
if cnt+size > limit {
return input[:cnt], true
_, size := utf8.DecodeRuneInString(s[i:])
if size == 1 {
// Invalid encoding.
b.Grow(len(s) - 1)
_, _ = b.WriteString(s[:i])
s = s[i:]
break
}
cnt += size
}
return input, true
// Fast-path, no invalid input.
if b.Cap() == 0 {
return s
}
// Truncate while validating UTF-8.
for i := 0; i < len(s) && count < limit; {
c := s[i]
if c < utf8.RuneSelf {
// Optimization for single byte runes (common case).
_ = b.WriteByte(c)
i++
count++
continue
}
_, size := utf8.DecodeRuneInString(s[i:])
if size == 1 {
// We checked for all 1-byte runes above, this is a RuneError.
i++
continue
}
_, _ = b.WriteString(s[i : i+size])
i += size
count++
}
return b.String()
}
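
The rewritten truncate counts runes rather than bytes and discards invalid UTF-8 before cutting, so a multi-byte character is never split in half. The function is unexported, but the observable behaviour is easy to reproduce with a much smaller helper; truncateRunes below is a simplified stand-in built on the same strings/utf8 primitives, and it skips the real function's fast path that leaves invalid bytes untouched when the input is already under the byte limit.

package main

import (
    "fmt"
    "strings"
    "unicode/utf8"
)

// truncateRunes keeps at most limit valid runes from s, dropping any invalid
// UTF-8 sequences first. A negative limit means "no limit".
func truncateRunes(limit int, s string) string {
    if limit < 0 {
        return s
    }
    s = strings.ToValidUTF8(s, "") // remove invalid sequences up front
    if utf8.RuneCountInString(s) <= limit {
        return s
    }
    runes := []rune(s)
    return string(runes[:limit]) // cut on a rune boundary, never mid-character
}

func main() {
    fmt.Println(truncateRunes(3, "héllo"))    // hél: cut at rune, not byte, boundaries
    fmt.Println(truncateRunes(4, "a\xffbcd")) // abcd: the invalid byte is dropped, not counted
    fmt.Println(truncateRunes(10, "short"))   // short: already under the limit
}
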
// End ends the span. This method does nothing if the span is already ended or
// is not being recorded.
//
// The only SpanOption currently supported is WithTimestamp which will set the
// end time for a Span's life-cycle.
// The only SpanEndOption currently supported are [trace.WithTimestamp], and
// [trace.WithStackTrace].
//
// If this method is called while panicking an error event is added to the
// Span before ending it and the panic is continued.
@@ -367,11 +452,12 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) {
// Store the end time as soon as possible to avoid artificially increasing
// the span's duration in case some operation below takes a while.
et := internal.MonotonicEndTime(s.startTime)
et := monotonicEndTime(s.startTime)
// Do relative expensive check now that we have an end time and see if we
// need to do any more processing.
if !s.IsRecording() {
// Lock the span now that we have an end time and see if we need to do any more processing.
s.mu.Lock()
if !s.isRecording() {
s.mu.Unlock()
return
}
@@ -396,10 +482,11 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) {
}
if s.executionTracerTaskEnd != nil {
s.mu.Unlock()
s.executionTracerTaskEnd()
s.mu.Lock()
}
s.mu.Lock()
// Setting endTime to non-zero marks the span as ended and not recording.
if config.Timestamp().IsZero() {
s.endTime = et
@@ -418,12 +505,28 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) {
}
}
// monotonicEndTime returns the end time at present but offset from start,
// monotonically.
//
// The monotonic clock is used in subtractions hence the duration since start
// added back to start gives end as a monotonic time. See
// https://golang.org/pkg/time/#hdr-Monotonic_Clocks
func monotonicEndTime(start time.Time) time.Time {
return start.Add(time.Since(start))
}
// RecordError will record err as a span event for this span. An additional call to
// SetStatus is required if the Status of the Span should be set to Error, this method
// does not change the Span status. If this span is not being recorded or err is nil
// than this method does nothing.
func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) {
if s == nil || err == nil || !s.IsRecording() {
if s == nil || err == nil {
return
}
s.mu.Lock()
defer s.mu.Unlock()
if !s.isRecording() {
return
}
@@ -459,14 +562,23 @@ func recordStackTrace() string {
}
// AddEvent adds an event with the provided name and options. If this span is
// not being recorded than this method does nothing.
// not being recorded then this method does nothing.
func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) {
if !s.IsRecording() {
if s == nil {
return
}
s.mu.Lock()
defer s.mu.Unlock()
if !s.isRecording() {
return
}
s.addEvent(name, o...)
}
// addEvent adds an event with the provided name and options.
//
// This method assumes s.mu.Lock is held by the caller.
func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) {
c := trace.NewEventConfig(o...)
e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()}
@@ -483,20 +595,21 @@ func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) {
e.Attributes = e.Attributes[:limit]
}
s.mu.Lock()
s.events.add(e)
s.mu.Unlock()
}
// SetName sets the name of this span. If this span is not being recorded than
// this method does nothing.
func (s *recordingSpan) SetName(name string) {
if !s.IsRecording() {
if s == nil {
return
}
s.mu.Lock()
defer s.mu.Unlock()
if !s.isRecording() {
return
}
s.name = name
}
@@ -552,29 +665,26 @@ func (s *recordingSpan) Attributes() []attribute.KeyValue {
func (s *recordingSpan) dedupeAttrs() {
// Do not set a capacity when creating this map. Benchmark testing has
// showed this to only add unused memory allocations in general use.
exists := make(map[attribute.Key]int)
s.dedupeAttrsFromRecord(&exists)
exists := make(map[attribute.Key]int, len(s.attributes))
s.dedupeAttrsFromRecord(exists)
}
// dedupeAttrsFromRecord deduplicates the attributes of s to fit capacity
// using record as the record of unique attribute keys to their index.
//
// This method assumes s.mu.Lock is held by the caller.
func (s *recordingSpan) dedupeAttrsFromRecord(record *map[attribute.Key]int) {
func (s *recordingSpan) dedupeAttrsFromRecord(record map[attribute.Key]int) {
// Use the fact that slices share the same backing array.
unique := s.attributes[:0]
for _, a := range s.attributes {
if idx, ok := (*record)[a.Key]; ok {
if idx, ok := record[a.Key]; ok {
unique[idx] = a
} else {
unique = append(unique, a)
(*record)[a.Key] = len(unique) - 1
record[a.Key] = len(unique) - 1
}
}
// s.attributes have element types of attribute.KeyValue. These types are
// not pointers and they themselves do not contain pointer fields,
// therefore the duplicate values do not need to be zeroed for them to be
// garbage collected.
clear(s.attributes[len(unique):]) // Erase unneeded elements to let GC collect objects.
s.attributes = unique
}
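
Two details are worth noting here: the dedupe reuses the attribute slice's backing array (unique := s.attributes[:0]) and then clears the unused tail so anything those slots referenced can be garbage collected, and the record map is now passed by value because Go maps are reference types, so the extra pointer bought nothing. A freestanding sketch of the same in-place dedupe over an invented kv type:

package main

import "fmt"

type kv struct {
    key string
    val int
}

// dedupeInPlace keeps the last value seen for each key while preserving
// first-seen key order, reusing s's backing array instead of allocating.
func dedupeInPlace(s []kv) []kv {
    record := make(map[string]int, len(s)) // maps are reference types: no pointer needed
    unique := s[:0]                        // shares s's backing array
    for _, a := range s {
        if idx, ok := record[a.key]; ok {
            unique[idx] = a // overwrite the earlier occurrence in place
        } else {
            record[a.key] = len(unique)
            unique = append(unique, a)
        }
    }
    // Zero the now-unused tail so anything it referenced can be collected.
    clear(s[len(unique):])
    return unique
}

func main() {
    attrs := []kv{{"a", 1}, {"b", 2}, {"a", 3}}
    fmt.Println(dedupeInPlace(attrs)) // [{a 3} {b 2}]
}
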
@@ -585,7 +695,7 @@ func (s *recordingSpan) Links() []Link {
if len(s.links.queue) == 0 {
return []Link{}
}
return s.interfaceArrayToLinksArray()
return s.links.copy()
}
// Events returns the events of this span.
@@ -595,7 +705,7 @@ func (s *recordingSpan) Events() []Event {
if len(s.events.queue) == 0 {
return []Event{}
}
return s.interfaceArrayToEventArray()
return s.events.copy()
}
// Status returns the status of this span.
@@ -615,7 +725,7 @@ func (s *recordingSpan) InstrumentationScope() instrumentation.Scope {
// InstrumentationLibrary returns the instrumentation.Library associated with
// the Tracer that created this span.
func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library {
func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be define for backwards compatibility
s.mu.Lock()
defer s.mu.Unlock()
return s.tracer.instrumentationScope
@@ -630,7 +740,17 @@ func (s *recordingSpan) Resource() *resource.Resource {
}
func (s *recordingSpan) AddLink(link trace.Link) {
if !s.IsRecording() || !link.SpanContext.IsValid() {
if s == nil {
return
}
if !link.SpanContext.IsValid() && len(link.Attributes) == 0 &&
link.SpanContext.TraceState().Len() == 0 {
return
}
s.mu.Lock()
defer s.mu.Unlock()
if !s.isRecording() {
return
}
@@ -647,9 +767,7 @@ func (s *recordingSpan) AddLink(link trace.Link) {
l.Attributes = l.Attributes[:limit]
}
s.mu.Lock()
s.links.add(l)
s.mu.Unlock()
}
// DroppedAttributes returns the number of attributes dropped by the span
@@ -713,39 +831,27 @@ func (s *recordingSpan) snapshot() ReadOnlySpan {
}
sd.droppedAttributeCount = s.droppedAttributes
if len(s.events.queue) > 0 {
sd.events = s.interfaceArrayToEventArray()
sd.events = s.events.copy()
sd.droppedEventCount = s.events.droppedCount
}
if len(s.links.queue) > 0 {
sd.links = s.interfaceArrayToLinksArray()
sd.links = s.links.copy()
sd.droppedLinkCount = s.links.droppedCount
}
return &sd
}
func (s *recordingSpan) interfaceArrayToLinksArray() []Link {
linkArr := make([]Link, 0)
for _, value := range s.links.queue {
linkArr = append(linkArr, value.(Link))
}
return linkArr
}
func (s *recordingSpan) interfaceArrayToEventArray() []Event {
eventArr := make([]Event, 0)
for _, value := range s.events.queue {
eventArr = append(eventArr, value.(Event))
}
return eventArr
}
func (s *recordingSpan) addChild() {
if !s.IsRecording() {
if s == nil {
return
}
s.mu.Lock()
defer s.mu.Unlock()
if !s.isRecording() {
return
}
s.childSpanCount++
s.mu.Unlock()
}
func (*recordingSpan) private() {}

View File

@@ -132,8 +132,8 @@ func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr Sa
spanKind: trace.ValidateSpanKind(config.SpanKind()),
name: name,
startTime: startTime,
events: newEvictedQueue(tr.provider.spanLimits.EventCountLimit),
links: newEvictedQueue(tr.provider.spanLimits.LinkCountLimit),
events: newEvictedQueueEvent(tr.provider.spanLimits.EventCountLimit),
links: newEvictedQueueLink(tr.provider.spanLimits.LinkCountLimit),
tracer: tr,
}