forked from open-telemetry/opentelemetry-collector
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathlogs.go
More file actions
196 lines (168 loc) · 5.68 KB
/
logs.go
File metadata and controls
196 lines (168 loc) · 5.68 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper"
import (
"context"
"errors"
"go.uber.org/zap"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/consumer/consumererror"
"go.opentelemetry.io/collector/exporter"
"go.opentelemetry.io/collector/exporter/exporterhelper/internal"
"go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue"
"go.opentelemetry.io/collector/exporter/exporterhelper/internal/request"
"go.opentelemetry.io/collector/exporter/exporterhelper/internal/sizer"
"go.opentelemetry.io/collector/pdata/plog"
pdatareq "go.opentelemetry.io/collector/pdata/xpdata/request"
"go.opentelemetry.io/collector/pipeline"
)
// Package-level stateless proto codecs reused for every queue
// serialization of plog.Logs (safe to share: they hold no state).
var (
logsMarshaler = &plog.ProtoMarshaler{}
logsUnmarshaler = &plog.ProtoUnmarshaler{}
)
// NewLogsQueueBatchSettings returns a new QueueBatchSettings to configure to WithQueueBatch when using plog.Logs.
// Experimental: This API is at the early stage of development and may change without backward compatibility
// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved.
func NewLogsQueueBatchSettings() QueueBatchSettings {
	// Bytes sizer measures the proto-encoded size of the wrapped plog.Logs.
	bytesSizer := request.BaseSizer{
		SizeofFunc: func(req request.Request) int64 {
			return int64(logsMarshaler.LogsSize(req.(*logsRequest).ld))
		},
	}
	return QueueBatchSettings{
		Encoding: logsEncoding{},
		Sizers: map[RequestSizerType]request.Sizer[Request]{
			RequestSizerTypeRequests: NewRequestsSizer(),
			RequestSizerTypeItems:    request.NewItemsSizer(),
			RequestSizerTypeBytes:    bytesSizer,
		},
	}
}
// logsRequest is the exporterhelper Request implementation that carries a
// plog.Logs payload through the queue/batch pipeline.
type logsRequest struct {
ld plog.Logs
// cachedSize memoizes the result of size(); -1 means "not yet computed".
cachedSize int
}
// newLogsRequest wraps ld in a logsRequest whose cached size is marked as
// not yet computed (-1).
func newLogsRequest(ld plog.Logs) Request {
	req := &logsRequest{ld: ld}
	req.cachedSize = -1
	return req
}
// logsEncoding (de)serializes logsRequest payloads for the persistent queue,
// optionally persisting the request context alongside the data.
type logsEncoding struct{}
// Compile-time check that logsEncoding satisfies QueueBatchEncoding[Request].
var _ QueueBatchEncoding[Request] = logsEncoding{}
// Unmarshal decodes a queue entry back into a Request. When context
// persistence is enabled it first tries the context-carrying format and
// falls back to a bare-payload decode for entries written without context.
func (logsEncoding) Unmarshal(bytes []byte) (context.Context, Request, error) {
	if !queue.PersistRequestContextOnRead {
		ld, err := logsUnmarshaler.UnmarshalLogs(bytes)
		if err != nil {
			var none Request
			return context.Background(), none, err
		}
		return context.Background(), newLogsRequest(ld), nil
	}
	ctx, ld, err := pdatareq.UnmarshalLogs(bytes)
	if errors.Is(err, pdatareq.ErrInvalidFormat) {
		// Entry predates context persistence; decode the payload alone.
		ld, err = logsUnmarshaler.UnmarshalLogs(bytes)
	}
	return ctx, newLogsRequest(ld), err
}
// Marshal serializes the request's logs for the queue, including ctx in the
// encoded form only when context persistence on write is enabled.
func (logsEncoding) Marshal(ctx context.Context, req Request) ([]byte, error) {
	ld := req.(*logsRequest).ld
	if !queue.PersistRequestContextOnWrite {
		return logsMarshaler.MarshalLogs(ld)
	}
	return pdatareq.MarshalLogs(ctx, ld)
}
// OnError returns the subset of logs that should be retried after err.
// If err carries partial-failure data (consumererror.Logs), only that data
// is retried; otherwise the whole request is returned unchanged.
func (req *logsRequest) OnError(err error) Request {
	var partial consumererror.Logs
	if !errors.As(err, &partial) {
		return req
	}
	return newLogsRequest(partial.Data())
}
// ItemsCount returns the number of log records carried by this request.
func (req *logsRequest) ItemsCount() int {
return req.ld.LogRecordCount()
}
// size returns the request's size as measured by sizer, computing it at most
// once and serving subsequent calls from the cache.
func (req *logsRequest) size(sizer sizer.LogsSizer) int {
	if req.cachedSize == -1 {
		// First call: measure and memoize.
		req.cachedSize = sizer.LogsSize(req.ld)
	}
	return req.cachedSize
}
// setCachedSize overrides the memoized size (e.g. after the request is
// split or merged and the previously computed value is stale).
func (req *logsRequest) setCachedSize(size int) {
req.cachedSize = size
}
// logsExporter combines the common exporter plumbing (BaseExporter) with a
// logs consumer to form a complete exporter.Logs implementation.
type logsExporter struct {
*internal.BaseExporter
consumer.Logs
}
// NewLogs creates an exporter.Logs that records observability logs and wraps every request with a Span.
func NewLogs(
	ctx context.Context,
	set exporter.Settings,
	cfg component.Config,
	pusher consumer.ConsumeLogsFunc,
	options ...Option,
) (exporter.Logs, error) {
	// Validate the caller-supplied pieces before building anything.
	switch {
	case cfg == nil:
		return nil, errNilConfig
	case pusher == nil:
		return nil, errNilPushLogs
	}
	// Prepend the plog-specific queue/batch settings so user options can
	// still override them.
	opts := append([]Option{internal.WithQueueBatchSettings(NewLogsQueueBatchSettings())}, options...)
	return NewLogsRequest(ctx, set, requestFromLogs(), requestConsumeFromLogs(pusher), opts...)
}
// requestConsumeFromLogs returns a RequestConsumeFunc that consumes plog.Logs.
func requestConsumeFromLogs(pusher consumer.ConsumeLogsFunc) RequestConsumeFunc {
	return func(ctx context.Context, req Request) error {
		ld := req.(*logsRequest).ld
		return pusher.ConsumeLogs(ctx, ld)
	}
}
// requestFromLogs returns a RequestConverterFunc that converts plog.Logs into a Request.
func requestFromLogs() RequestConverterFunc[plog.Logs] {
	return func(_ context.Context, ld plog.Logs) (Request, error) {
		req := newLogsRequest(ld)
		return req, nil
	}
}
// NewLogsRequest creates new logs exporter based on custom LogsConverter and Sender.
// Experimental: This API is at the early stage of development and may change without backward compatibility
// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved.
func NewLogsRequest(
	_ context.Context,
	set exporter.Settings,
	converter RequestConverterFunc[plog.Logs],
	pusher RequestConsumeFunc,
	options ...Option,
) (exporter.Logs, error) {
	// Reject missing collaborators up front with the dedicated sentinels.
	switch {
	case set.Logger == nil:
		return nil, errNilLogger
	case converter == nil:
		return nil, errNilLogsConverter
	case pusher == nil:
		return nil, errNilConsumeRequest
	}
	be, err := internal.NewBaseExporter(set, pipeline.SignalLogs, pusher, options...)
	if err != nil {
		return nil, err
	}
	lc, err := consumer.NewLogs(newConsumeLogs(converter, be, set.Logger), be.ConsumerOptions...)
	if err != nil {
		return nil, err
	}
	return &logsExporter{BaseExporter: be, Logs: lc}, nil
}
// newConsumeLogs builds the ConsumeLogsFunc that converts incoming plog.Logs
// into a Request and hands it to the base exporter. Conversion failures are
// logged, the data is dropped, and a permanent error is returned so the
// pipeline does not retry.
func newConsumeLogs(converter RequestConverterFunc[plog.Logs], be *internal.BaseExporter, logger *zap.Logger) consumer.ConsumeLogsFunc {
	return func(ctx context.Context, ld plog.Logs) error {
		req, convErr := converter(ctx, ld)
		if convErr == nil {
			return be.Send(ctx, req)
		}
		logger.Error("Failed to convert logs. Dropping data.",
			zap.Int("dropped_log_records", ld.LogRecordCount()),
			zap.Error(convErr))
		return consumererror.NewPermanent(convErr)
	}
}