diff --git a/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/_log_encoder/__init__.py b/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/_log_encoder/__init__.py index 7213f89d4a0..3263a1a59fa 100644 --- a/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/_log_encoder/__init__.py +++ b/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry/exporter/otlp/proto/common/_internal/_log_encoder/__init__.py @@ -30,44 +30,44 @@ ResourceLogs, ScopeLogs, ) -from opentelemetry.sdk._logs import LogData +from opentelemetry.sdk._logs import LogRecord -def encode_logs(batch: Sequence[LogData]) -> ExportLogsServiceRequest: +def encode_logs(batch: Sequence[LogRecord]) -> ExportLogsServiceRequest: return ExportLogsServiceRequest(resource_logs=_encode_resource_logs(batch)) -def _encode_log(log_data: LogData) -> PB2LogRecord: +def _encode_log(log_record: LogRecord) -> PB2LogRecord: span_id = ( None - if log_data.log_record.span_id == 0 - else _encode_span_id(log_data.log_record.span_id) + if log_record.span_id == 0 + else _encode_span_id(log_record.span_id) ) trace_id = ( None - if log_data.log_record.trace_id == 0 - else _encode_trace_id(log_data.log_record.trace_id) + if log_record.trace_id == 0 + else _encode_trace_id(log_record.trace_id) ) - body = log_data.log_record.body + body = log_record.body return PB2LogRecord( - time_unix_nano=log_data.log_record.timestamp, - observed_time_unix_nano=log_data.log_record.observed_timestamp, + time_unix_nano=log_record.timestamp, + observed_time_unix_nano=log_record.observed_timestamp, span_id=span_id, trace_id=trace_id, - flags=int(log_data.log_record.trace_flags), + flags=int(log_record.trace_flags), body=_encode_value(body) if body is not None else None, - severity_text=log_data.log_record.severity_text, - 
attributes=_encode_attributes(log_data.log_record.attributes), - dropped_attributes_count=log_data.log_record.dropped_attributes, - severity_number=log_data.log_record.severity_number.value, + severity_text=log_record.severity_text, + attributes=_encode_attributes(log_record.attributes), + dropped_attributes_count=log_record.dropped_attributes, + severity_number=log_record.severity_number.value, ) -def _encode_resource_logs(batch: Sequence[LogData]) -> List[ResourceLogs]: +def _encode_resource_logs(batch: Sequence[LogRecord]) -> List[ResourceLogs]: sdk_resource_logs = defaultdict(lambda: defaultdict(list)) for sdk_log in batch: - sdk_resource = sdk_log.log_record.resource + sdk_resource = sdk_log.resource sdk_instrumentation = sdk_log.instrumentation_scope or None pb2_log = _encode_log(sdk_log) diff --git a/exporter/opentelemetry-exporter-otlp-proto-common/tests/test_log_encoder.py b/exporter/opentelemetry-exporter-otlp-proto-common/tests/test_log_encoder.py index 70f4c821c9e..055e71e6c96 100644 --- a/exporter/opentelemetry-exporter-otlp-proto-common/tests/test_log_encoder.py +++ b/exporter/opentelemetry-exporter-otlp-proto-common/tests/test_log_encoder.py @@ -39,7 +39,7 @@ from opentelemetry.proto.resource.v1.resource_pb2 import ( Resource as PB2Resource, ) -from opentelemetry.sdk._logs import LogData, LogLimits +from opentelemetry.sdk._logs import LogLimits from opentelemetry.sdk._logs import LogRecord as SDKLogRecord from opentelemetry.sdk.resources import Resource as SDKResource from opentelemetry.sdk.util.instrumentation import InstrumentationScope @@ -54,7 +54,7 @@ def test_encode(self): def test_encode_no_body(self): sdk_logs, expected_encoding = self.get_test_logs() for log in sdk_logs: - log.log_record.body = None + log.body = None for resource_log in expected_encoding.resource_logs: for scope_log in resource_log.scope_logs: @@ -66,7 +66,7 @@ def test_encode_no_body(self): def test_dropped_attributes_count(self): sdk_logs = 
self._get_test_logs_dropped_attributes() encoded_logs = encode_logs(sdk_logs) - self.assertTrue(hasattr(sdk_logs[0].log_record, "dropped_attributes")) + self.assertTrue(hasattr(sdk_logs[0], "dropped_attributes")) self.assertEqual( # pylint:disable=no-member encoded_logs.resource_logs[0] @@ -77,78 +77,70 @@ def test_dropped_attributes_count(self): ) @staticmethod - def _get_sdk_log_data() -> List[LogData]: - log1 = LogData( - log_record=SDKLogRecord( - timestamp=1644650195189786880, - observed_timestamp=1644650195189786881, - trace_id=89564621134313219400156819398935297684, - span_id=1312458408527513268, - trace_flags=TraceFlags(0x01), - severity_text="WARN", - severity_number=SeverityNumber.WARN, - body="Do not go gentle into that good night. Rage, rage against the dying of the light", - resource=SDKResource( - {"first_resource": "value"}, - "resource_schema_url", - ), - attributes={"a": 1, "b": "c"}, + def _get_sdk_log_record() -> List[SDKLogRecord]: + log1 = SDKLogRecord( + timestamp=1644650195189786880, + observed_timestamp=1644650195189786881, + trace_id=89564621134313219400156819398935297684, + span_id=1312458408527513268, + trace_flags=TraceFlags(0x01), + severity_text="WARN", + severity_number=SeverityNumber.WARN, + body="Do not go gentle into that good night. 
Rage, rage against the dying of the light", + resource=SDKResource( + {"first_resource": "value"}, + "resource_schema_url", ), + attributes={"a": 1, "b": "c"}, instrumentation_scope=InstrumentationScope( "first_name", "first_version" ), ) - log2 = LogData( - log_record=SDKLogRecord( - timestamp=1644650249738562048, - observed_timestamp=1644650249738562049, - trace_id=0, - span_id=0, - trace_flags=TraceFlags.DEFAULT, - severity_text="WARN", - severity_number=SeverityNumber.WARN, - body="Cooper, this is no time for caution!", - resource=SDKResource({"second_resource": "CASE"}), - attributes={}, - ), + log2 = SDKLogRecord( + timestamp=1644650249738562048, + observed_timestamp=1644650249738562049, + trace_id=0, + span_id=0, + trace_flags=TraceFlags.DEFAULT, + severity_text="WARN", + severity_number=SeverityNumber.WARN, + body="Cooper, this is no time for caution!", + resource=SDKResource({"second_resource": "CASE"}), + attributes={}, instrumentation_scope=InstrumentationScope( "second_name", "second_version" ), ) - log3 = LogData( - log_record=SDKLogRecord( - timestamp=1644650427658989056, - observed_timestamp=1644650427658989057, - trace_id=271615924622795969659406376515024083555, - span_id=4242561578944770265, - trace_flags=TraceFlags(0x01), - severity_text="DEBUG", - severity_number=SeverityNumber.DEBUG, - body="To our galaxy", - resource=SDKResource({"second_resource": "CASE"}), - attributes={"a": 1, "b": "c"}, - ), + log3 = SDKLogRecord( + timestamp=1644650427658989056, + observed_timestamp=1644650427658989057, + trace_id=271615924622795969659406376515024083555, + span_id=4242561578944770265, + trace_flags=TraceFlags(0x01), + severity_text="DEBUG", + severity_number=SeverityNumber.DEBUG, + body="To our galaxy", + resource=SDKResource({"second_resource": "CASE"}), + attributes={"a": 1, "b": "c"}, instrumentation_scope=None, ) - log4 = LogData( - log_record=SDKLogRecord( - timestamp=1644650584292683008, - observed_timestamp=1644650584292683009, - 
trace_id=212592107417388365804938480559624925555, - span_id=6077757853989569223, - trace_flags=TraceFlags(0x01), - severity_text="INFO", - severity_number=SeverityNumber.INFO, - body="Love is the one thing that transcends time and space", - resource=SDKResource( - {"first_resource": "value"}, - "resource_schema_url", - ), - attributes={"filename": "model.py", "func_name": "run_method"}, + log4 = SDKLogRecord( + timestamp=1644650584292683008, + observed_timestamp=1644650584292683009, + trace_id=212592107417388365804938480559624925555, + span_id=6077757853989569223, + trace_flags=TraceFlags(0x01), + severity_text="INFO", + severity_number=SeverityNumber.INFO, + body="Love is the one thing that transcends time and space", + resource=SDKResource( + {"first_resource": "value"}, + "resource_schema_url", ), + attributes={"filename": "model.py", "func_name": "run_method"}, instrumentation_scope=InstrumentationScope( "another_name", "another_version" ), @@ -159,7 +151,7 @@ def _get_sdk_log_data() -> List[LogData]: def get_test_logs( self, ) -> Tuple[List[SDKLogRecord], ExportLogsServiceRequest]: - sdk_logs = self._get_sdk_log_data() + sdk_logs = self._get_sdk_log_record() pb2_service_request = ExportLogsServiceRequest( resource_logs=[ @@ -293,37 +285,33 @@ def get_test_logs( return sdk_logs, pb2_service_request @staticmethod - def _get_test_logs_dropped_attributes() -> List[LogData]: - log1 = LogData( - log_record=SDKLogRecord( - timestamp=1644650195189786880, - trace_id=89564621134313219400156819398935297684, - span_id=1312458408527513268, - trace_flags=TraceFlags(0x01), - severity_text="WARN", - severity_number=SeverityNumber.WARN, - body="Do not go gentle into that good night. 
Rage, rage against the dying of the light", - resource=SDKResource({"first_resource": "value"}), - attributes={"a": 1, "b": "c", "user_id": "B121092"}, - limits=LogLimits(max_attributes=1), - ), + def _get_test_logs_dropped_attributes() -> List[SDKLogRecord]: + log1 = SDKLogRecord( + timestamp=1644650195189786880, + trace_id=89564621134313219400156819398935297684, + span_id=1312458408527513268, + trace_flags=TraceFlags(0x01), + severity_text="WARN", + severity_number=SeverityNumber.WARN, + body="Do not go gentle into that good night. Rage, rage against the dying of the light", + resource=SDKResource({"first_resource": "value"}), + attributes={"a": 1, "b": "c", "user_id": "B121092"}, + limits=LogLimits(max_attributes=1), instrumentation_scope=InstrumentationScope( "first_name", "first_version" ), ) - log2 = LogData( - log_record=SDKLogRecord( - timestamp=1644650249738562048, - trace_id=0, - span_id=0, - trace_flags=TraceFlags.DEFAULT, - severity_text="WARN", - severity_number=SeverityNumber.WARN, - body="Cooper, this is no time for caution!", - resource=SDKResource({"second_resource": "CASE"}), - attributes={}, - ), + log2 = SDKLogRecord( + timestamp=1644650249738562048, + trace_id=0, + span_id=0, + trace_flags=TraceFlags.DEFAULT, + severity_text="WARN", + severity_number=SeverityNumber.WARN, + body="Cooper, this is no time for caution!", + resource=SDKResource({"second_resource": "CASE"}), + attributes={}, instrumentation_scope=InstrumentationScope( "second_name", "second_version" ), diff --git a/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_log_exporter/__init__.py b/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_log_exporter/__init__.py index 8f629899d77..309ba068315 100644 --- a/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_log_exporter/__init__.py +++ 
b/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/_log_exporter/__init__.py @@ -28,7 +28,6 @@ from opentelemetry.proto.collector.logs.v1.logs_service_pb2_grpc import ( LogsServiceStub, ) -from opentelemetry.sdk._logs import LogData from opentelemetry.sdk._logs import LogRecord as SDKLogRecord from opentelemetry.sdk._logs.export import LogExporter, LogExportResult from opentelemetry.sdk.environment_variables import ( @@ -103,11 +102,11 @@ def __init__( ) def _translate_data( - self, data: Sequence[LogData] + self, data: Sequence[SDKLogRecord] ) -> ExportLogsServiceRequest: return encode_logs(data) - def export(self, batch: Sequence[LogData]) -> LogExportResult: + def export(self, batch: Sequence[SDKLogRecord]) -> LogExportResult: return self._export(batch) def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: diff --git a/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/logs/test_otlp_logs_exporter.py b/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/logs/test_otlp_logs_exporter.py index a31679fb0d5..88d714aff81 100644 --- a/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/logs/test_otlp_logs_exporter.py +++ b/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/logs/test_otlp_logs_exporter.py @@ -52,7 +52,7 @@ from opentelemetry.proto.resource.v1.resource_pb2 import ( Resource as OTLPResource, ) -from opentelemetry.sdk._logs import LogData, LogRecord +from opentelemetry.sdk._logs import LogRecord from opentelemetry.sdk._logs.export import LogExportResult from opentelemetry.sdk.environment_variables import ( OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE, @@ -126,79 +126,69 @@ def setUp(self): self.server.start() - self.log_data_1 = LogData( - log_record=LogRecord( - timestamp=int(time.time() * 1e9), - trace_id=2604504634922341076776623263868986797, - span_id=5213367945872657620, - trace_flags=TraceFlags(0x01), - severity_text="WARNING", - severity_number=SeverityNumber.WARN, - body="Zhengzhou, We have 
a heaviest rains in 1000 years", - resource=SDKResource({"key": "value"}), - attributes={"a": 1, "b": "c"}, - ), + self.log_record_1 = LogRecord( + timestamp=int(time.time() * 1e9), + trace_id=2604504634922341076776623263868986797, + span_id=5213367945872657620, + trace_flags=TraceFlags(0x01), + severity_text="WARNING", + severity_number=SeverityNumber.WARN, + body="Zhengzhou, We have a heaviest rains in 1000 years", + resource=SDKResource({"key": "value"}), + attributes={"a": 1, "b": "c"}, instrumentation_scope=InstrumentationScope( "first_name", "first_version" ), ) - self.log_data_2 = LogData( - log_record=LogRecord( - timestamp=int(time.time() * 1e9), - trace_id=2604504634922341076776623263868986799, - span_id=5213367945872657623, - trace_flags=TraceFlags(0x01), - severity_text="INFO", - severity_number=SeverityNumber.INFO2, - body="Sydney, Opera House is closed", - resource=SDKResource({"key": "value"}), - attributes={"custom_attr": [1, 2, 3]}, - ), + self.log_record_2 = LogRecord( + timestamp=int(time.time() * 1e9), + trace_id=2604504634922341076776623263868986799, + span_id=5213367945872657623, + trace_flags=TraceFlags(0x01), + severity_text="INFO", + severity_number=SeverityNumber.INFO2, + body="Sydney, Opera House is closed", + resource=SDKResource({"key": "value"}), + attributes={"custom_attr": [1, 2, 3]}, instrumentation_scope=InstrumentationScope( "second_name", "second_version" ), ) - self.log_data_3 = LogData( - log_record=LogRecord( - timestamp=int(time.time() * 1e9), - trace_id=2604504634922341076776623263868986800, - span_id=5213367945872657628, - trace_flags=TraceFlags(0x01), - severity_text="ERROR", - severity_number=SeverityNumber.WARN, - body="Mumbai, Boil water before drinking", - resource=SDKResource({"service": "myapp"}), - ), + self.log_record_3 = LogRecord( + timestamp=int(time.time() * 1e9), + trace_id=2604504634922341076776623263868986800, + span_id=5213367945872657628, + trace_flags=TraceFlags(0x01), + severity_text="ERROR", + 
severity_number=SeverityNumber.WARN, + body="Mumbai, Boil water before drinking", + resource=SDKResource({"service": "myapp"}), instrumentation_scope=InstrumentationScope( "third_name", "third_version" ), ) - self.log_data_4 = LogData( - log_record=LogRecord( - timestamp=int(time.time() * 1e9), - trace_id=0, - span_id=5213367945872657629, - trace_flags=TraceFlags(0x01), - severity_text="ERROR", - severity_number=SeverityNumber.WARN, - body="Invalid trace id check", - resource=SDKResource({"service": "myapp"}), - ), + self.log_record_4 = LogRecord( + timestamp=int(time.time() * 1e9), + trace_id=0, + span_id=5213367945872657629, + trace_flags=TraceFlags(0x01), + severity_text="ERROR", + severity_number=SeverityNumber.WARN, + body="Invalid trace id check", + resource=SDKResource({"service": "myapp"}), instrumentation_scope=InstrumentationScope( "fourth_name", "fourth_version" ), ) - self.log_data_5 = LogData( - log_record=LogRecord( - timestamp=int(time.time() * 1e9), - trace_id=2604504634922341076776623263868986801, - span_id=0, - trace_flags=TraceFlags(0x01), - severity_text="ERROR", - severity_number=SeverityNumber.WARN, - body="Invalid span id check", - resource=SDKResource({"service": "myapp"}), - ), + self.log_record_5 = LogRecord( + timestamp=int(time.time() * 1e9), + trace_id=2604504634922341076776623263868986801, + span_id=0, + trace_flags=TraceFlags(0x01), + severity_text="ERROR", + severity_number=SeverityNumber.WARN, + body="Invalid span id check", + resource=SDKResource({"service": "myapp"}), instrumentation_scope=InstrumentationScope( "fifth_name", "fifth_version" ), @@ -400,7 +390,7 @@ def test_unavailable(self, mock_sleep, mock_expo): LogsServiceServicerUNAVAILABLE(), self.server ) self.assertEqual( - self.exporter.export([self.log_data_1]), LogExportResult.FAILURE + self.exporter.export([self.log_record_1]), LogExportResult.FAILURE ) mock_sleep.assert_called_with(0.01) @@ -415,7 +405,7 @@ def test_unavailable_delay(self, mock_sleep, mock_expo): 
LogsServiceServicerUNAVAILABLEDelay(), self.server ) self.assertEqual( - self.exporter.export([self.log_data_1]), LogExportResult.FAILURE + self.exporter.export([self.log_record_1]), LogExportResult.FAILURE ) mock_sleep.assert_called_with(0.01) @@ -424,7 +414,7 @@ def test_success(self): LogsServiceServicerSUCCESS(), self.server ) self.assertEqual( - self.exporter.export([self.log_data_1]), LogExportResult.SUCCESS + self.exporter.export([self.log_record_1]), LogExportResult.SUCCESS ) def test_failure(self): @@ -432,12 +422,12 @@ def test_failure(self): LogsServiceServicerALREADY_EXISTS(), self.server ) self.assertEqual( - self.exporter.export([self.log_data_1]), LogExportResult.FAILURE + self.exporter.export([self.log_record_1]), LogExportResult.FAILURE ) - def export_log_and_deserialize(self, log_data): + def export_log_and_deserialize(self, log_record): # pylint: disable=protected-access - translated_data = self.exporter._translate_data([log_data]) + translated_data = self.exporter._translate_data([log_record]) request_dict = MessageToDict(translated_data) log_records = ( request_dict.get("resourceLogs")[0] @@ -447,7 +437,7 @@ def export_log_and_deserialize(self, log_data): return log_records def test_exported_log_without_trace_id(self): - log_records = self.export_log_and_deserialize(self.log_data_4) + log_records = self.export_log_and_deserialize(self.log_record_4) if log_records: log_record = log_records[0] self.assertIn("spanId", log_record) @@ -460,7 +450,7 @@ def test_exported_log_without_trace_id(self): self.fail("No log records found") def test_exported_log_without_span_id(self): - log_records = self.export_log_and_deserialize(self.log_data_5) + log_records = self.export_log_and_deserialize(self.log_record_5) if log_records: log_record = log_records[0] self.assertIn("traceId", log_record) @@ -472,7 +462,7 @@ def test_exported_log_without_span_id(self): else: self.fail("No log records found") - def test_translate_log_data(self): + def 
test_translate_log_record(self): expected = ExportLogsServiceRequest( resource_logs=[ ResourceLogs( @@ -491,9 +481,9 @@ def test_translate_log_data(self): log_records=[ PB2LogRecord( # pylint: disable=no-member - time_unix_nano=self.log_data_1.log_record.timestamp, - observed_time_unix_nano=self.log_data_1.log_record.observed_timestamp, - severity_number=self.log_data_1.log_record.severity_number.value, + time_unix_nano=self.log_record_1.timestamp, + observed_time_unix_nano=self.log_record_1.observed_timestamp, + severity_number=self.log_record_1.severity_number.value, severity_text="WARNING", span_id=int.to_bytes( 5213367945872657620, 8, "big" @@ -517,7 +507,7 @@ def test_translate_log_data(self): ), ], flags=int( - self.log_data_1.log_record.trace_flags + self.log_record_1.trace_flags ), ) ], @@ -529,7 +519,7 @@ def test_translate_log_data(self): # pylint: disable=protected-access self.assertEqual( - expected, self.exporter._translate_data([self.log_data_1]) + expected, self.exporter._translate_data([self.log_record_1]) ) def test_translate_multiple_logs(self): @@ -551,9 +541,9 @@ def test_translate_multiple_logs(self): log_records=[ PB2LogRecord( # pylint: disable=no-member - time_unix_nano=self.log_data_1.log_record.timestamp, - observed_time_unix_nano=self.log_data_1.log_record.observed_timestamp, - severity_number=self.log_data_1.log_record.severity_number.value, + time_unix_nano=self.log_record_1.timestamp, + observed_time_unix_nano=self.log_record_1.observed_timestamp, + severity_number=self.log_record_1.severity_number.value, severity_text="WARNING", span_id=int.to_bytes( 5213367945872657620, 8, "big" @@ -577,7 +567,7 @@ def test_translate_multiple_logs(self): ), ], flags=int( - self.log_data_1.log_record.trace_flags + self.log_record_1.trace_flags ), ) ], @@ -589,9 +579,9 @@ def test_translate_multiple_logs(self): log_records=[ PB2LogRecord( # pylint: disable=no-member - time_unix_nano=self.log_data_2.log_record.timestamp, - 
observed_time_unix_nano=self.log_data_2.log_record.observed_timestamp, - severity_number=self.log_data_2.log_record.severity_number.value, + time_unix_nano=self.log_record_2.timestamp, + observed_time_unix_nano=self.log_record_2.observed_timestamp, + severity_number=self.log_record_2.severity_number.value, severity_text="INFO", span_id=int.to_bytes( 5213367945872657623, 8, "big" @@ -611,7 +601,7 @@ def test_translate_multiple_logs(self): ), ], flags=int( - self.log_data_2.log_record.trace_flags + self.log_record_2.trace_flags ), ) ], @@ -635,9 +625,9 @@ def test_translate_multiple_logs(self): log_records=[ PB2LogRecord( # pylint: disable=no-member - time_unix_nano=self.log_data_3.log_record.timestamp, - observed_time_unix_nano=self.log_data_3.log_record.observed_timestamp, - severity_number=self.log_data_3.log_record.severity_number.value, + time_unix_nano=self.log_record_3.timestamp, + observed_time_unix_nano=self.log_record_3.observed_timestamp, + severity_number=self.log_record_3.severity_number.value, severity_text="ERROR", span_id=int.to_bytes( 5213367945872657628, 8, "big" @@ -652,7 +642,7 @@ def test_translate_multiple_logs(self): ), attributes=[], flags=int( - self.log_data_3.log_record.trace_flags + self.log_record_3.trace_flags ), ) ], @@ -666,6 +656,6 @@ def test_translate_multiple_logs(self): self.assertEqual( expected, self.exporter._translate_data( - [self.log_data_1, self.log_data_2, self.log_data_3] + [self.log_record_1, self.log_record_2, self.log_record_3] ), ) diff --git a/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/_log_exporter/__init__.py b/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/_log_exporter/__init__.py index 21b877380c8..96f30df0d3f 100644 --- a/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/_log_exporter/__init__.py +++ 
b/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/_log_exporter/__init__.py @@ -30,7 +30,7 @@ _OTLP_HTTP_HEADERS, Compression, ) -from opentelemetry.sdk._logs import LogData +from opentelemetry.sdk._logs import LogRecord from opentelemetry.sdk._logs.export import ( LogExporter, LogExportResult, @@ -149,7 +149,7 @@ def _retryable(resp: requests.Response) -> bool: return True return False - def export(self, batch: Sequence[LogData]) -> LogExportResult: + def export(self, batch: Sequence[LogRecord]) -> LogExportResult: # After the call to Shutdown subsequent calls to Export are # not allowed and should return a Failure result. if self._shutdown: diff --git a/exporter/opentelemetry-exporter-otlp-proto-http/tests/test_proto_log_exporter.py b/exporter/opentelemetry-exporter-otlp-proto-http/tests/test_proto_log_exporter.py index 66b0f890d76..0cc5c3840dd 100644 --- a/exporter/opentelemetry-exporter-otlp-proto-http/tests/test_proto_log_exporter.py +++ b/exporter/opentelemetry-exporter-otlp-proto-http/tests/test_proto_log_exporter.py @@ -35,7 +35,6 @@ from opentelemetry.proto.collector.logs.v1.logs_service_pb2 import ( ExportLogsServiceRequest, ) -from opentelemetry.sdk._logs import LogData from opentelemetry.sdk._logs import LogRecord as SDKLogRecord from opentelemetry.sdk._logs.export import LogExportResult from opentelemetry.sdk.environment_variables import ( @@ -214,18 +213,16 @@ def export_log_and_deserialize(log): return log_records def test_exported_log_without_trace_id(self): - log = LogData( - log_record=SDKLogRecord( - timestamp=1644650195189786182, - trace_id=0, - span_id=1312458408527513292, - trace_flags=TraceFlags(0x01), - severity_text="WARN", - severity_number=SeverityNumber.WARN, - body="Invalid trace id check", - resource=SDKResource({"first_resource": "value"}), - attributes={"a": 1, "b": "c"}, - ), + log = SDKLogRecord( + timestamp=1644650195189786182, + trace_id=0, + span_id=1312458408527513292, + 
trace_flags=TraceFlags(0x01), + severity_text="WARN", + severity_number=SeverityNumber.WARN, + body="Invalid trace id check", + resource=SDKResource({"first_resource": "value"}), + attributes={"a": 1, "b": "c"}, instrumentation_scope=InstrumentationScope("name", "version"), ) log_records = TestOTLPHTTPLogExporter.export_log_and_deserialize(log) @@ -241,18 +238,16 @@ def test_exported_log_without_trace_id(self): self.fail("No log records found") def test_exported_log_without_span_id(self): - log = LogData( - log_record=SDKLogRecord( - timestamp=1644650195189786360, - trace_id=89564621134313219400156819398935297696, - span_id=0, - trace_flags=TraceFlags(0x01), - severity_text="WARN", - severity_number=SeverityNumber.WARN, - body="Invalid span id check", - resource=SDKResource({"first_resource": "value"}), - attributes={"a": 1, "b": "c"}, - ), + log = SDKLogRecord( + timestamp=1644650195189786360, + trace_id=89564621134313219400156819398935297696, + span_id=0, + trace_flags=TraceFlags(0x01), + severity_text="WARN", + severity_number=SeverityNumber.WARN, + body="Invalid span id check", + resource=SDKResource({"first_resource": "value"}), + attributes={"a": 1, "b": "c"}, instrumentation_scope=InstrumentationScope("name", "version"), ) log_records = TestOTLPHTTPLogExporter.export_log_and_deserialize(log) @@ -279,7 +274,7 @@ def test_exponential_backoff(self, mock_sleep): ) exporter = OTLPLogExporter(endpoint="http://logs.example.com/export") - logs = self._get_sdk_log_data() + logs = self._get_sdk_log_record() exporter.export(logs) mock_sleep.assert_has_calls( @@ -287,68 +282,60 @@ def test_exponential_backoff(self, mock_sleep): ) @staticmethod - def _get_sdk_log_data() -> List[LogData]: - log1 = LogData( - log_record=SDKLogRecord( - timestamp=1644650195189786880, - trace_id=89564621134313219400156819398935297684, - span_id=1312458408527513268, - trace_flags=TraceFlags(0x01), - severity_text="WARN", - severity_number=SeverityNumber.WARN, - body="Do not go gentle into 
that good night. Rage, rage against the dying of the light", - resource=SDKResource({"first_resource": "value"}), - attributes={"a": 1, "b": "c"}, - ), + def _get_sdk_log_record() -> List[SDKLogRecord]: + log1 = SDKLogRecord( + timestamp=1644650195189786880, + trace_id=89564621134313219400156819398935297684, + span_id=1312458408527513268, + trace_flags=TraceFlags(0x01), + severity_text="WARN", + severity_number=SeverityNumber.WARN, + body="Do not go gentle into that good night. Rage, rage against the dying of the light", + resource=SDKResource({"first_resource": "value"}), + attributes={"a": 1, "b": "c"}, instrumentation_scope=InstrumentationScope( "first_name", "first_version" ), ) - log2 = LogData( - log_record=SDKLogRecord( - timestamp=1644650249738562048, - trace_id=0, - span_id=0, - trace_flags=TraceFlags.DEFAULT, - severity_text="WARN", - severity_number=SeverityNumber.WARN, - body="Cooper, this is no time for caution!", - resource=SDKResource({"second_resource": "CASE"}), - attributes={}, - ), + log2 = SDKLogRecord( + timestamp=1644650249738562048, + trace_id=0, + span_id=0, + trace_flags=TraceFlags.DEFAULT, + severity_text="WARN", + severity_number=SeverityNumber.WARN, + body="Cooper, this is no time for caution!", + resource=SDKResource({"second_resource": "CASE"}), + attributes={}, instrumentation_scope=InstrumentationScope( "second_name", "second_version" ), ) - log3 = LogData( - log_record=SDKLogRecord( - timestamp=1644650427658989056, - trace_id=271615924622795969659406376515024083555, - span_id=4242561578944770265, - trace_flags=TraceFlags(0x01), - severity_text="DEBUG", - severity_number=SeverityNumber.DEBUG, - body="To our galaxy", - resource=SDKResource({"second_resource": "CASE"}), - attributes={"a": 1, "b": "c"}, - ), + log3 = SDKLogRecord( + timestamp=1644650427658989056, + trace_id=271615924622795969659406376515024083555, + span_id=4242561578944770265, + trace_flags=TraceFlags(0x01), + severity_text="DEBUG", + 
severity_number=SeverityNumber.DEBUG, + body="To our galaxy", + resource=SDKResource({"second_resource": "CASE"}), + attributes={"a": 1, "b": "c"}, instrumentation_scope=None, ) - log4 = LogData( - log_record=SDKLogRecord( - timestamp=1644650584292683008, - trace_id=212592107417388365804938480559624925555, - span_id=6077757853989569223, - trace_flags=TraceFlags(0x01), - severity_text="INFO", - severity_number=SeverityNumber.INFO, - body="Love is the one thing that transcends time and space", - resource=SDKResource({"first_resource": "value"}), - attributes={"filename": "model.py", "func_name": "run_method"}, - ), + log4 = SDKLogRecord( + timestamp=1644650584292683008, + trace_id=212592107417388365804938480559624925555, + span_id=6077757853989569223, + trace_flags=TraceFlags(0x01), + severity_text="INFO", + severity_number=SeverityNumber.INFO, + body="Love is the one thing that transcends time and space", + resource=SDKResource({"first_resource": "value"}), + attributes={"filename": "model.py", "func_name": "run_method"}, instrumentation_scope=InstrumentationScope( "another_name", "another_version" ), diff --git a/opentelemetry-api/src/opentelemetry/_logs/__init__.py b/opentelemetry-api/src/opentelemetry/_logs/__init__.py index aaf29e5fe63..0610e005b60 100644 --- a/opentelemetry-api/src/opentelemetry/_logs/__init__.py +++ b/opentelemetry-api/src/opentelemetry/_logs/__init__.py @@ -45,6 +45,7 @@ ) from opentelemetry._logs.severity import SeverityNumber, std_to_otel +# TODO: do we need to expose `std_to_otel`? 
If so, let's pick a better name for it __all__ = [ "Logger", "LoggerProvider", diff --git a/opentelemetry-api/src/opentelemetry/_logs/_internal/__init__.py b/opentelemetry-api/src/opentelemetry/_logs/_internal/__init__.py index f20bd8507e5..419389fc872 100644 --- a/opentelemetry-api/src/opentelemetry/_logs/_internal/__init__.py +++ b/opentelemetry-api/src/opentelemetry/_logs/_internal/__init__.py @@ -158,22 +158,19 @@ def get_logger( name: str, version: Optional[str] = None, schema_url: Optional[str] = None, - attributes: Optional[Attributes] = None, + attributes: Optional[ + Attributes + ] = None, # TODO: attributes should support AnyValue ) -> Logger: """Returns a `Logger` for use by the given instrumentation library. - For any two calls it is undefined whether the same or different - `Logger` instances are returned, even for different library names. - This function may return different `Logger` types (e.g. a no-op logger vs. a functional logger). Args: - name: The name of the instrumenting module. - ``__name__`` may not be used as this can result in - different logger names if the loggers are in different files. - It is better to use a fixed string that can be imported where - needed and used consistently as the name of the logger. + name: The name of the instrumenting module, package, or class. + For log sources which define a logger name (e.g. `logging.Logger.name`) + the Logger Name should be recorded as the instrumentation scope name. This should *not* be the name of the module that is instrumented but the name of the module doing the instrumentation. @@ -185,6 +182,9 @@ def get_logger( ``importlib.metadata.version(instrumenting_library_name)``. schema_url: Optional. Specifies the Schema URL of the emitted telemetry. + + attributes: Optional. Instrumentation scope attributes to be associated + with the logger. 
""" diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_events/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/_events/__init__.py index ae16302546d..275da78d241 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/_events/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/_events/__init__.py @@ -61,6 +61,7 @@ def emit(self, event: Event) -> None: body=event.body, resource=getattr(self._logger, "resource", None), attributes=event.attributes, + instrumentation_scope=self._logger._instrumentation_scope, ) self._logger.emit(log_record) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_logs/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/_logs/__init__.py index 0254c135e84..f9c8c218110 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/_logs/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/_logs/__init__.py @@ -14,7 +14,6 @@ from opentelemetry.sdk._logs._internal import ( - LogData, LogDroppedAttributesWarning, Logger, LoggerProvider, @@ -25,7 +24,6 @@ ) __all__ = [ - "LogData", "Logger", "LoggerProvider", "LoggingHandler", diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/__init__.py index 2d52b1bc74c..5bc8d1bde36 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/__init__.py @@ -183,6 +183,7 @@ def __init__( resource: Optional[Resource] = None, attributes: Optional[Attributes] = None, limits: Optional[LogLimits] = _UnsetLogLimits, + instrumentation_scope: Optional[InstrumentationScope] = None, ): super().__init__( **{ @@ -211,6 +212,7 @@ def __init__( LogDroppedAttributesWarning, stacklevel=2, ) + self.instrumentation_scope = instrumentation_scope def __eq__(self, other: object) -> bool: if not isinstance(other, LogRecord): @@ -252,18 +254,6 @@ def dropped_attributes(self) -> int: return 0 -class LogData: - """Readable LogRecord data plus associated 
InstrumentationLibrary.""" - - def __init__( - self, - log_record: LogRecord, - instrumentation_scope: InstrumentationScope, - ): - self.log_record = log_record - self.instrumentation_scope = instrumentation_scope - - class LogRecordProcessor(abc.ABC): """Interface to hook the log record emitting action. @@ -273,8 +263,8 @@ class LogRecordProcessor(abc.ABC): """ @abc.abstractmethod - def emit(self, log_data: LogData): - """Emits the `LogData`""" + def on_emit(self, log_record: LogRecord): + """Emits the `LogRecord`""" @abc.abstractmethod def shutdown(self): @@ -313,13 +303,13 @@ def __init__(self): def add_log_record_processor( self, log_record_processor: LogRecordProcessor ) -> None: - """Adds a Logprocessor to the list of log processors handled by this instance""" + """Adds a LogRecordProcessor to the list of log processors handled by this instance""" with self._lock: self._log_record_processors += (log_record_processor,) - def emit(self, log_data: LogData) -> None: + def on_emit(self, log_record: LogRecord) -> None: for lp in self._log_record_processors: - lp.emit(log_data) + lp.on_emit(log_record) def shutdown(self) -> None: """Shutdown the log processors one by one""" @@ -391,8 +381,8 @@ def _submit_and_wait( for future in futures: future.result() - def emit(self, log_data: LogData): - self._submit_and_wait(lambda lp: lp.emit, log_data) + def on_emit(self, log_record: LogRecord): + self._submit_and_wait(lambda lp: lp.on_emit, log_record) def shutdown(self): self._submit_and_wait(lambda lp: lp.shutdown) @@ -479,6 +469,7 @@ def _get_attributes(record: logging.LogRecord) -> Attributes: } # Add standard code attributes for logs. + # TODO: this should not be there by default - code namespace is not stable in semantic conventions. 
attributes[SpanAttributes.CODE_FILEPATH] = record.pathname attributes[SpanAttributes.CODE_FUNCTION] = record.funcName attributes[SpanAttributes.CODE_LINENO] = record.lineno @@ -567,6 +558,7 @@ def _translate(self, record: logging.LogRecord) -> LogRecord: body=body, resource=logger.resource, attributes=attributes, + instrumentation_scope=logger._instrumentation_scope, ) def emit(self, record: logging.LogRecord) -> None: @@ -614,11 +606,8 @@ def resource(self): return self._resource def emit(self, record: LogRecord): - """Emits the :class:`LogData` by associating :class:`LogRecord` - and instrumentation info. - """ - log_data = LogData(record, self._instrumentation_scope) - self._multi_log_record_processor.emit(log_data) + """Emits the :class:`LogRecord` to the log processors.""" + self._multi_log_record_processor.on_emit(record) class LoggerProvider(APILoggerProvider): @@ -720,6 +709,8 @@ def shutdown(self): if self._at_exit_handler is not None: atexit.unregister(self._at_exit_handler) self._at_exit_handler = None + # TODO + # self._disabled = True def force_flush(self, timeout_millis: int = 30000) -> bool: """Force flush the log processors. 
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/export/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/export/__init__.py index e5669580c4b..d5b1d9dbefa 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/export/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/export/__init__.py @@ -29,7 +29,8 @@ detach, set_value, ) -from opentelemetry.sdk._logs import LogData, LogRecord, LogRecordProcessor +from opentelemetry._logs import LogRecord as APILogRecord +from opentelemetry.sdk._logs import LogRecord, LogRecordProcessor from opentelemetry.sdk.environment_variables import ( OTEL_BLRP_EXPORT_TIMEOUT, OTEL_BLRP_MAX_EXPORT_BATCH_SIZE, @@ -65,11 +66,11 @@ class LogExporter(abc.ABC): """ @abc.abstractmethod - def export(self, batch: Sequence[LogData]): + def export(self, batch: Sequence[LogRecord]): """Exports a batch of logs. Args: - batch: The list of `LogData` objects to be exported + batch: The list of `LogRecord` objects to be exported Returns: The result of the export @@ -100,9 +101,9 @@ def __init__( self.out = out self.formatter = formatter - def export(self, batch: Sequence[LogData]): - for data in batch: - self.out.write(self.formatter(data.log_record)) + def export(self, batch: Sequence[LogRecord]): + for log_record in batch: + self.out.write(self.formatter(log_record)) self.out.flush() return LogExportResult.SUCCESS @@ -112,7 +113,7 @@ def shutdown(self): class SimpleLogRecordProcessor(LogRecordProcessor): """This is an implementation of LogRecordProcessor which passes - received logs in the export-friendly LogData representation to the + received logs in the export-friendly LogRecord to the configured LogExporter, as soon as they are emitted. 
""" @@ -120,13 +121,13 @@ def __init__(self, exporter: LogExporter): self._exporter = exporter self._shutdown = False - def emit(self, log_data: LogData): + def on_emit(self, log_record: APILogRecord): if self._shutdown: _logger.warning("Processor is already shutdown, ignoring call") return token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True)) try: - self._exporter.export((log_data,)) + self._exporter.export((log_record,)) except Exception: # pylint: disable=broad-exception-caught _logger.exception("Exception while exporting logs.") detach(token) @@ -152,7 +153,7 @@ def __init__(self): class BatchLogRecordProcessor(LogRecordProcessor): """This is an implementation of LogRecordProcessor which creates batches of - received logs in the export-friendly LogData representation and + received logs in the export-friendly LogRecord representation and send to the configured LogExporter, as soon as they are emitted. `BatchLogRecordProcessor` is configurable with the following environment @@ -164,9 +165,9 @@ class BatchLogRecordProcessor(LogRecordProcessor): - :envvar:`OTEL_BLRP_EXPORT_TIMEOUT` """ - _queue: Deque[LogData] + _queue: Deque[LogRecord] _flush_request: Optional[_FlushRequest] - _log_records: List[Optional[LogData]] + _log_records: List[Optional[LogRecord]] def __init__( self, @@ -341,8 +342,8 @@ def _get_or_create_flush_request(self) -> _FlushRequest: self._flush_request = _FlushRequest() return self._flush_request - def emit(self, log_data: LogData) -> None: - """Adds the `LogData` to queue and notifies the waiting threads + def on_emit(self, log_record: APILogRecord) -> None: + """Adds the `LogRecord` to queue and notifies the waiting threads when size of queue reaches max_export_batch_size. 
""" if self._shutdown: @@ -350,7 +351,7 @@ def emit(self, log_data: LogData) -> None: if self._pid != os.getpid(): _BSP_RESET_ONCE.do_once(self._at_fork_reinit) - self._queue.appendleft(log_data) + self._queue.appendleft(log_record) if len(self._queue) >= self._max_export_batch_size: with self._condition: self._condition.notify() diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/export/in_memory_log_exporter.py b/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/export/in_memory_log_exporter.py index 68cb6b7389a..b4e0239c246 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/export/in_memory_log_exporter.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/export/in_memory_log_exporter.py @@ -15,7 +15,7 @@ import threading import typing -from opentelemetry.sdk._logs import LogData +from opentelemetry.sdk._logs import LogRecord from opentelemetry.sdk._logs.export import LogExporter, LogExportResult @@ -36,11 +36,11 @@ def clear(self) -> None: with self._lock: self._logs.clear() - def get_finished_logs(self) -> typing.Tuple[LogData, ...]: + def get_finished_logs(self) -> typing.Tuple[LogRecord, ...]: with self._lock: return tuple(self._logs) - def export(self, batch: typing.Sequence[LogData]) -> LogExportResult: + def export(self, batch: typing.Sequence[LogRecord]) -> LogExportResult: if self._stopped: return LogExportResult.FAILURE with self._lock: diff --git a/opentelemetry-sdk/tests/events/test_events.py b/opentelemetry-sdk/tests/events/test_events.py index 7b8d42ff316..5aebd6d8fcb 100644 --- a/opentelemetry-sdk/tests/events/test_events.py +++ b/opentelemetry-sdk/tests/events/test_events.py @@ -158,6 +158,7 @@ def test_event_logger_emit(self, logger_mock, log_record_mock): "foo": "bar", "event.name": "test_event", }, + instrumentation_scope=event_logger._logger._instrumentation_scope, ) logger_mock_inst.emit.assert_called_once_with(log_record_mock_inst) diff --git a/opentelemetry-sdk/tests/logs/test_export.py 
b/opentelemetry-sdk/tests/logs/test_export.py index ce31d3991fc..fbd33764795 100644 --- a/opentelemetry-sdk/tests/logs/test_export.py +++ b/opentelemetry-sdk/tests/logs/test_export.py @@ -24,7 +24,6 @@ from opentelemetry._logs import SeverityNumber from opentelemetry.sdk import trace from opentelemetry.sdk._logs import ( - LogData, LoggerProvider, LoggingHandler, LogRecord, @@ -65,7 +64,7 @@ def test_simple_log_record_processor_default_level(self): logger.warning("Something is wrong") finished_logs = exporter.get_finished_logs() self.assertEqual(len(finished_logs), 1) - warning_log_record = finished_logs[0].log_record + warning_log_record = finished_logs[0] self.assertEqual(warning_log_record.body, "Something is wrong") self.assertEqual(warning_log_record.severity_text, "WARN") self.assertEqual( @@ -95,8 +94,8 @@ def test_simple_log_record_processor_custom_level(self): finished_logs = exporter.get_finished_logs() # Make sure only level >= logging.CRITICAL logs are recorded self.assertEqual(len(finished_logs), 2) - critical_log_record = finished_logs[0].log_record - fatal_log_record = finished_logs[1].log_record + critical_log_record = finished_logs[0] + fatal_log_record = finished_logs[1] self.assertEqual(critical_log_record.body, "Error message") self.assertEqual(critical_log_record.severity_text, "ERROR") self.assertEqual( @@ -129,7 +128,7 @@ def test_simple_log_record_processor_trace_correlation(self): logger.warning("Warning message") finished_logs = exporter.get_finished_logs() self.assertEqual(len(finished_logs), 1) - log_record = finished_logs[0].log_record + log_record = finished_logs[0] self.assertEqual(log_record.body, "Warning message") self.assertEqual(log_record.severity_text, "WARN") self.assertEqual(log_record.severity_number, SeverityNumber.WARN) @@ -148,7 +147,7 @@ def test_simple_log_record_processor_trace_correlation(self): logger.critical("Critical message within span") finished_logs = exporter.get_finished_logs() - log_record = 
finished_logs[0].log_record + log_record = finished_logs[0] self.assertEqual(log_record.body, "Critical message within span") self.assertEqual(log_record.severity_text, "CRITICAL") self.assertEqual(log_record.severity_number, SeverityNumber.FATAL) @@ -176,7 +175,7 @@ def test_simple_log_record_processor_shutdown(self): logger.warning("Something is wrong") finished_logs = exporter.get_finished_logs() self.assertEqual(len(finished_logs), 1) - warning_log_record = finished_logs[0].log_record + warning_log_record = finished_logs[0] self.assertEqual(warning_log_record.body, "Something is wrong") self.assertEqual(warning_log_record.severity_text, "WARN") self.assertEqual( @@ -221,7 +220,7 @@ def test_simple_log_record_processor_different_msg_types(self): ({"key": "value"}, "ERROR"), ] emitted = [ - (item.log_record.body, item.log_record.severity_text) + (item.body, item.severity_text) for item in finished_logs ] self.assertEqual(expected, emitted) @@ -274,7 +273,7 @@ def test_simple_log_record_processor_different_msg_types_with_formatter( ("different_msg_types - ERROR - {'key': 'value'}", "ERROR"), ] emitted = [ - (item.log_record.body, item.log_record.severity_text) + (item.body, item.severity_text) for item in finished_logs ] self.assertEqual(expected, emitted) @@ -292,7 +291,7 @@ def test_emit_call_log_record(self): logger.addHandler(LoggingHandler(logger_provider=provider)) logger.error("error") - self.assertEqual(log_record_processor.emit.call_count, 1) + self.assertEqual(log_record_processor.on_emit.call_count, 1) def test_args(self): exporter = InMemoryLogExporter() @@ -447,7 +446,7 @@ def test_shutdown(self): ), ] emitted = [ - (item.log_record.body, item.log_record.severity_text) + (item.body, item.severity_text) for item in finished_logs ] self.assertEqual(expected, emitted) @@ -469,7 +468,7 @@ def test_force_flush(self): log_record_processor.force_flush() finished_logs = exporter.get_finished_logs() self.assertEqual(len(finished_logs), 1) - log_record = 
finished_logs[0].log_record + log_record = finished_logs[0] self.assertEqual(log_record.body, "Earth is burning") self.assertEqual(log_record.severity_number, SeverityNumber.FATAL) self.assertEqual( @@ -577,18 +576,16 @@ def _target(): class TestConsoleLogExporter(unittest.TestCase): def test_export(self): # pylint: disable=no-self-use """Check that the console exporter prints log records.""" - log_data = LogData( - log_record=LogRecord( - timestamp=int(time.time() * 1e9), - trace_id=2604504634922341076776623263868986797, - span_id=5213367945872657620, - trace_flags=TraceFlags(0x01), - severity_text="WARN", - severity_number=SeverityNumber.WARN, - body="Zhengzhou, We have a heaviest rains in 1000 years", - resource=SDKResource({"key": "value"}), - attributes={"a": 1, "b": "c"}, - ), + log_record = LogRecord( + timestamp=int(time.time() * 1e9), + trace_id=2604504634922341076776623263868986797, + span_id=5213367945872657620, + trace_flags=TraceFlags(0x01), + severity_text="WARN", + severity_number=SeverityNumber.WARN, + body="Zhengzhou, We have a heaviest rains in 1000 years", + resource=SDKResource({"key": "value"}), + attributes={"a": 1, "b": "c"}, instrumentation_scope=InstrumentationScope( "first_name", "first_version" ), @@ -598,9 +595,9 @@ def test_export(self): # pylint: disable=no-self-use # the exporter instance instead. 
with patch.object(exporter, "out") as mock_stdout: - exporter.export([log_data]) + exporter.export([log_record]) mock_stdout.write.assert_called_once_with( - log_data.log_record.to_json() + os.linesep + log_record.to_json() + os.linesep ) self.assertEqual(mock_stdout.write.call_count, 1) @@ -615,11 +612,10 @@ def formatter(record): # pylint: disable=unused-argument mock_stdout = Mock() exporter = ConsoleLogExporter(out=mock_stdout, formatter=formatter) - log_data = LogData( - log_record=LogRecord(), + log_record = LogRecord( instrumentation_scope=InstrumentationScope( "first_name", "first_version" ), ) - exporter.export([log_data]) + exporter.export([log_record]) mock_stdout.write.assert_called_once_with(mock_record_str) diff --git a/opentelemetry-sdk/tests/logs/test_handler.py b/opentelemetry-sdk/tests/logs/test_handler.py index f6daa1b22cf..fea33029cb2 100644 --- a/opentelemetry-sdk/tests/logs/test_handler.py +++ b/opentelemetry-sdk/tests/logs/test_handler.py @@ -23,9 +23,9 @@ from opentelemetry.attributes import BoundedAttributes from opentelemetry.sdk import trace from opentelemetry.sdk._logs import ( - LogData, LoggerProvider, LoggingHandler, + LogRecord, LogRecordProcessor, ) from opentelemetry.semconv.trace import SpanAttributes @@ -313,10 +313,10 @@ def set_up_test_logging(level, formatter=None, root_logger=False): class FakeProcessor(LogRecordProcessor): def __init__(self): - self.log_data_emitted = [] + self.log_record_emitted = [] - def emit(self, log_data: LogData): - self.log_data_emitted.append(log_data) + def on_emit(self, log_record: LogRecord): + self.log_record_emitted.append(log_record) def shutdown(self): pass @@ -325,7 +325,7 @@ def force_flush(self, timeout_millis: int = 30000): pass def emit_count(self): - return len(self.log_data_emitted) + return len(self.log_record_emitted) def get_log_record(self, i): - return self.log_data_emitted[i].log_record + return self.log_record_emitted[i] diff --git 
a/opentelemetry-sdk/tests/logs/test_multi_log_processor.py b/opentelemetry-sdk/tests/logs/test_multi_log_processor.py index 110fedb9578..d043ab19589 100644 --- a/opentelemetry-sdk/tests/logs/test_multi_log_processor.py +++ b/opentelemetry-sdk/tests/logs/test_multi_log_processor.py @@ -38,11 +38,11 @@ def __init__(self, exporter, logs_list): self._log_list = logs_list self._closed = False - def emit(self, log_data): + def on_emit(self, log_record): if self._closed: return self._log_list.append( - (log_data.log_record.body, log_data.log_record.severity_text) + (log_record.body, log_record.severity_text) ) def shutdown(self): @@ -118,9 +118,9 @@ def test_on_emit(self): for mock in mocks: multi_log_record_processor.add_log_record_processor(mock) record = self.make_record() - multi_log_record_processor.emit(record) + multi_log_record_processor.on_emit(record) for mock in mocks: - mock.emit.assert_called_with(record) + mock.on_emit.assert_called_with(record) multi_log_record_processor.shutdown() def test_on_shutdown(self): diff --git a/opentelemetry-sdk/tests/test_configurator.py b/opentelemetry-sdk/tests/test_configurator.py index 034d5b8c9be..6a6060792d7 100644 --- a/opentelemetry-sdk/tests/test_configurator.py +++ b/opentelemetry-sdk/tests/test_configurator.py @@ -104,20 +104,21 @@ class DummyMeterProvider(MeterProvider): class DummyLogger: - def __init__(self, name, resource, processor): + def __init__(self, name, resource, processor, instrumentation_scope=None): self.name = name self.resource = resource + self._instrumentation_scope = instrumentation_scope self.processor = processor def emit(self, record): - self.processor.emit(record) + self.processor.on_emit(record) class DummyLogRecordProcessor: def __init__(self, exporter): self.exporter = exporter - def emit(self, record): + def on_emit(self, record): self.exporter.export([record]) def force_flush(self, time):