diff --git a/MANIFEST.in b/MANIFEST.in index bf746435c..0759bce87 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -9,3 +9,4 @@ include newrelic/packages/wrapt/LICENSE include newrelic/packages/wrapt/README include newrelic/packages/urllib3/LICENSE.txt include newrelic/packages/isort/LICENSE +include newrelic/packages/opentelemetry_proto/LICENSE.txt diff --git a/THIRD_PARTY_NOTICES.md b/THIRD_PARTY_NOTICES.md index a1dd7e07d..7c4242cc2 100644 --- a/THIRD_PARTY_NOTICES.md +++ b/THIRD_PARTY_NOTICES.md @@ -26,6 +26,15 @@ Distributed under the following license(s): * [The MIT License](http://opensource.org/licenses/MIT) +## [opentelemetry-proto](https://pypi.org/project/opentelemetry-proto) + +Copyright (c) The OpenTelemetry Authors + +Distributed under the following license(s): + +* [The Apache License, Version 2.0](https://opensource.org/license/apache-2-0/) + + ## [six](https://pypi.org/project/six) Copyright (c) 2010-2013 Benjamin Peterson diff --git a/newrelic/agent.py b/newrelic/agent.py index 95a540780..2c7f0fb85 100644 --- a/newrelic/agent.py +++ b/newrelic/agent.py @@ -59,6 +59,7 @@ from newrelic.api.transaction import record_custom_metric as __record_custom_metric from newrelic.api.transaction import record_custom_metrics as __record_custom_metrics from newrelic.api.transaction import record_log_event as __record_log_event +from newrelic.api.transaction import record_ml_event as __record_ml_event from newrelic.api.transaction import set_background_task as __set_background_task from newrelic.api.transaction import set_transaction_name as __set_transaction_name from newrelic.api.transaction import suppress_apdex_metric as __suppress_apdex_metric @@ -152,6 +153,7 @@ def __asgi_application(*args, **kwargs): from newrelic.api.message_transaction import ( wrap_message_transaction as __wrap_message_transaction, ) +from newrelic.api.ml_model import wrap_mlmodel as __wrap_mlmodel from newrelic.api.profile_trace import ProfileTraceWrapper as __ProfileTraceWrapper from newrelic.api.profile_trace import profile_trace as __profile_trace from newrelic.api.profile_trace import wrap_profile_trace as __wrap_profile_trace @@ -206,11 +208,6 @@ def __asgi_application(*args, **kwargs): # EXPERIMENTAL - Generator traces are currently experimental and may not # exist in this form in future versions of the agent. - -# EXPERIMENTAL - Profile traces are currently experimental and may not -# exist in this form in future versions of the agent. 
- - initialize = __initialize extra_settings = __wrap_api_call(__extra_settings, "extra_settings") global_settings = __wrap_api_call(__global_settings, "global_settings") @@ -248,6 +245,7 @@ def __asgi_application(*args, **kwargs): record_custom_metrics = __wrap_api_call(__record_custom_metrics, "record_custom_metrics") record_custom_event = __wrap_api_call(__record_custom_event, "record_custom_event") record_log_event = __wrap_api_call(__record_log_event, "record_log_event") +record_ml_event = __wrap_api_call(__record_ml_event, "record_ml_event") accept_distributed_trace_payload = __wrap_api_call( __accept_distributed_trace_payload, "accept_distributed_trace_payload" ) @@ -341,3 +339,4 @@ def __asgi_application(*args, **kwargs): wrap_out_function = __wrap_api_call(__wrap_out_function, "wrap_out_function") insert_html_snippet = __wrap_api_call(__insert_html_snippet, "insert_html_snippet") verify_body_exists = __wrap_api_call(__verify_body_exists, "verify_body_exists") +wrap_mlmodel = __wrap_api_call(__wrap_mlmodel, "wrap_mlmodel") diff --git a/newrelic/api/application.py b/newrelic/api/application.py index ea57829f2..e2e7be139 100644 --- a/newrelic/api/application.py +++ b/newrelic/api/application.py @@ -142,10 +142,22 @@ def record_custom_metrics(self, metrics): if self.active and metrics: self._agent.record_custom_metrics(self._name, metrics) + def record_dimensional_metric(self, name, value, tags=None): + if self.active: + self._agent.record_dimensional_metric(self._name, name, value, tags) + + def record_dimensional_metrics(self, metrics): + if self.active and metrics: + self._agent.record_dimensional_metrics(self._name, metrics) + def record_custom_event(self, event_type, params): if self.active: self._agent.record_custom_event(self._name, event_type, params) + def record_ml_event(self, event_type, params): + if self.active: + self._agent.record_ml_event(self._name, event_type, params) + def record_transaction(self, data): if self.active: self._agent.record_transaction(self._name, data) diff --git a/newrelic/api/ml_model.py b/newrelic/api/ml_model.py new file mode 100644 index 000000000..edbcaf340 --- /dev/null +++ b/newrelic/api/ml_model.py @@ -0,0 +1,35 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys + +from newrelic.common.object_names import callable_name +from newrelic.hooks.mlmodel_sklearn import _nr_instrument_model + + +def wrap_mlmodel(model, name=None, version=None, feature_names=None, label_names=None, metadata=None): + model_callable_name = callable_name(model) + _class = model.__class__.__name__ + module = sys.modules[model_callable_name.split(":")[0]] + _nr_instrument_model(module, _class) + if name: + model._nr_wrapped_name = name + if version: + model._nr_wrapped_version = version + if feature_names: + model._nr_wrapped_feature_names = feature_names + if label_names: + model._nr_wrapped_label_names = label_names + if metadata: + model._nr_wrapped_metadata = metadata diff --git a/newrelic/api/transaction.py b/newrelic/api/transaction.py index d2bfc8528..988b56be6 100644 --- a/newrelic/api/transaction.py +++ b/newrelic/api/transaction.py @@ -60,11 +60,15 @@ DST_NONE, DST_TRANSACTION_TRACER, ) -from newrelic.core.config import CUSTOM_EVENT_RESERVOIR_SIZE, LOG_EVENT_RESERVOIR_SIZE +from newrelic.core.config import ( + CUSTOM_EVENT_RESERVOIR_SIZE, + LOG_EVENT_RESERVOIR_SIZE, + ML_EVENT_RESERVOIR_SIZE, +) from newrelic.core.custom_event import create_custom_event from newrelic.core.log_event_node import LogEventNode from newrelic.core.stack_trace import exception_stack -from newrelic.core.stats_engine import CustomMetrics, SampledDataSet +from newrelic.core.stats_engine import CustomMetrics, DimensionalMetrics, SampledDataSet from newrelic.core.thread_utilization import utilization_tracker from newrelic.core.trace_cache import ( TraceCacheActiveTraceError, @@ -305,6 +309,7 @@ def __init__(self, application, enabled=None, source=None): self.synthetics_header = None self._custom_metrics = CustomMetrics() + self._dimensional_metrics = DimensionalMetrics() global_settings = application.global_settings @@ -328,12 +333,14 @@ def __init__(self, application, enabled=None, source=None): self._custom_events = SampledDataSet( capacity=self._settings.event_harvest_config.harvest_limits.custom_event_data ) + self._ml_events = SampledDataSet(capacity=self._settings.event_harvest_config.harvest_limits.ml_event_data) self._log_events = SampledDataSet( capacity=self._settings.event_harvest_config.harvest_limits.log_event_data ) else: self._custom_events = SampledDataSet(capacity=CUSTOM_EVENT_RESERVOIR_SIZE) self._log_events = SampledDataSet(capacity=LOG_EVENT_RESERVOIR_SIZE) + self._ml_events = SampledDataSet(capacity=ML_EVENT_RESERVOIR_SIZE) def __del__(self): self._dead = True @@ -580,10 +587,12 @@ def __exit__(self, exc, value, tb): errors=tuple(self._errors), slow_sql=tuple(self._slow_sql), custom_events=self._custom_events, + ml_events=self._ml_events, log_events=self._log_events, apdex_t=self.apdex, suppress_apdex=self.suppress_apdex, custom_metrics=self._custom_metrics, + dimensional_metrics=self._dimensional_metrics, guid=self.guid, cpu_time=self._cpu_user_time_value, suppress_transaction_trace=self.suppress_transaction_trace, @@ -1607,6 +1616,16 @@ def record_custom_metrics(self, metrics): for name, value in metrics: self._custom_metrics.record_custom_metric(name, value) + def record_dimensional_metric(self, name, value, tags=None): + self._dimensional_metrics.record_dimensional_metric(name, value, tags) + + def record_dimensional_metrics(self, metrics): + for metric in metrics: + name, value = metric[:2] + tags = metric[2] if len(metric) >= 3 else None + + self._dimensional_metrics.record_dimensional_metric(name, value, tags) + def record_custom_event(self, event_type, 
params): settings = self._settings @@ -1620,6 +1639,19 @@ def record_custom_event(self, event_type, params): if event: self._custom_events.add(event, priority=self.priority) + def record_ml_event(self, event_type, params): + settings = self._settings + + if not settings: + return + + if not settings.ml_insights_events.enabled: + return + + event = create_custom_event(event_type, params) + if event: + self._ml_events.add(event, priority=self.priority) + def _intern_string(self, value): return self._string_cache.setdefault(value, value) @@ -1913,6 +1945,44 @@ def record_custom_metrics(metrics, application=None): application.record_custom_metrics(metrics) +def record_dimensional_metric(name, value, tags=None, application=None): + if application is None: + transaction = current_transaction() + if transaction: + transaction.record_dimensional_metric(name, value, tags) + else: + _logger.debug( + "record_dimensional_metric has been called but no " + "transaction was running. As a result, the following metric " + "has not been recorded. Name: %r Value: %r Tags: %r. To correct this " + "problem, supply an application object as a parameter to this " + "record_dimensional_metric call.", + name, + value, + tags, + ) + elif application.enabled: + application.record_dimensional_metric(name, value, tags) + + +def record_dimensional_metrics(metrics, application=None): + if application is None: + transaction = current_transaction() + if transaction: + transaction.record_dimensional_metrics(metrics) + else: + _logger.debug( + "record_dimensional_metrics has been called but no " + "transaction was running. As a result, the following metrics " + "have not been recorded: %r. To correct this problem, " + "supply an application object as a parameter to this " + "record_dimensional_metrics call.", + list(metrics), + ) + elif application.enabled: + application.record_dimensional_metrics(metrics) + + def record_custom_event(event_type, params, application=None): """Record a custom event. @@ -1941,6 +2011,34 @@ def record_custom_event(event_type, params, application=None): application.record_custom_event(event_type, params) +def record_ml_event(event_type, params, application=None): + """Record a machine learning custom event. + + Args: + event_type (str): The type (name) of the ML event. + params (dict): Attributes to add to the event. + application (newrelic.api.Application): Application instance. + + """ + + if application is None: + transaction = current_transaction() + if transaction: + transaction.record_ml_event(event_type, params) + else: + _logger.debug( + "record_ml_event has been called but no " + "transaction was running. As a result, the following event " + "has not been recorded. event_type: %r params: %r. To correct " + "this problem, supply an application object as a parameter to " + "this record_ml_event call.", + event_type, + params, + ) + elif application.enabled: + application.record_ml_event(event_type, params) + + def record_log_event(message, level=None, timestamp=None, application=None, priority=None): """Record a log event. 
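A usage sketch for the public APIs added above (record_ml_event, record_dimensional_metric, record_dimensional_metrics and wrap_mlmodel, exported through newrelic/agent.py earlier in this patch). The event type, metric names, tags and the `model` object are illustrative assumptions, not part of this change, and the calls presume an already-initialized, registered agent:

    import newrelic.agent

    application = newrelic.agent.application()

    # Inside a transaction the application argument may be omitted; outside
    # of one it must be supplied, or the call is dropped with a debug log.
    newrelic.agent.record_ml_event(
        "MyInferenceEvent",  # illustrative event type
        {"model_name": "my_model", "prediction": 0.97},
        application=application,
    )

    # Dimensional metrics take an optional tags dict; the plural form
    # accepts (name, value) or (name, value, tags) tuples.
    newrelic.agent.record_dimensional_metric(
        "Model/Prediction/Count", 1, tags={"model": "my_model"}, application=application
    )
    newrelic.agent.record_dimensional_metrics(
        [("Model/Prediction/Duration", 0.012, {"model": "my_model"})],
        application=application,
    )

    # wrap_mlmodel attaches naming metadata to a fitted scikit-learn
    # estimator and instruments its class via the new mlmodel_sklearn hooks;
    # `model` stands in for any fitted estimator.
    # newrelic.agent.wrap_mlmodel(model, name="MyModel", version="1.0.0")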
diff --git a/newrelic/common/agent_http.py b/newrelic/common/agent_http.py index e1ba0b345..89876a60c 100644 --- a/newrelic/common/agent_http.py +++ b/newrelic/common/agent_http.py @@ -92,6 +92,7 @@ def __init__( compression_method="gzip", max_payload_size_in_bytes=1000000, audit_log_fp=None, + default_content_encoding_header="Identity", ): self._audit_log_fp = audit_log_fp @@ -112,9 +113,7 @@ def _supportability_request(params, payload, body, compression_time): pass @classmethod - def log_request( - cls, fp, method, url, params, payload, headers, body=None, compression_time=None - ): + def log_request(cls, fp, method, url, params, payload, headers, body=None, compression_time=None): cls._supportability_request(params, payload, body, compression_time) if not fp: @@ -126,7 +125,8 @@ def log_request( cls.AUDIT_LOG_ID += 1 print( - "TIME: %r" % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), file=fp, + "TIME: %r" % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), + file=fp, ) print(file=fp) print("ID: %r" % cls.AUDIT_LOG_ID, file=fp) @@ -178,9 +178,7 @@ def log_response(cls, fp, log_id, status, headers, data, connection="direct"): except Exception: result = data - print( - "TIME: %r" % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), file=fp - ) + print("TIME: %r" % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), file=fp) print(file=fp) print("ID: %r" % log_id, file=fp) print(file=fp) @@ -219,9 +217,7 @@ def send_request( class HttpClient(BaseClient): CONNECTION_CLS = urllib3.HTTPSConnectionPool PREFIX_SCHEME = "https://" - BASE_HEADERS = urllib3.make_headers( - keep_alive=True, accept_encoding=True, user_agent=USER_AGENT - ) + BASE_HEADERS = urllib3.make_headers(keep_alive=True, accept_encoding=True, user_agent=USER_AGENT) def __init__( self, @@ -240,6 +236,7 @@ def __init__( compression_method="gzip", max_payload_size_in_bytes=1000000, audit_log_fp=None, + default_content_encoding_header="Identity", ): self._host = host port = self._port = port @@ -248,6 +245,7 @@ def __init__( self._compression_method = compression_method self._max_payload_size_in_bytes = max_payload_size_in_bytes self._audit_log_fp = audit_log_fp + self._default_content_encoding_header = default_content_encoding_header self._prefix = "" @@ -265,9 +263,7 @@ def __init__( # required and report this condition as a supportability metric. 
if not verify_path.cafile and not verify_path.capath: ca_bundle_path = certs.where() - internal_metric( - "Supportability/Python/Certificate/BundleRequired", 1 - ) + internal_metric("Supportability/Python/Certificate/BundleRequired", 1) if ca_bundle_path: if os.path.isdir(ca_bundle_path): @@ -279,11 +275,13 @@ def __init__( connection_kwargs["cert_reqs"] = "NONE" proxy = self._parse_proxy( - proxy_scheme, proxy_host, proxy_port, proxy_user, proxy_pass, - ) - proxy_headers = ( - proxy and proxy.auth and urllib3.make_headers(proxy_basic_auth=proxy.auth) + proxy_scheme, + proxy_host, + proxy_port, + proxy_user, + proxy_pass, ) + proxy_headers = proxy and proxy.auth and urllib3.make_headers(proxy_basic_auth=proxy.auth) if proxy: if self.CONNECTION_CLS.scheme == "https" and proxy.scheme != "https": @@ -343,15 +341,9 @@ def _connection(self): if self._connection_attr: return self._connection_attr - retries = urllib3.Retry( - total=False, connect=None, read=None, redirect=0, status=None - ) + retries = urllib3.Retry(total=False, connect=None, read=None, redirect=0, status=None) self._connection_attr = self.CONNECTION_CLS( - self._host, - self._port, - strict=True, - retries=retries, - **self._connection_kwargs + self._host, self._port, strict=True, retries=retries, **self._connection_kwargs ) return self._connection_attr @@ -374,9 +366,7 @@ def log_request( if not self._prefix: url = self.CONNECTION_CLS.scheme + "://" + self._host + url - return super(HttpClient, self).log_request( - fp, method, url, params, payload, headers, body, compression_time - ) + return super(HttpClient, self).log_request(fp, method, url, params, payload, headers, body, compression_time) @staticmethod def _compress(data, method="gzip", level=None): @@ -419,11 +409,9 @@ def send_request( method=self._compression_method, level=self._compression_level, ) - content_encoding = self._compression_method - else: - content_encoding = "Identity" - - merged_headers["Content-Encoding"] = content_encoding + merged_headers["Content-Encoding"] = self._compression_method + elif self._default_content_encoding_header: + merged_headers["Content-Encoding"] = self._default_content_encoding_header request_id = self.log_request( self._audit_log_fp, @@ -441,16 +429,16 @@ def send_request( try: response = self._connection.request_encode_url( - method, - path, - fields=params, - body=body, - headers=merged_headers, - **self._urlopen_kwargs + method, path, fields=params, body=body, headers=merged_headers, **self._urlopen_kwargs ) except urllib3.exceptions.HTTPError as e: self.log_response( - self._audit_log_fp, request_id, 0, None, None, connection, + self._audit_log_fp, + request_id, + 0, + None, + None, + connection, ) # All urllib3 HTTP errors should be treated as a network # interface exception. 
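The Content-Encoding handling in send_request above reduces to the following decision; a minimal standalone sketch, where the helper name and parameters are illustrative rather than agent code:

    def content_encoding_header(payload_size, threshold, method="gzip", default="Identity"):
        # Payloads over the compression threshold are compressed and
        # advertise the configured compression method.
        if payload_size > threshold:
            return {"Content-Encoding": method}
        # Uncompressed payloads used to send "Identity" unconditionally; the
        # new default_content_encoding_header setting keeps that default but
        # allows None, which omits the header entirely (the OTLP client later
        # in this patch passes None).
        if default:
            return {"Content-Encoding": default}
        return {}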
@@ -489,6 +477,7 @@ def __init__( compression_method="gzip", max_payload_size_in_bytes=1000000, audit_log_fp=None, + default_content_encoding_header="Identity", ): proxy = self._parse_proxy(proxy_scheme, proxy_host, None, None, None) if proxy and proxy.scheme == "https": @@ -515,6 +504,7 @@ def __init__( compression_method, max_payload_size_in_bytes, audit_log_fp, + default_content_encoding_header, ) @@ -536,9 +526,7 @@ def _supportability_request(params, payload, body, compression_time): "Supportability/Python/Collector/%s/ZLIB/Bytes" % agent_method, len(body), ) - internal_metric( - "Supportability/Python/Collector/ZLIB/Bytes", len(body) - ) + internal_metric("Supportability/Python/Collector/ZLIB/Bytes", len(body)) internal_metric( "Supportability/Python/Collector/%s/ZLIB/Compress" % agent_method, compression_time, @@ -548,28 +536,21 @@ def _supportability_request(params, payload, body, compression_time): len(payload), ) # Top level metric to aggregate overall bytes being sent - internal_metric( - "Supportability/Python/Collector/Output/Bytes", len(payload) - ) + internal_metric("Supportability/Python/Collector/Output/Bytes", len(payload)) @staticmethod def _supportability_response(status, exc, connection="direct"): if exc or not 200 <= status < 300: internal_count_metric("Supportability/Python/Collector/Failures", 1) - internal_count_metric( - "Supportability/Python/Collector/Failures/%s" % connection, 1 - ) + internal_count_metric("Supportability/Python/Collector/Failures/%s" % connection, 1) if exc: internal_count_metric( - "Supportability/Python/Collector/Exception/" - "%s" % callable_name(exc), + "Supportability/Python/Collector/Exception/" "%s" % callable_name(exc), 1, ) else: - internal_count_metric( - "Supportability/Python/Collector/HTTPError/%d" % status, 1 - ) + internal_count_metric("Supportability/Python/Collector/HTTPError/%d" % status, 1) class ApplicationModeClient(SupportabilityMixin, HttpClient): @@ -578,33 +559,31 @@ class ApplicationModeClient(SupportabilityMixin, HttpClient): class DeveloperModeClient(SupportabilityMixin, BaseClient): RESPONSES = { - "preconnect": {u"redirect_host": u"fake-collector.newrelic.com"}, + "preconnect": {"redirect_host": "fake-collector.newrelic.com"}, "agent_settings": [], "connect": { - u"js_agent_loader": u"", - u"js_agent_file": u"fake-js-agent.newrelic.com/nr-0.min.js", - u"browser_key": u"1234567890", - u"browser_monitoring.loader_version": u"0", - u"beacon": u"fake-beacon.newrelic.com", - u"error_beacon": u"fake-jserror.newrelic.com", - u"apdex_t": 0.5, - u"encoding_key": u"1111111111111111111111111111111111111111", - u"entity_guid": u"DEVELOPERMODEENTITYGUID", - u"agent_run_id": u"1234567", - u"product_level": 50, - u"trusted_account_ids": [12345], - u"trusted_account_key": u"12345", - u"url_rules": [], - u"collect_errors": True, - u"account_id": u"12345", - u"cross_process_id": u"12345#67890", - u"messages": [ - {u"message": u"Reporting to fake collector", u"level": u"INFO"} - ], - u"sampling_rate": 0, - u"collect_traces": True, - u"collect_span_events": True, - u"data_report_period": 60, + "js_agent_loader": "", + "js_agent_file": "fake-js-agent.newrelic.com/nr-0.min.js", + "browser_key": "1234567890", + "browser_monitoring.loader_version": "0", + "beacon": "fake-beacon.newrelic.com", + "error_beacon": "fake-jserror.newrelic.com", + "apdex_t": 0.5, + "encoding_key": "1111111111111111111111111111111111111111", + "entity_guid": "DEVELOPERMODEENTITYGUID", + "agent_run_id": "1234567", + "product_level": 50, + "trusted_account_ids": 
[12345], + "trusted_account_key": "12345", + "url_rules": [], + "collect_errors": True, + "account_id": "12345", + "cross_process_id": "12345#67890", + "messages": [{"message": "Reporting to fake collector", "level": "INFO"}], + "sampling_rate": 0, + "collect_traces": True, + "collect_span_events": True, + "data_report_period": 60, }, "metric_data": None, "get_agent_commands": [], @@ -648,7 +627,11 @@ def send_request( payload = {"return_value": result} response_data = json_encode(payload).encode("utf-8") self.log_response( - self._audit_log_fp, request_id, 200, {}, response_data, + self._audit_log_fp, + request_id, + 200, + {}, + response_data, ) return 200, response_data diff --git a/newrelic/common/metric_utils.py b/newrelic/common/metric_utils.py new file mode 100644 index 000000000..ebffe8332 --- /dev/null +++ b/newrelic/common/metric_utils.py @@ -0,0 +1,35 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This module implements functions for creating a unique identity from a name and set of tags for use in dimensional metrics. +""" + +from newrelic.core.attribute import process_user_attribute + + +def create_metric_identity(name, tags=None): + if tags: + # Convert dicts to an iterable of tuples, other iterables should already be in this form + if isinstance(tags, dict): + tags = tags.items() + + # Apply attribute system sanitization. + # process_user_attribute returns (None, None) for results that fail sanitization. + # The filter removes these results from the iterable before creating the frozenset. 
+ tags = frozenset(filter(lambda args: args[0] is not None, map(lambda args: process_user_attribute(*args), tags))) + + tags = tags or None # Set empty iterables after filtering to None + + return (name, tags) diff --git a/newrelic/config.py b/newrelic/config.py index 87c050d77..6816c43b5 100644 --- a/newrelic/config.py +++ b/newrelic/config.py @@ -328,6 +328,8 @@ def _process_configuration(section): _process_setting(section, "api_key", "get", None) _process_setting(section, "host", "get", None) _process_setting(section, "port", "getint", None) + _process_setting(section, "otlp_host", "get", None) + _process_setting(section, "otlp_port", "getint", None) _process_setting(section, "ssl", "getboolean", None) _process_setting(section, "proxy_scheme", "get", None) _process_setting(section, "proxy_host", "get", None) @@ -441,6 +443,7 @@ def _process_configuration(section): ) _process_setting(section, "custom_insights_events.enabled", "getboolean", None) _process_setting(section, "custom_insights_events.max_samples_stored", "getint", None) + _process_setting(section, "ml_insights_events.enabled", "getboolean", None) _process_setting(section, "distributed_tracing.enabled", "getboolean", None) _process_setting(section, "distributed_tracing.exclude_newrelic_header", "getboolean", None) _process_setting(section, "span_events.enabled", "getboolean", None) @@ -500,6 +503,7 @@ def _process_configuration(section): _process_setting(section, "debug.disable_certificate_validation", "getboolean", None) _process_setting(section, "debug.disable_harvest_until_shutdown", "getboolean", None) _process_setting(section, "debug.connect_span_stream_in_developer_mode", "getboolean", None) + _process_setting(section, "debug.otlp_content_encoding", "get", None) _process_setting(section, "cross_application_tracer.enabled", "getboolean", None) _process_setting(section, "message_tracer.segment_parameters_enabled", "getboolean", None) _process_setting(section, "process_host.display_name", "get", None) @@ -534,6 +538,7 @@ def _process_configuration(section): None, ) _process_setting(section, "event_harvest_config.harvest_limits.custom_event_data", "getint", None) + _process_setting(section, "event_harvest_config.harvest_limits.ml_event_data", "getint", None) _process_setting(section, "event_harvest_config.harvest_limits.span_event_data", "getint", None) _process_setting(section, "event_harvest_config.harvest_limits.error_event_data", "getint", None) _process_setting(section, "event_harvest_config.harvest_limits.log_event_data", "getint", None) @@ -550,6 +555,9 @@ def _process_configuration(section): _process_setting(section, "application_logging.metrics.enabled", "getboolean", None) _process_setting(section, "application_logging.local_decorating.enabled", "getboolean", None) + _process_setting(section, "machine_learning.enabled", "getboolean", None) + _process_setting(section, "machine_learning.inference_events_value.enabled", "getboolean", None) + # Loading of configuration from specified file and for specified # deployment environment. 
Can also indicate whether configuration @@ -881,6 +889,10 @@ def apply_local_high_security_mode_setting(settings): settings.custom_insights_events.enabled = False _logger.info(log_template, "custom_insights_events.enabled", True, False) + if settings.ml_insights_events.enabled: + settings.ml_insights_events.enabled = False + _logger.info(log_template, "ml_insights_events.enabled", True, False) + if settings.message_tracer.segment_parameters_enabled: settings.message_tracer.segment_parameters_enabled = False _logger.info(log_template, "message_tracer.segment_parameters_enabled", True, False) @@ -889,6 +901,10 @@ def apply_local_high_security_mode_setting(settings): settings.application_logging.forwarding.enabled = False _logger.info(log_template, "application_logging.forwarding.enabled", True, False) + if settings.machine_learning.inference_events_value.enabled: + settings.machine_learning.inference_events_value.enabled = False + _logger.info(log_template, "machine_learning.inference_events_value.enabled", True, False) + return settings @@ -2988,6 +3004,756 @@ def _process_module_builtin_defaults(): ) _process_module_definition("tastypie.api", "newrelic.hooks.component_tastypie", "instrument_tastypie_api") + _process_module_definition( + "sklearn.metrics", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_metrics", + ) + + _process_module_definition( + "sklearn.tree._classes", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_tree_models", + ) + # In scikit-learn < 0.21 the model classes are in tree.py instead of _classes.py. + _process_module_definition( + "sklearn.tree.tree", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_tree_models", + ) + + _process_module_definition( + "sklearn.compose._column_transformer", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_compose_models", + ) + + _process_module_definition( + "sklearn.compose._target", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_compose_models", + ) + + _process_module_definition( + "sklearn.covariance._empirical_covariance", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_covariance_models", + ) + + _process_module_definition( + "sklearn.covariance.empirical_covariance_", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_covariance_models", + ) + + _process_module_definition( + "sklearn.covariance.shrunk_covariance_", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_covariance_shrunk_models", + ) + + _process_module_definition( + "sklearn.covariance._shrunk_covariance", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_covariance_shrunk_models", + ) + + _process_module_definition( + "sklearn.covariance.robust_covariance_", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_covariance_models", + ) + + _process_module_definition( + "sklearn.covariance._robust_covariance", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_covariance_models", + ) + + _process_module_definition( + "sklearn.covariance.graph_lasso_", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_covariance_graph_models", + ) + + _process_module_definition( + "sklearn.covariance._graph_lasso", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_covariance_graph_models", + ) + + _process_module_definition( + "sklearn.covariance.elliptic_envelope", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_covariance_models", + ) + + _process_module_definition( + "sklearn.covariance._elliptic_envelope", + "newrelic.hooks.mlmodel_sklearn", + 
"instrument_sklearn_covariance_models", + ) + + _process_module_definition( + "sklearn.ensemble._bagging", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_ensemble_bagging_models", + ) + + _process_module_definition( + "sklearn.ensemble.bagging", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_ensemble_bagging_models", + ) + + _process_module_definition( + "sklearn.ensemble._forest", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_ensemble_forest_models", + ) + + _process_module_definition( + "sklearn.ensemble.forest", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_ensemble_forest_models", + ) + + _process_module_definition( + "sklearn.ensemble._iforest", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_ensemble_iforest_models", + ) + + _process_module_definition( + "sklearn.ensemble.iforest", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_ensemble_iforest_models", + ) + + _process_module_definition( + "sklearn.ensemble._weight_boosting", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_ensemble_weight_boosting_models", + ) + + _process_module_definition( + "sklearn.ensemble.weight_boosting", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_ensemble_weight_boosting_models", + ) + + _process_module_definition( + "sklearn.ensemble._gb", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_ensemble_gradient_boosting_models", + ) + + _process_module_definition( + "sklearn.ensemble.gradient_boosting", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_ensemble_gradient_boosting_models", + ) + + _process_module_definition( + "sklearn.ensemble._voting", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_ensemble_voting_models", + ) + + _process_module_definition( + "sklearn.ensemble.voting_classifier", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_ensemble_voting_models", + ) + + _process_module_definition( + "sklearn.ensemble._stacking", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_ensemble_stacking_models", + ) + + _process_module_definition( + "sklearn.ensemble._hist_gradient_boosting.gradient_boosting", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_ensemble_hist_models", + ) + + _process_module_definition( + "sklearn.linear_model._base", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_models", + ) + + _process_module_definition( + "sklearn.linear_model.base", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_models", + ) + + _process_module_definition( + "sklearn.linear_model._bayes", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_bayes_models", + ) + + _process_module_definition( + "sklearn.linear_model.bayes", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_bayes_models", + ) + + _process_module_definition( + "sklearn.linear_model._least_angle", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_least_angle_models", + ) + + _process_module_definition( + "sklearn.linear_model.least_angle", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_least_angle_models", + ) + + _process_module_definition( + "sklearn.linear_model.coordinate_descent", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_coordinate_descent_models", + ) + + _process_module_definition( + "sklearn.linear_model._coordinate_descent", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_coordinate_descent_models", + ) + + _process_module_definition( + 
"sklearn.linear_model._glm", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_GLM_models", + ) + + _process_module_definition( + "sklearn.linear_model._huber", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_models", + ) + + _process_module_definition( + "sklearn.linear_model.huber", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_models", + ) + + _process_module_definition( + "sklearn.linear_model._stochastic_gradient", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_stochastic_gradient_models", + ) + + _process_module_definition( + "sklearn.linear_model.stochastic_gradient", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_stochastic_gradient_models", + ) + + _process_module_definition( + "sklearn.linear_model._ridge", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_ridge_models", + ) + + _process_module_definition( + "sklearn.linear_model.ridge", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_ridge_models", + ) + + _process_module_definition( + "sklearn.linear_model._logistic", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_logistic_models", + ) + + _process_module_definition( + "sklearn.linear_model.logistic", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_logistic_models", + ) + + _process_module_definition( + "sklearn.linear_model._omp", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_OMP_models", + ) + + _process_module_definition( + "sklearn.linear_model.omp", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_OMP_models", + ) + + _process_module_definition( + "sklearn.linear_model._passive_aggressive", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_passive_aggressive_models", + ) + + _process_module_definition( + "sklearn.linear_model.passive_aggressive", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_passive_aggressive_models", + ) + + _process_module_definition( + "sklearn.linear_model._perceptron", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_models", + ) + + _process_module_definition( + "sklearn.linear_model.perceptron", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_models", + ) + + _process_module_definition( + "sklearn.linear_model._quantile", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_models", + ) + + _process_module_definition( + "sklearn.linear_model._ransac", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_models", + ) + + _process_module_definition( + "sklearn.linear_model.ransac", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_models", + ) + + _process_module_definition( + "sklearn.linear_model._theil_sen", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_models", + ) + + _process_module_definition( + "sklearn.linear_model.theil_sen", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_linear_models", + ) + + _process_module_definition( + "sklearn.cross_decomposition._pls", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_cross_decomposition_models", + ) + + _process_module_definition( + "sklearn.cross_decomposition.pls_", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_cross_decomposition_models", + ) + + _process_module_definition( + "sklearn.discriminant_analysis", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_discriminant_analysis_models", + ) + + _process_module_definition( + 
"sklearn.gaussian_process._gpc", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_gaussian_process_models", + ) + + _process_module_definition( + "sklearn.gaussian_process.gpc", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_gaussian_process_models", + ) + + _process_module_definition( + "sklearn.gaussian_process._gpr", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_gaussian_process_models", + ) + + _process_module_definition( + "sklearn.gaussian_process.gpr", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_gaussian_process_models", + ) + + _process_module_definition( + "sklearn.dummy", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_dummy_models", + ) + + _process_module_definition( + "sklearn.feature_selection._rfe", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_feature_selection_rfe_models", + ) + + _process_module_definition( + "sklearn.feature_selection.rfe", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_feature_selection_rfe_models", + ) + + _process_module_definition( + "sklearn.feature_selection._variance_threshold", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_feature_selection_models", + ) + + _process_module_definition( + "sklearn.feature_selection.variance_threshold", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_feature_selection_models", + ) + + _process_module_definition( + "sklearn.feature_selection._from_model", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_feature_selection_models", + ) + + _process_module_definition( + "sklearn.feature_selection.from_model", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_feature_selection_models", + ) + + _process_module_definition( + "sklearn.feature_selection._sequential", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_feature_selection_models", + ) + + _process_module_definition( + "sklearn.kernel_ridge", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_kernel_ridge_models", + ) + + _process_module_definition( + "sklearn.neural_network._multilayer_perceptron", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_neural_network_models", + ) + + _process_module_definition( + "sklearn.neural_network.multilayer_perceptron", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_neural_network_models", + ) + + _process_module_definition( + "sklearn.neural_network._rbm", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_neural_network_models", + ) + + _process_module_definition( + "sklearn.neural_network.rbm", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_neural_network_models", + ) + + _process_module_definition( + "sklearn.calibration", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_calibration_models", + ) + + _process_module_definition( + "sklearn.cluster._affinity_propagation", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_cluster_models", + ) + + _process_module_definition( + "sklearn.cluster.affinity_propagation_", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_cluster_models", + ) + + _process_module_definition( + "sklearn.cluster._agglomerative", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_cluster_agglomerative_models", + ) + + _process_module_definition( + "sklearn.cluster.hierarchical", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_cluster_agglomerative_models", + ) + + _process_module_definition( + "sklearn.cluster._birch", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_cluster_models", + ) 
+ + _process_module_definition( + "sklearn.cluster.birch", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_cluster_models", + ) + + _process_module_definition( + "sklearn.cluster._bisect_k_means", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_cluster_kmeans_models", + ) + + _process_module_definition( + "sklearn.cluster._dbscan", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_cluster_models", + ) + + _process_module_definition( + "sklearn.cluster.dbscan_", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_cluster_models", + ) + + _process_module_definition( + "sklearn.cluster._feature_agglomeration", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_cluster_models", + ) + + _process_module_definition( + "sklearn.cluster._kmeans", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_cluster_kmeans_models", + ) + + _process_module_definition( + "sklearn.cluster.k_means_", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_cluster_kmeans_models", + ) + + _process_module_definition( + "sklearn.cluster._mean_shift", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_cluster_models", + ) + + _process_module_definition( + "sklearn.cluster.mean_shift_", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_cluster_models", + ) + + _process_module_definition( + "sklearn.cluster._optics", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_cluster_models", + ) + + _process_module_definition( + "sklearn.cluster._spectral", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_cluster_clustering_models", + ) + + _process_module_definition( + "sklearn.cluster.spectral", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_cluster_clustering_models", + ) + + _process_module_definition( + "sklearn.cluster._bicluster", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_cluster_clustering_models", + ) + + _process_module_definition( + "sklearn.cluster.bicluster", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_cluster_clustering_models", + ) + + _process_module_definition( + "sklearn.multiclass", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_multiclass_models", + ) + + _process_module_definition( + "sklearn.multioutput", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_multioutput_models", + ) + + _process_module_definition( + "sklearn.naive_bayes", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_naive_bayes_models", + ) + + _process_module_definition( + "sklearn.model_selection._search", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_model_selection_models", + ) + + _process_module_definition( + "sklearn.mixture._bayesian_mixture", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_mixture_models", + ) + + _process_module_definition( + "sklearn.mixture.bayesian_mixture", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_mixture_models", + ) + + _process_module_definition( + "sklearn.mixture._gaussian_mixture", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_mixture_models", + ) + + _process_module_definition( + "sklearn.mixture.gaussian_mixture", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_mixture_models", + ) + + _process_module_definition( + "sklearn.pipeline", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_pipeline_models", + ) + + _process_module_definition( + "sklearn.semi_supervised._label_propagation", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_semi_supervised_models", + ) + + 
_process_module_definition( + "sklearn.semi_supervised._self_training", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_semi_supervised_models", + ) + + _process_module_definition( + "sklearn.semi_supervised.label_propagation", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_semi_supervised_models", + ) + + _process_module_definition( + "sklearn.svm._classes", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_svm_models", + ) + + _process_module_definition( + "sklearn.svm.classes", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_svm_models", + ) + + _process_module_definition( + "sklearn.neighbors._classification", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_neighbors_KRadius_models", + ) + + _process_module_definition( + "sklearn.neighbors.classification", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_neighbors_KRadius_models", + ) + + _process_module_definition( + "sklearn.neighbors._graph", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_neighbors_KRadius_models", + ) + + _process_module_definition( + "sklearn.neighbors._kde", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_neighbors_models", + ) + + _process_module_definition( + "sklearn.neighbors.kde", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_neighbors_models", + ) + + _process_module_definition( + "sklearn.neighbors._lof", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_neighbors_models", + ) + + _process_module_definition( + "sklearn.neighbors.lof", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_neighbors_models", + ) + + _process_module_definition( + "sklearn.neighbors._nca", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_neighbors_models", + ) + + _process_module_definition( + "sklearn.neighbors._nearest_centroid", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_neighbors_models", + ) + + _process_module_definition( + "sklearn.neighbors.nearest_centroid", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_neighbors_models", + ) + + _process_module_definition( + "sklearn.neighbors._regression", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_neighbors_KRadius_models", + ) + + _process_module_definition( + "sklearn.neighbors.regression", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_neighbors_KRadius_models", + ) + + _process_module_definition( + "sklearn.neighbors._unsupervised", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_neighbors_models", + ) + + _process_module_definition( + "sklearn.neighbors.unsupervised", + "newrelic.hooks.mlmodel_sklearn", + "instrument_sklearn_neighbors_models", + ) + _process_module_definition( "rest_framework.views", "newrelic.hooks.component_djangorestframework", @@ -3020,9 +3786,7 @@ def _process_module_builtin_defaults(): "newrelic.hooks.application_celery", "instrument_celery_worker", ) - # _process_module_definition('celery.loaders.base', - # 'newrelic.hooks.application_celery', - # 'instrument_celery_loaders_base') + _process_module_definition( "celery.execute.trace", "newrelic.hooks.application_celery", diff --git a/newrelic/core/agent.py b/newrelic/core/agent.py index 6ab9571a4..9d9aadab1 100644 --- a/newrelic/core/agent.py +++ b/newrelic/core/agent.py @@ -524,6 +524,33 @@ def record_custom_metrics(self, app_name, metrics): application.record_custom_metrics(metrics) + def record_dimensional_metric(self, app_name, name, value, tags=None): + """Records a dimensional metric for the named application. 
If there has + been no prior request to activate the application, the metric is + discarded. + + """ + + application = self._applications.get(app_name, None) + if application is None or not application.active: + return + + application.record_dimensional_metric(name, value, tags) + + def record_dimensional_metrics(self, app_name, metrics): + """Records the metrics for the named application. If there has + been no prior request to activate the application, the metric is + discarded. The metrics should be an iterable yielding tuples + consisting of the name and value. + + """ + + application = self._applications.get(app_name, None) + if application is None or not application.active: + return + + application.record_dimensional_metrics(metrics) + def record_custom_event(self, app_name, event_type, params): application = self._applications.get(app_name, None) if application is None or not application.active: @@ -531,6 +558,13 @@ def record_custom_event(self, app_name, event_type, params): application.record_custom_event(event_type, params) + def record_ml_event(self, app_name, event_type, params): + application = self._applications.get(app_name, None) + if application is None or not application.active: + return + + application.record_ml_event(event_type, params) + def record_log_event(self, app_name, message, level=None, timestamp=None, priority=None): application = self._applications.get(app_name, None) if application is None or not application.active: diff --git a/newrelic/core/agent_protocol.py b/newrelic/core/agent_protocol.py index ba277d4de..dd4dc264f 100644 --- a/newrelic/core/agent_protocol.py +++ b/newrelic/core/agent_protocol.py @@ -38,6 +38,7 @@ global_settings_dump, ) from newrelic.core.internal_metrics import internal_count_metric +from newrelic.core.otlp_utils import OTLP_CONTENT_TYPE, otlp_encode from newrelic.network.exceptions import ( DiscardDataForRequest, ForceAgentDisconnect, @@ -143,7 +144,9 @@ class AgentProtocol(object): "transaction_tracer.record_sql", "strip_exception_messages.enabled", "custom_insights_events.enabled", + "ml_insights_events.enabled", "application_logging.forwarding.enabled", + "machine_learning.inference_events_value.enabled", ) LOGGER_FUNC_MAPPING = { @@ -215,11 +218,16 @@ def __exit__(self, exc, value, tb): def close_connection(self): self.client.close_connection() - def send(self, method, payload=()): + def send( + self, + method, + payload=(), + path="/agent_listener/invoke_raw_method", + ): params, headers, payload = self._to_http(method, payload) try: - response = self.client.send_request(params=params, headers=headers, payload=payload) + response = self.client.send_request(path=path, params=params, headers=headers, payload=payload) except NetworkInterfaceException: # All HTTP errors are currently retried raise RetryDataForRequest @@ -251,7 +259,10 @@ def send(self, method, payload=()): exception = self.STATUS_CODE_RESPONSE.get(status, DiscardDataForRequest) raise exception if status == 200: - return json_decode(data.decode("utf-8"))["return_value"] + return self.decode_response(data) + + def decode_response(self, response): + return json_decode(response.decode("utf-8"))["return_value"] def _to_http(self, method, payload=()): params = dict(self._params) @@ -514,3 +525,77 @@ def connect( # can be modified later settings.aws_lambda_metadata = aws_lambda_metadata return cls(settings, client_cls=client_cls) + + +class OtlpProtocol(AgentProtocol): + def __init__(self, settings, host=None, client_cls=ApplicationModeClient): + if 
settings.audit_log_file: + audit_log_fp = open(settings.audit_log_file, "a") + else: + audit_log_fp = None + + self.client = client_cls( + host=host or settings.otlp_host, + port=settings.otlp_port or 4318, + proxy_scheme=settings.proxy_scheme, + proxy_host=settings.proxy_host, + proxy_port=settings.proxy_port, + proxy_user=settings.proxy_user, + proxy_pass=settings.proxy_pass, + timeout=settings.agent_limits.data_collector_timeout, + ca_bundle_path=settings.ca_bundle_path, + disable_certificate_validation=settings.debug.disable_certificate_validation, + compression_threshold=settings.agent_limits.data_compression_threshold, + compression_level=settings.agent_limits.data_compression_level, + compression_method=settings.compressed_content_encoding, + max_payload_size_in_bytes=1000000, + audit_log_fp=audit_log_fp, + default_content_encoding_header=None, + ) + + self._params = {} + self._headers = { + "api-key": settings.license_key, + } + + # In Python 2, the JSON is loaded with unicode keys and values; + # however, the header name must be a non-unicode value when given to + # the HTTP library. This code converts the header name from unicode to + # non-unicode. + if settings.request_headers_map: + for k, v in settings.request_headers_map.items(): + if not isinstance(k, str): + k = k.encode("utf-8") + self._headers[k] = v + + # Content-Type should be protobuf, but falls back to JSON if protobuf is not installed. + self._headers["Content-Type"] = OTLP_CONTENT_TYPE + self._run_token = settings.agent_run_id + + # Logging + self._proxy_host = settings.proxy_host + self._proxy_port = settings.proxy_port + self._proxy_user = settings.proxy_user + + # Do not access configuration anywhere inside the class + self.configuration = settings + + @classmethod + def connect( + cls, + app_name, + linked_applications, + environment, + settings, + client_cls=ApplicationModeClient, + ): + with cls(settings, client_cls=client_cls) as protocol: + pass + + return protocol + + def _to_http(self, method, payload=()): + return {}, self._headers, otlp_encode(payload) + + def decode_response(self, response): + return response.decode("utf-8") diff --git a/newrelic/core/application.py b/newrelic/core/application.py index 7be217428..82cdf8a9a 100644 --- a/newrelic/core/application.py +++ b/newrelic/core/application.py @@ -510,6 +510,9 @@ def connect_to_data_collector(self, activate_agent): with self._stats_custom_lock: self._stats_custom_engine.reset_stats(configuration) + with self._stats_lock: + self._stats_engine.reset_stats(configuration) + # Record an initial start time for the reporting period and # clear record of last transaction processed. @@ -860,6 +863,50 @@ def record_custom_metrics(self, metrics): self._global_events_account += 1 self._stats_custom_engine.record_custom_metric(name, value) + def record_dimensional_metric(self, name, value, tags=None): + """Record a dimensional metric against the application independent + of a specific transaction. + + NOTE that this will require locking of the stats engine for + dimensional metrics and so under heavy use will have performance + issues. It is better to record the dimensional metric against an + active transaction as they will then be aggregated at the end of + the transaction when all other metrics are aggregated and so no + additional locking will be required. 
+ + """ + + if not self._active_session: + return + + with self._stats_lock: + self._global_events_account += 1 + self._stats_engine.record_dimensional_metric(name, value, tags) + + def record_dimensional_metrics(self, metrics): + """Record a set of dimensional metrics against the application + independent of a specific transaction. + + NOTE that this will require locking of the stats engine for + dimensional metrics and so under heavy use will have performance + issues. It is better to record the dimensional metric against an + active transaction as they will then be aggregated at the end of + the transaction when all other metrics are aggregated and so no + additional locking will be required. + + """ + + if not self._active_session: + return + + with self._stats_lock: + for metric in metrics: + name, value = metric[:2] + tags = metric[2] if len(metric) >= 3 else None + + self._global_events_account += 1 + self._stats_engine.record_dimensional_metric(name, value, tags) + def record_custom_event(self, event_type, params): if not self._active_session: return @@ -876,6 +923,22 @@ def record_custom_event(self, event_type, params): self._global_events_account += 1 self._stats_engine.record_custom_event(event) + def record_ml_event(self, event_type, params): + if not self._active_session: + return + + settings = self._stats_engine.settings + + if settings is None or not settings.ml_insights_events.enabled: + return + + event = create_custom_event(event_type, params) + + if event: + with self._stats_custom_lock: + self._global_events_account += 1 + self._stats_engine.record_ml_event(event) + def record_log_event(self, message, level=None, timestamp=None, priority=None): if not self._active_session: return @@ -1335,6 +1398,26 @@ def harvest(self, shutdown=False, flexible=False): stats.reset_custom_events() + # Send machine learning events + + if configuration.ml_insights_events.enabled: + ml_events = stats.ml_events + + if ml_events: + if ml_events.num_samples > 0: + ml_event_samples = list(ml_events) + + _logger.debug("Sending machine learning event data for harvest of %r.", self._app_name) + + self._active_session.send_ml_events(ml_events.sampling_info, ml_event_samples) + ml_event_samples = None + + # As per spec + internal_count_metric("Supportability/Events/Customer/Seen", ml_events.num_seen) + internal_count_metric("Supportability/Events/Customer/Sent", ml_events.num_samples) + + stats.reset_ml_events() + # Send log events if ( @@ -1416,11 +1499,14 @@ def harvest(self, shutdown=False, flexible=False): _logger.debug("Normalizing metrics for harvest of %r.", self._app_name) metric_data = stats.metric_data(metric_normalizer) + dimensional_metric_data = stats.dimensional_metric_data(metric_normalizer) _logger.debug("Sending metric data for harvest of %r.", self._app_name) # Send metrics self._active_session.send_metric_data(self._period_start, period_end, metric_data) + if dimensional_metric_data: + self._active_session.send_dimensional_metric_data(self._period_start, period_end, dimensional_metric_data) _logger.debug("Done sending data for harvest of %r.", self._app_name) diff --git a/newrelic/core/attribute.py b/newrelic/core/attribute.py index 372711369..10ae8e459 100644 --- a/newrelic/core/attribute.py +++ b/newrelic/core/attribute.py @@ -180,7 +180,6 @@ def create_user_attributes(attr_dict, attribute_filter): def truncate(text, maxsize=MAX_ATTRIBUTE_LENGTH, encoding="utf-8", ending=None): - # Truncate text so that its byte representation # is no longer than maxsize bytes. 
@@ -225,7 +224,6 @@ def check_max_int(value, max_int=MAX_64_BIT_INT): def process_user_attribute(name, value, max_length=MAX_ATTRIBUTE_LENGTH, ending=None): - # Perform all necessary checks on a potential attribute. # # Returns: @@ -245,23 +243,22 @@ def process_user_attribute(name, value, max_length=MAX_ATTRIBUTE_LENGTH, ending= value = sanitize(value) except NameIsNotStringException: - _logger.debug("Attribute name must be a string. Dropping " "attribute: %r=%r", name, value) + _logger.debug("Attribute name must be a string. Dropping attribute: %r=%r", name, value) return FAILED_RESULT except NameTooLongException: - _logger.debug("Attribute name exceeds maximum length. Dropping " "attribute: %r=%r", name, value) + _logger.debug("Attribute name exceeds maximum length. Dropping attribute: %r=%r", name, value) return FAILED_RESULT except IntTooLargeException: - _logger.debug("Attribute value exceeds maximum integer value. " "Dropping attribute: %r=%r", name, value) + _logger.debug("Attribute value exceeds maximum integer value. Dropping attribute: %r=%r", name, value) return FAILED_RESULT except CastingFailureException: - _logger.debug("Attribute value cannot be cast to a string. " "Dropping attribute: %r=%r", name, value) + _logger.debug("Attribute value cannot be cast to a string. Dropping attribute: %r=%r", name, value) return FAILED_RESULT else: - # Check length after casting valid_types_text = (six.text_type, six.binary_type) @@ -270,7 +267,7 @@ def process_user_attribute(name, value, max_length=MAX_ATTRIBUTE_LENGTH, ending= trunc_value = truncate(value, maxsize=max_length, ending=ending) if value != trunc_value: _logger.debug( - "Attribute value exceeds maximum length " "(%r bytes). Truncating value: %r=%r.", + "Attribute value exceeds maximum length (%r bytes). Truncating value: %r=%r.", max_length, name, trunc_value, @@ -282,15 +279,31 @@ def process_user_attribute(name, value, max_length=MAX_ATTRIBUTE_LENGTH, ending= def sanitize(value): + """ + Return value unchanged, if it's a valid type that is supported by + Insights. Otherwise, convert value to a string. - # Return value unchanged, if it's a valid type that is supported by - # Insights. Otherwise, convert value to a string. - # - # Raise CastingFailureException, if str(value) somehow fails. + Raise CastingFailureException, if str(value) somehow fails. + """ valid_value_types = (six.text_type, six.binary_type, bool, float, six.integer_types) - if not isinstance(value, valid_value_types): + # When working with numpy, note that numpy has its own `int`s, `str`s, + # et cetera. `numpy.str_` and `numpy.float_` inherit from Python's native + # `str` and `float`, respectively. However, some types, such as `numpy.int_` + # and `numpy.bool_`, do not inherit from `int` and `bool` (respectively). + # In those cases, the valid_value_types check fails and it will try to + # convert these to string, which is not the desired behavior. Checking for + # `type` in lieu of `isinstance` has the potential to impact performance. + + # numpy values have an attribute "item" that returns the closest + # equivalent Python native type. Ex: numpy.int64 -> int + # This is important to utilize in cases like int and bool where + # numpy does not inherit from those classes. This logic is + # determining whether or not the value is a valid_value_type (or + # inherited from one of those types) AND whether it is a numpy + # type (by determining if it has the attribute "item"). 
+ if not isinstance(value, valid_value_types) and not hasattr(value, "item"): original = value try: @@ -298,8 +311,6 @@ def sanitize(value): except Exception: raise CastingFailureException() else: - _logger.debug( - "Attribute value is of type: %r. Casting %r to " "string: %s", type(original), original, value - ) + _logger.debug("Attribute value is of type: %r. Casting %r to string: %s", type(original), original, value) return value diff --git a/newrelic/core/config.py b/newrelic/core/config.py index 7489be222..483e23df8 100644 --- a/newrelic/core/config.py +++ b/newrelic/core/config.py @@ -51,11 +51,14 @@ # By default, Transaction Events and Custom Events have the same size # reservoir. Error Events have a different default size. +# Slow harvest (Every 60 seconds) DEFAULT_RESERVOIR_SIZE = 1200 -CUSTOM_EVENT_RESERVOIR_SIZE = 3600 ERROR_EVENT_RESERVOIR_SIZE = 100 SPAN_EVENT_RESERVOIR_SIZE = 2000 +# Fast harvest (Every 5 seconds, so divide by 12 to get average per minute value) +CUSTOM_EVENT_RESERVOIR_SIZE = 3600 LOG_EVENT_RESERVOIR_SIZE = 10000 +ML_EVENT_RESERVOIR_SIZE = 100000 # settings that should be completely ignored if set server side IGNORED_SERVER_SIDE_SETTINGS = [ @@ -101,6 +104,7 @@ def create_settings(nested): class TopLevelSettings(Settings): _host = None + _otlp_host = None @property def host(self): @@ -112,6 +116,16 @@ def host(self): def host(self, value): self._host = value + @property + def otlp_host(self): + if self._otlp_host: + return self._otlp_host + return default_otlp_host(self.host) + + @otlp_host.setter + def otlp_host(self, value): + self._otlp_host = value + class AttributesSettings(Settings): pass @@ -121,6 +135,14 @@ class GCRuntimeMetricsSettings(Settings): enabled = False +class MachineLearningSettings(Settings): + pass + + +class MachineLearningInferenceEventsValueSettings(Settings): + pass + + class CodeLevelMetricsSettings(Settings): pass @@ -199,6 +221,10 @@ class CustomInsightsEventsSettings(Settings): pass +class MlInsightsEventsSettings(Settings): + pass + + class ProcessHostSettings(Settings): pass @@ -370,6 +396,8 @@ class EventHarvestConfigHarvestLimitSettings(Settings): _settings.application_logging.forwarding = ApplicationLoggingForwardingSettings() _settings.application_logging.local_decorating = ApplicationLoggingLocalDecoratingSettings() _settings.application_logging.metrics = ApplicationLoggingMetricsSettings() +_settings.machine_learning = MachineLearningSettings() +_settings.machine_learning.inference_events_value = MachineLearningInferenceEventsValueSettings() _settings.attributes = AttributesSettings() _settings.browser_monitoring = BrowserMonitorSettings() _settings.browser_monitoring.attributes = BrowserMonitorAttributesSettings() @@ -377,6 +405,7 @@ class EventHarvestConfigHarvestLimitSettings(Settings): _settings.console = ConsoleSettings() _settings.cross_application_tracer = CrossApplicationTracerSettings() _settings.custom_insights_events = CustomInsightsEventsSettings() +_settings.ml_insights_events = MlInsightsEventsSettings() _settings.datastore_tracer = DatastoreTracerSettings() _settings.datastore_tracer.database_name_reporting = DatastoreTracerDatabaseNameReportingSettings() _settings.datastore_tracer.instance_reporting = DatastoreTracerInstanceReportingSettings() @@ -542,6 +571,24 @@ def default_host(license_key): return host +def default_otlp_host(host): + HOST_MAP = { + "collector.newrelic.com": "otlp.nr-data.net", + "collector.eu.newrelic.com": "otlp.eu01.nr-data.net", + "gov-collector.newrelic.com": 
"gov-otlp.nr-data.net", + "staging-collector.newrelic.com": "staging-otlp.nr-data.net", + "staging-collector.eu.newrelic.com": "staging-otlp.eu01.nr-data.net", + "staging-gov-collector.newrelic.com": "staging-gov-otlp.nr-data.net", + "fake-collector.newrelic.com": "fake-otlp.nr-data.net", + } + otlp_host = HOST_MAP.get(host, None) + if not otlp_host: + default = HOST_MAP["collector.newrelic.com"] + _logger.warn("Unable to find corresponding OTLP host using default %s" % default) + otlp_host = default + return otlp_host + + _LOG_LEVEL = { "CRITICAL": logging.CRITICAL, "ERROR": logging.ERROR, @@ -567,7 +614,9 @@ def default_host(license_key): _settings.ssl = _environ_as_bool("NEW_RELIC_SSL", True) _settings.host = os.environ.get("NEW_RELIC_HOST") +_settings.otlp_host = os.environ.get("NEW_RELIC_OTLP_HOST") _settings.port = int(os.environ.get("NEW_RELIC_PORT", "0")) +_settings.otlp_port = int(os.environ.get("NEW_RELIC_OTLP_PORT", "0")) _settings.agent_run_id = None _settings.entity_guid = None @@ -668,6 +717,7 @@ def default_host(license_key): _settings.transaction_events.attributes.include = [] _settings.custom_insights_events.enabled = True +_settings.ml_insights_events.enabled = False _settings.distributed_tracing.enabled = _environ_as_bool("NEW_RELIC_DISTRIBUTED_TRACING_ENABLED", default=True) _settings.distributed_tracing.exclude_newrelic_header = False @@ -760,6 +810,10 @@ def default_host(license_key): "NEW_RELIC_CUSTOM_INSIGHTS_EVENTS_MAX_SAMPLES_STORED", CUSTOM_EVENT_RESERVOIR_SIZE ) +_settings.event_harvest_config.harvest_limits.ml_event_data = _environ_as_int( + "NEW_RELIC_ML_INSIGHTS_EVENTS_MAX_SAMPLES_STORED", ML_EVENT_RESERVOIR_SIZE +) + _settings.event_harvest_config.harvest_limits.span_event_data = _environ_as_int( "NEW_RELIC_SPAN_EVENTS_MAX_SAMPLES_STORED", SPAN_EVENT_RESERVOIR_SIZE ) @@ -797,6 +851,7 @@ def default_host(license_key): _settings.debug.log_untrusted_distributed_trace_keys = False _settings.debug.disable_harvest_until_shutdown = False _settings.debug.connect_span_stream_in_developer_mode = False +_settings.debug.otlp_content_encoding = None _settings.message_tracer.segment_parameters_enabled = True @@ -839,6 +894,10 @@ def default_host(license_key): _settings.application_logging.local_decorating.enabled = _environ_as_bool( "NEW_RELIC_APPLICATION_LOGGING_LOCAL_DECORATING_ENABLED", default=False ) +_settings.machine_learning.enabled = _environ_as_bool("NEW_RELIC_MACHINE_LEARNING_ENABLED", default=False) +_settings.machine_learning.inference_events_value.enabled = _environ_as_bool( + "NEW_RELIC_MACHINE_LEARNING_INFERENCE_EVENT_VALUE_ENABLED", default=False +) def global_settings(): @@ -1083,8 +1142,8 @@ def apply_server_side_settings(server_side_config=None, settings=_settings): apply_config_setting(settings_snapshot, name, value) # Overlay with global server side configuration settings. - # global server side configuration always takes precedence over the global - # server side configuration settings. + # global server side configuration always takes precedence over the local + # agent configuration settings. 
for name, value in server_side_config.items():
         apply_config_setting(settings_snapshot, name, value)

@@ -1101,6 +1160,16 @@
         settings_snapshot, "event_harvest_config.harvest_limits.span_event_data", span_event_harvest_limit
     )

+    # Since the server does not override this setting as it's an OTLP setting,
+    # we must override it here manually by converting it into a per harvest cycle
+    # value.
+    apply_config_setting(
+        settings_snapshot,
+        "event_harvest_config.harvest_limits.ml_event_data",
+        # override ml_events / (60s/5s) harvest
+        settings_snapshot.event_harvest_config.harvest_limits.ml_event_data / 12,
+    )
+
     # This will be removed at some future point
     # Special case for account_id which will be sent instead of
     # cross_process_id in the future
diff --git a/newrelic/core/data_collector.py b/newrelic/core/data_collector.py
index 985e37240..269139664 100644
--- a/newrelic/core/data_collector.py
+++ b/newrelic/core/data_collector.py
@@ -25,21 +25,30 @@
     DeveloperModeClient,
     ServerlessModeClient,
 )
-from newrelic.core.agent_protocol import AgentProtocol, ServerlessModeProtocol
+from newrelic.core.agent_protocol import (
+    AgentProtocol,
+    OtlpProtocol,
+    ServerlessModeProtocol,
+)
 from newrelic.core.agent_streaming import StreamingRpc
 from newrelic.core.config import global_settings
+from newrelic.core.otlp_utils import encode_metric_data, encode_ml_event_data

 _logger = logging.getLogger(__name__)


 class Session(object):
     PROTOCOL = AgentProtocol
+    OTLP_PROTOCOL = OtlpProtocol
     CLIENT = ApplicationModeClient

     def __init__(self, app_name, linked_applications, environment, settings):
         self._protocol = self.PROTOCOL.connect(
             app_name, linked_applications, environment, settings, client_cls=self.CLIENT
         )
+        self._otlp_protocol = self.OTLP_PROTOCOL.connect(
+            app_name, linked_applications, environment, settings, client_cls=self.CLIENT
+        )
         self._rpc = None

     @property
@@ -112,6 +121,11 @@ def send_custom_events(self, sampling_info, custom_event_data):
         payload = (self.agent_run_id, sampling_info, custom_event_data)
         return self._protocol.send("custom_event_data", payload)

+    def send_ml_events(self, sampling_info, custom_event_data):
+        """Called to submit sample set for machine learning events."""
+        payload = encode_ml_event_data(custom_event_data, str(self.agent_run_id))
+        return self._otlp_protocol.send("ml_event_data", payload, path="/v1/logs")
+
     def send_span_events(self, sampling_info, span_event_data):
         """Called to submit sample set for span events."""

@@ -128,6 +142,20 @@ def send_metric_data(self, start_time, end_time, metric_data):
         payload = (self.agent_run_id, start_time, end_time, metric_data)
         return self._protocol.send("metric_data", payload)

+    def send_dimensional_metric_data(self, start_time, end_time, metric_data):
+        """Called to submit dimensional metric data for a specified period of time.
+        Time values are seconds since UNIX epoch as returned by the
+        time.time() function. The metric data should be an iterable of
+        specific metrics.
+
+        NOTE: This data is not sent to the normal agent endpoints but to the
+        OTLP API endpoints to keep the entity separate. This is for use
+        with the machine learning integration only.
+        """
+
+        payload = encode_metric_data(metric_data, start_time, end_time)
+        return self._otlp_protocol.send("dimensional_metric_data", payload, path="/v1/metrics")
+
     def send_log_events(self, sampling_info, log_event_data):
         """Called to submit sample set for log events."""

diff --git a/newrelic/core/otlp_utils.py b/newrelic/core/otlp_utils.py
new file mode 100644
index 000000000..e78a63603
--- /dev/null
+++ b/newrelic/core/otlp_utils.py
@@ -0,0 +1,243 @@
+# Copyright 2010 New Relic, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This module provides common utilities for interacting with OTLP protocol buffers.
+
+The serialization implemented here attempts to use protobuf as an encoding, but falls
+back to JSON when encountering exceptions unless the content type is explicitly set in debug settings.
+"""
+
+import logging
+
+from newrelic.common.encoding_utils import json_encode
+from newrelic.core.config import global_settings
+from newrelic.core.stats_engine import CountStats, TimeStats
+
+_logger = logging.getLogger(__name__)
+
+_settings = global_settings()
+otlp_content_setting = _settings.debug.otlp_content_encoding
+if not otlp_content_setting or otlp_content_setting == "protobuf":
+    try:
+        from newrelic.packages.opentelemetry_proto.common_pb2 import AnyValue, KeyValue
+        from newrelic.packages.opentelemetry_proto.logs_pb2 import (
+            LogsData,
+            ResourceLogs,
+            ScopeLogs,
+        )
+        from newrelic.packages.opentelemetry_proto.metrics_pb2 import (
+            AggregationTemporality,
+            Metric,
+            MetricsData,
+            NumberDataPoint,
+            ResourceMetrics,
+            ScopeMetrics,
+            Sum,
+            Summary,
+            SummaryDataPoint,
+        )
+        from newrelic.packages.opentelemetry_proto.resource_pb2 import Resource
+
+        ValueAtQuantile = SummaryDataPoint.ValueAtQuantile
+        AGGREGATION_TEMPORALITY_DELTA = AggregationTemporality.AGGREGATION_TEMPORALITY_DELTA
+        OTLP_CONTENT_TYPE = "application/x-protobuf"
+
+        otlp_content_setting = "protobuf"  # Explicitly set to overwrite None values
+    except Exception:
+        if otlp_content_setting == "protobuf":
+            raise  # Reraise exception if content type explicitly set
+        # Fallback to JSON
+        otlp_content_setting = "json"
+
+
+if otlp_content_setting == "json":
+    AnyValue = dict
+    KeyValue = dict
+    Metric = dict
+    MetricsData = dict
+    NumberDataPoint = dict
+    Resource = dict
+    ResourceMetrics = dict
+    ScopeMetrics = dict
+    Sum = dict
+    Summary = dict
+    SummaryDataPoint = dict
+    ValueAtQuantile = dict
+    ResourceLogs = dict
+    ScopeLogs = dict
+    LogsData = dict
+
+    AGGREGATION_TEMPORALITY_DELTA = 1
+    OTLP_CONTENT_TYPE = "application/json"
+
+
+def otlp_encode(payload):
+    if type(payload) is dict:  # pylint: disable=C0123
+        _logger.warning(
+            "Using OTLP integration while protobuf is not installed. This may result in larger payload sizes and data loss."
+        )
+        return json_encode(payload).encode("utf-8")
+    return payload.SerializeToString()
+
+
+def create_key_value(key, value):
+    if isinstance(value, bool):
+        return KeyValue(key=key, value=AnyValue(bool_value=value))
+    elif isinstance(value, int):
+        return KeyValue(key=key, value=AnyValue(int_value=value))
+    elif isinstance(value, float):
+        return KeyValue(key=key, value=AnyValue(double_value=value))
+    elif isinstance(value, str):
+        return KeyValue(key=key, value=AnyValue(string_value=value))
+    # Technically AnyValue also accepts array, kvlist, and bytes; however, since
+    # those are not valid custom attribute types according to our API spec,
+    # we will not bother to support them here either.
+    else:
+        _logger.warning("Unsupported attribute value type %s: %s.", key, value)
+
+
+def create_key_values_from_iterable(iterable):
+    if not iterable:
+        return None
+    elif isinstance(iterable, dict):
+        iterable = iterable.items()
+
+    # create_key_value may return None if the value is an unsupported type,
+    # so filter None values out before returning.
+    return list(
+        filter(
+            lambda i: i is not None,
+            (create_key_value(key, value) for key, value in iterable),
+        )
+    )
+
+
+def create_resource(attributes=None):
+    attributes = attributes or {"instrumentation.provider": "newrelic-opentelemetry-python-ml"}
+    return Resource(attributes=create_key_values_from_iterable(attributes))
+
+
+def TimeStats_to_otlp_data_point(self, start_time, end_time, attributes=None):
+    data = SummaryDataPoint(
+        time_unix_nano=int(end_time * 1e9),  # Time of current harvest
+        start_time_unix_nano=int(start_time * 1e9),  # Time of last harvest
+        attributes=attributes,
+        count=int(self[0]),
+        sum=float(self[1]),
+        quantile_values=[
+            ValueAtQuantile(quantile=0.0, value=float(self[3])),  # Min Value
+            ValueAtQuantile(quantile=1.0, value=float(self[4])),  # Max Value
+        ],
+    )
+    return data
+
+
+def CountStats_to_otlp_data_point(self, start_time, end_time, attributes=None):
+    data = NumberDataPoint(
+        time_unix_nano=int(end_time * 1e9),  # Time of current harvest
+        start_time_unix_nano=int(start_time * 1e9),  # Time of last harvest
+        attributes=attributes,
+        as_int=int(self[0]),
+    )
+    return data
+
+
+def stats_to_otlp_metrics(metric_data, start_time, end_time):
+    """
+    Generator producing protos for Sum and Summary metrics, for CountStats and TimeStats respectively.
+
+    Individual Metric protos must be entirely one type of metric data point. For mixed metric types we have to
+    separate the types and report multiple metrics, one for each type.
+    """
+    for name, metric_container in metric_data:
+        # Types are checked here using type() instead of isinstance, as CountStats is a subclass of TimeStats.
+        # Improperly checking with isinstance will lead to count metrics being encoded and reported twice.
+        if any(type(metric) is CountStats for metric in metric_container.values()):  # pylint: disable=C0123
+            # Metric contains Sum metric data points.
+            yield Metric(
+                name=name,
+                sum=Sum(
+                    aggregation_temporality=AGGREGATION_TEMPORALITY_DELTA,
+                    is_monotonic=True,
+                    data_points=[
+                        CountStats_to_otlp_data_point(
+                            value,
+                            start_time=start_time,
+                            end_time=end_time,
+                            attributes=create_key_values_from_iterable(tags),
+                        )
+                        for tags, value in metric_container.items()
+                        if type(value) is CountStats  # pylint: disable=C0123
+                    ],
+                ),
+            )
+        if any(type(metric) is TimeStats for metric in metric_container.values()):  # pylint: disable=C0123
+            # Metric contains Summary metric data points.
+ yield Metric( + name=name, + summary=Summary( + data_points=[ + TimeStats_to_otlp_data_point( + value, + start_time=start_time, + end_time=end_time, + attributes=create_key_values_from_iterable(tags), + ) + for tags, value in metric_container.items() + if type(value) is TimeStats # pylint: disable=C0123 + ] + ), + ) + + +def encode_metric_data(metric_data, start_time, end_time, resource=None, scope=None): + resource = resource or create_resource() + return MetricsData( + resource_metrics=[ + ResourceMetrics( + resource=resource, + scope_metrics=[ + ScopeMetrics( + scope=scope, + metrics=list(stats_to_otlp_metrics(metric_data, start_time, end_time)), + ) + ], + ) + ] + ) + + +def encode_ml_event_data(custom_event_data, agent_run_id): + resource = create_resource() + ml_events = [] + for event in custom_event_data: + event_info, event_attrs = event + event_attrs.update( + { + "real_agent_id": agent_run_id, + "event.domain": "newrelic.ml_events", + "event.name": event_info["type"], + } + ) + ml_attrs = create_key_values_from_iterable(event_attrs) + unix_nano_timestamp = event_info["timestamp"] * 1e6 + ml_events.append( + { + "time_unix_nano": int(unix_nano_timestamp), + "attributes": ml_attrs, + } + ) + + return LogsData(resource_logs=[ResourceLogs(resource=resource, scope_logs=[ScopeLogs(log_records=ml_events)])]) diff --git a/newrelic/core/stats_engine.py b/newrelic/core/stats_engine.py index 88ec31c6e..ebebe7dbe 100644 --- a/newrelic/core/stats_engine.py +++ b/newrelic/core/stats_engine.py @@ -35,6 +35,7 @@ from newrelic.api.settings import STRIP_EXCEPTION_MESSAGE from newrelic.api.time_trace import get_linking_metadata from newrelic.common.encoding_utils import json_encode +from newrelic.common.metric_utils import create_metric_identity from newrelic.common.object_names import parse_exc_info from newrelic.common.streaming_utils import StreamBuffer from newrelic.core.attribute import ( @@ -61,7 +62,7 @@ "reset_synthetics_events", ), "span_event_data": ("reset_span_events",), - "custom_event_data": ("reset_custom_events",), + "custom_event_data": ("reset_custom_events", "reset_ml_events"), "error_event_data": ("reset_error_events",), "log_event_data": ("reset_log_events",), } @@ -180,6 +181,11 @@ def merge_custom_metric(self, value): self.merge_raw_time_metric(value) + def merge_dimensional_metric(self, value): + """Merge data value.""" + + self.merge_raw_time_metric(value) + class CountStats(TimeStats): def merge_stats(self, other): @@ -235,6 +241,99 @@ def reset_metric_stats(self): self.__stats_table = {} +class DimensionalMetrics(object): + + """Nested dictionary table for collecting a set of metrics broken down by tags.""" + + def __init__(self): + self.__stats_table = {} + + def __contains__(self, key): + if isinstance(key, tuple): + if not isinstance(key[1], frozenset): + # Convert tags dict to a frozen set for proper comparisons + name, tags = create_metric_identity(*key) + else: + name, tags = key + + # Check that both metric name and tags are already present. + stats_container = self.__stats_table.get(name) + return stats_container and tags in stats_container + else: + # Only look for metric name + return key in self.__stats_table + + def record_dimensional_metric(self, name, value, tags=None): + """Record a single value metric, merging the data with any data + from prior value metrics with the same name and tags. 
+ """ + name, tags = create_metric_identity(name, tags) + + if isinstance(value, dict): + if len(value) == 1 and "count" in value: + new_stats = CountStats(call_count=value["count"]) + else: + new_stats = TimeStats(*c2t(**value)) + else: + new_stats = TimeStats(1, value, value, value, value, value**2) + + stats_container = self.__stats_table.get(name) + if stats_container is None: + # No existing metrics with this name. Set up new stats container. + self.__stats_table[name] = {tags: new_stats} + else: + # Existing metric container found. + stats = stats_container.get(tags) + if stats is None: + # No data points for this set of tags. Add new data. + stats_container[tags] = new_stats + else: + # Existing data points found, merge stats. + stats.merge_stats(new_stats) + + return (name, tags) + + def metrics(self): + """Returns an iterator over the set of value metrics. + The items returned are a dictionary of tags for each metric value. + Metric values are each a tuple consisting of the metric name and accumulated + stats for the metric. + """ + + return six.iteritems(self.__stats_table) + + def metrics_count(self): + """Returns a count of the number of unique metrics currently + recorded for apdex, time and value metrics. + """ + + return sum(len(metric) for metric in self.__stats_table.values()) + + def reset_metric_stats(self): + """Resets the accumulated statistics back to initial state for + metric data. + """ + self.__stats_table = {} + + def get(self, key, default=None): + return self.__stats_table.get(key, default) + + def __setitem__(self, key, value): + self.__stats_table[key] = value + + def __getitem__(self, key): + return self.__stats_table[key] + + def __str__(self): + return str(self.__stats_table) + + def __repr__(self): + return "%s(%s)" % (__class__.__name__, repr(self.__stats_table)) + + def items(self): + return self.metrics() + + class SlowSqlStats(list): def __init__(self): super(SlowSqlStats, self).__init__([0, 0, 0, 0, None]) @@ -433,9 +532,11 @@ class StatsEngine(object): def __init__(self): self.__settings = None self.__stats_table = {} + self.__dimensional_stats_table = DimensionalMetrics() self._transaction_events = SampledDataSet() self._error_events = SampledDataSet() self._custom_events = SampledDataSet() + self._ml_events = SampledDataSet() self._span_events = SampledDataSet() self._log_events = SampledDataSet() self._span_stream = None @@ -456,6 +557,10 @@ def settings(self): def stats_table(self): return self.__stats_table + @property + def dimensional_stats_table(self): + return self.__dimensional_stats_table + @property def transaction_events(self): return self._transaction_events @@ -464,6 +569,10 @@ def transaction_events(self): def custom_events(self): return self._custom_events + @property + def ml_events(self): + return self._ml_events + @property def span_events(self): return self._span_events @@ -494,7 +603,7 @@ def metrics_count(self): """ - return len(self.__stats_table) + return len(self.__stats_table) + self.__dimensional_stats_table.metrics_count() def record_apdex_metric(self, metric): """Record a single apdex metric, merging the data with any data @@ -716,7 +825,6 @@ def notice_error(self, error=None, attributes=None, expected=None, ignore=None, user_attributes = create_user_attributes(custom_attributes, settings.attribute_filter) - # Extract additional details about the exception as agent attributes agent_attributes = {} @@ -728,28 +836,37 @@ def notice_error(self, error=None, attributes=None, expected=None, ignore=None, error_group_name = 
None try: # Call callback to obtain error group name - error_group_name_raw = settings.error_collector.error_group_callback(value, { - "traceback": tb, - "error.class": exc, - "error.message": message_raw, - "error.expected": is_expected, - "custom_params": attributes, - # Transaction specific items should be set to None - "transactionName": None, - "response.status": None, - "request.method": None, - "request.uri": None, - }) + error_group_name_raw = settings.error_collector.error_group_callback( + value, + { + "traceback": tb, + "error.class": exc, + "error.message": message_raw, + "error.expected": is_expected, + "custom_params": attributes, + # Transaction specific items should be set to None + "transactionName": None, + "response.status": None, + "request.method": None, + "request.uri": None, + }, + ) if error_group_name_raw: _, error_group_name = process_user_attribute("error.group.name", error_group_name_raw) if error_group_name is None or not isinstance(error_group_name, six.string_types): - raise ValueError("Invalid attribute value for error.group.name. Expected string, got: %s" % repr(error_group_name_raw)) + raise ValueError( + "Invalid attribute value for error.group.name. Expected string, got: %s" + % repr(error_group_name_raw) + ) else: agent_attributes["error.group.name"] = error_group_name except Exception: - _logger.error("Encountered error when calling error group callback:\n%s", "".join(traceback.format_exception(*sys.exc_info()))) - + _logger.error( + "Encountered error when calling error group callback:\n%s", + "".join(traceback.format_exception(*sys.exc_info())), + ) + agent_attributes = create_agent_attributes(agent_attributes, settings.attribute_filter) # Record the exception details. @@ -774,7 +891,7 @@ def notice_error(self, error=None, attributes=None, expected=None, ignore=None, for attr in agent_attributes: if attr.destinations & DST_ERROR_COLLECTOR: attributes["agentAttributes"][attr.name] = attr.value - + error_details = TracedError( start_time=time.time(), path="Exception", message=message, type=fullname, parameters=attributes ) @@ -829,6 +946,15 @@ def record_custom_event(self, event): if settings.collect_custom_events and settings.custom_insights_events.enabled: self._custom_events.add(event) + def record_ml_event(self, event): + settings = self.__settings + + if not settings: + return + + if settings.ml_insights_events.enabled: + self._ml_events.add(event) + def record_custom_metric(self, name, value): """Record a single value metric, merging the data with any data from prior value metrics with the same name. @@ -865,6 +991,28 @@ def record_custom_metrics(self, metrics): for name, value in metrics: self.record_custom_metric(name, value) + def record_dimensional_metric(self, name, value, tags=None): + """Record a single value metric, merging the data with any data + from prior value metrics with the same name and tags. + """ + return self.__dimensional_stats_table.record_dimensional_metric(name, value, tags) + + def record_dimensional_metrics(self, metrics): + """Record the value metrics supplied by the iterable, merging + the data with any data from prior value metrics with the same + name. + + """ + + if not self.__settings: + return + + for metric in metrics: + name, value = metric[:2] + tags = metric[2] if len(metric) >= 3 else None + + self.record_dimensional_metric(name, value, tags) + def record_slow_sql_node(self, node): """Record a single sql metric, merging the data with any data from prior sql metrics for the same sql key. 
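As a hedged sketch (not part of the diff) of the aggregation semantics implemented by DimensionalMetrics above: samples with the same name and tag set merge into one stats entry, a different tag set under the same name gets its own entry, and a value of {"count": n} is stored as CountStats rather than TimeStats. It assumes create_metric_identity normalizes a tags dict into a frozenset of its items, as its use above implies.

    from newrelic.core.stats_engine import DimensionalMetrics

    table = DimensionalMetrics()

    # Same name and tags: the two samples merge into a single TimeStats entry.
    table.record_dimensional_metric("Model/Latency", 0.040, tags={"model": "a"})
    table.record_dimensional_metric("Model/Latency", 0.060, tags={"model": "a"})

    # Same name, different tags: a second entry under the same name.
    table.record_dimensional_metric("Model/Latency", 0.050, tags={"model": "b"})

    # A dict value of just {"count": n} is recorded as CountStats.
    table.record_dimensional_metric("Model/Calls", {"count": 3}, tags={"model": "a"})

    assert table.metrics_count() == 3  # two tag sets for Latency, one for Calls
    assert ("Model/Latency", {"model": "a"}) in table  # __contains__ normalizes the tags dict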
@@ -975,6 +1123,8 @@ def record_transaction(self, transaction):

         self.merge_custom_metrics(transaction.custom_metrics.metrics())

+        self.merge_dimensional_metrics(transaction.dimensional_metrics.metrics())
+
         self.record_time_metrics(transaction.time_metrics(self))

         # Capture any errors if error collection is enabled.
@@ -1042,6 +1192,11 @@ def record_transaction(self, transaction):
         if settings.collect_custom_events and settings.custom_insights_events.enabled:
             self.custom_events.merge(transaction.custom_events)

+        # Merge in machine learning events
+
+        if settings.ml_insights_events.enabled:
+            self.ml_events.merge(transaction.ml_events)
+
         # Merge in span events

         if settings.distributed_tracing.enabled and settings.span_events.enabled and settings.collect_span_events:
@@ -1163,6 +1318,66 @@ def metric_data_count(self):

         return len(self.__stats_table)

+    def dimensional_metric_data(self, normalizer=None):
+        """Returns a list containing the low level metric data for
+        sending to the core application pertaining to the reporting
+        period. This consists of tuple pairs where the first element
+        is the metric name and the second is a dictionary mapping each
+        frozenset of tags to the accumulated metric data, a list always
+        being of length 6.
+
+        """
+
+        if not self.__settings:
+            return []
+
+        result = []
+        normalized_stats = {}
+
+        # Metric Renaming and Re-Aggregation. After applying the metric
+        # renaming rules, the metrics are re-aggregated to collapse the
+        # metrics with same names after the renaming.
+
+        if self.__settings.debug.log_raw_metric_data:
+            _logger.info(
+                "Raw dimensional metric data for harvest of %r is %r.",
+                self.__settings.app_name,
+                list(self.__dimensional_stats_table.metrics()),
+            )
+
+        if normalizer is not None:
+            for key, value in self.__dimensional_stats_table.metrics():
+                key = normalizer(key)[0]
+                stats_container = normalized_stats.get(key)
+                if stats_container is None:
+                    normalized_stats[key] = copy.copy(value)
+                else:
+                    # Merge the renamed metric's stats into the existing
+                    # container one tag set at a time.
+                    for tags, other in value.items():
+                        stats = stats_container.get(tags)
+                        if stats is None:
+                            stats_container[tags] = copy.copy(other)
+                        else:
+                            stats.merge_stats(other)
+        else:
+            normalized_stats = self.__dimensional_stats_table

+        if self.__settings.debug.log_normalized_metric_data:
+            _logger.info(
+                "Normalized metric data for harvest of %r is %r.",
+                self.__settings.app_name,
+                list(normalized_stats.items()),
+            )
+
+        for key, value in normalized_stats.items():
+            result.append((key, value))
+
+        return result
+
+    def dimensional_metric_data_count(self):
+        """Returns a count of the number of unique metrics."""
+
+        if not self.__settings:
+            return 0
+
+        return self.__dimensional_stats_table.metrics_count()
+
     def error_data(self):
         """Returns a to a list containing any errors collected during
         the reporting period.
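To tie the harvest path together, a hedged sketch (not part of the diff) of how dimensional metric data flows into the OTLP helpers from newrelic/core/otlp_utils.py; the timestamps and names are illustrative.

    import time

    from newrelic.core.otlp_utils import encode_metric_data, otlp_encode
    from newrelic.core.stats_engine import DimensionalMetrics

    table = DimensionalMetrics()
    table.record_dimensional_metric("Model/Latency", 0.042, tags={"model": "a"})

    start, end = time.time() - 60.0, time.time()

    # encode_metric_data consumes (name, {tags: stats}) pairs, the shape
    # produced by DimensionalMetrics.metrics() and dimensional_metric_data().
    payload = encode_metric_data(list(table.metrics()), start, end)

    # otlp_encode serializes the MetricsData proto, or JSON-encodes the
    # plain dict fallback when protobuf is unavailable.
    body = otlp_encode(payload)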
@@ -1440,7 +1655,6 @@ def reset_stats(self, settings, reset_stream=False):
         """

         self.__settings = settings
-        self.__stats_table = {}
         self.__sql_stats_table = {}
         self.__slow_transaction = None
         self.__slow_transaction_map = {}
@@ -1448,9 +1662,11 @@ def reset_stats(self, settings, reset_stream=False):
         self.__transaction_errors = []
         self.__synthetics_transactions = []

+        self.reset_metric_stats()
         self.reset_transaction_events()
         self.reset_error_events()
         self.reset_custom_events()
+        self.reset_ml_events()
         self.reset_span_events()
         self.reset_log_events()
         self.reset_synthetics_events()
@@ -1467,6 +1683,7 @@ def reset_metric_stats(self):
         """

         self.__stats_table = {}
+        self.__dimensional_stats_table.reset_metric_stats()

     def reset_transaction_events(self):
         """Resets the accumulated statistics back to initial state for
@@ -1493,6 +1710,12 @@ def reset_custom_events(self):
         else:
             self._custom_events = SampledDataSet()

+    def reset_ml_events(self):
+        if self.__settings is not None:
+            self._ml_events = SampledDataSet(self.__settings.event_harvest_config.harvest_limits.ml_event_data)
+        else:
+            self._ml_events = SampledDataSet()
+
     def reset_span_events(self):
         if self.__settings is not None:
             self._span_events = SampledDataSet(self.__settings.event_harvest_config.harvest_limits.span_event_data)
@@ -1626,6 +1849,7 @@ def merge(self, snapshot):
         self._merge_error_events(snapshot)
         self._merge_error_traces(snapshot)
         self._merge_custom_events(snapshot)
+        self._merge_ml_events(snapshot)
         self._merge_span_events(snapshot)
         self._merge_log_events(snapshot)
         self._merge_sql(snapshot)
@@ -1651,6 +1875,7 @@ def rollback(self, snapshot):
         self._merge_synthetics_events(snapshot, rollback=True)
         self._merge_error_events(snapshot)
         self._merge_custom_events(snapshot, rollback=True)
+        self._merge_ml_events(snapshot, rollback=True)
         self._merge_span_events(snapshot, rollback=True)
         self._merge_log_events(snapshot, rollback=True)

@@ -1720,6 +1945,12 @@ def _merge_custom_events(self, snapshot, rollback=False):
             return
         self._custom_events.merge(events)

+    def _merge_ml_events(self, snapshot, rollback=False):
+        events = snapshot.ml_events
+        if not events:
+            return
+        self._ml_events.merge(events)
+
     def _merge_span_events(self, snapshot, rollback=False):
         events = snapshot.span_events
         if not events:
@@ -1789,6 +2020,29 @@ def merge_custom_metrics(self, metrics):
         else:
             stats.merge_stats(other)

+    def merge_dimensional_metrics(self, metrics):
+        """
+        Merges in a set of dimensional metrics. The metrics should be
+        provided as an iterable where each item is a tuple of the metric
+        name and a dictionary mapping each attribute-filtered frozenset
+        of tags to the accumulated stats for that name and tag combination.
+        """
+
+        if not self.__settings:
+            return
+
+        for key, other in metrics:
+            stats_container = self.__dimensional_stats_table.get(key)
+            if not stats_container:
+                self.__dimensional_stats_table[key] = other
+            else:
+                for tags, other_value in other.items():
+                    stats = stats_container.get(tags)
+                    if not stats:
+                        stats_container[tags] = other_value
+                    else:
+                        stats.merge_stats(other_value)
+
     def _snapshot(self):
         copy = object.__new__(StatsEngineSnapshot)
         copy.__dict__.update(self.__dict__)
@@ -1802,6 +2056,9 @@ def reset_transaction_events(self):
     def reset_custom_events(self):
         self._custom_events = None

+    def reset_ml_events(self):
+        self._ml_events = None
+
     def reset_span_events(self):
         self._span_events = None

diff --git a/newrelic/core/transaction_node.py b/newrelic/core/transaction_node.py
index 0faae3790..d63d7f9b6 100644
--- a/newrelic/core/transaction_node.py
+++ b/newrelic/core/transaction_node.py
@@ -60,10 +60,12 @@
     "errors",
     "slow_sql",
     "custom_events",
+    "ml_events",
     "log_events",
     "apdex_t",
     "suppress_apdex",
     "custom_metrics",
+    "dimensional_metrics",
     "guid",
     "cpu_time",
     "suppress_transaction_trace",
diff --git a/newrelic/hooks/mlmodel_sklearn.py b/newrelic/hooks/mlmodel_sklearn.py
new file mode 100644
index 000000000..bdfeccfc8
--- /dev/null
+++ b/newrelic/hooks/mlmodel_sklearn.py
@@ -0,0 +1,781 @@
+# Copyright 2010 New Relic, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import sys
+import uuid
+
+from newrelic.api.function_trace import FunctionTrace
+from newrelic.api.time_trace import current_trace
+from newrelic.api.transaction import current_transaction
+from newrelic.common.object_wrapper import ObjectProxy, wrap_function_wrapper
+from newrelic.core.config import global_settings
+
+METHODS_TO_WRAP = ("predict", "fit", "fit_predict", "predict_log_proba", "predict_proba", "transform", "score")
+METRIC_SCORERS = (
+    "accuracy_score",
+    "balanced_accuracy_score",
+    "f1_score",
+    "precision_score",
+    "recall_score",
+    "roc_auc_score",
+    "r2_score",
+)
+PY2 = sys.version_info[0] == 2
+_logger = logging.getLogger(__name__)
+
+
+def isnumeric(column):
+    import numpy as np
+
+    try:
+        column.astype(np.float64)
+        return [True] * len(column)
+    except Exception:
+        pass
+    return [False] * len(column)
+
+
+class PredictReturnTypeProxy(ObjectProxy):
+    def __init__(self, wrapped, model_name, training_step):
+        super(ObjectProxy, self).__init__(wrapped)
+        self._nr_model_name = model_name
+        self._nr_training_step = training_step
+
+
+def _wrap_method_trace(module, class_, method, name=None, group=None):
+    def _nr_wrapper_method(wrapped, instance, args, kwargs):
+        transaction = current_transaction()
+        trace = current_trace()
+
+        if transaction is None:
+            return wrapped(*args, **kwargs)
+
+        settings = transaction.settings if transaction.settings is not None else global_settings()
+
+        if settings and not settings.machine_learning.enabled:
+            return wrapped(*args, **kwargs)
+
+        wrapped_attr_name = "_nr_wrapped_%s" % method
+
+        # If the method has already been wrapped do not wrap it again.
This happens
+        # when one class inherits from another and they both implement the method.
+        if getattr(trace, wrapped_attr_name, False):
+            return wrapped(*args, **kwargs)
+
+        trace = FunctionTrace(name=name, group=group, source=wrapped)
+
+        try:
+            # Set the _nr_wrapped attribute to denote that this method is being wrapped.
+            setattr(trace, wrapped_attr_name, True)
+
+            with trace:
+                return_val = wrapped(*args, **kwargs)
+        finally:
+            # Set the _nr_wrapped attribute to denote that this method is no longer wrapped.
+            setattr(trace, wrapped_attr_name, False)
+
+        # If this is the fit method, increment the training_step counter.
+        if method in ("fit", "fit_predict"):
+            training_step = getattr(instance, "_nr_wrapped_training_step", -1)
+            setattr(instance, "_nr_wrapped_training_step", training_step + 1)
+
+        # If this is the predict method, wrap the return type in an nr type with
+        # _nr_wrapped attrs that will attach model info to the data.
+        if method in ("predict", "fit_predict"):
+            training_step = getattr(instance, "_nr_wrapped_training_step", "Unknown")
+            create_prediction_event(transaction, class_, instance, args, kwargs, return_val)
+            return PredictReturnTypeProxy(return_val, model_name=class_, training_step=training_step)
+        return return_val
+
+    wrap_function_wrapper(module, "%s.%s" % (class_, method), _nr_wrapper_method)
+
+
+def _calc_prediction_feature_stats(prediction_input, class_, feature_column_names, tags):
+    import numpy as np
+
+    # Drop any feature columns that are not numeric since we can't compute stats
+    # on non-numeric columns.
+    x = np.array(prediction_input)
+    isnumeric_features = np.apply_along_axis(isnumeric, 0, x)
+    numeric_features = x[isnumeric_features]
+
+    # Drop any feature column names that are not numeric since we can't compute stats
+    # on non-numeric columns.
+    feature_column_names = feature_column_names[isnumeric_features[0]]
+
+    # Only compute stats for features if we have any feature columns left after dropping
+    # non-numeric columns.
+    num_cols = len(feature_column_names)
+    if num_cols > 0:
+        # Boolean selection of numpy array values reshapes the array to a single
+        # dimension so we have to reshape it back into a 2D array.
+        features = np.reshape(numeric_features, (len(numeric_features) // num_cols, num_cols))
+        features = features.astype(dtype=np.float64)
+
+        _record_stats(features, feature_column_names, class_, "Feature", tags)
+
+
+def _record_stats(data, column_names, class_, column_type, tags):
+    import numpy as np
+
+    mean = np.mean(data, axis=0)
+    # Note: numpy expects percentile q values in the range [0, 100].
+    percentile25 = np.percentile(data, q=25, axis=0)
+    percentile50 = np.percentile(data, q=50, axis=0)
+    percentile75 = np.percentile(data, q=75, axis=0)
+    standard_deviation = np.std(data, axis=0)
+    _min = np.min(data, axis=0)
+    _max = np.max(data, axis=0)
+    _count = data.shape[0]
+
+    transaction = current_transaction()
+
+    # Currently record_metric only supports a subset of these stats so we have
+    # to upload them one at a time instead of as a dictionary of stats per
+    # feature column.
+ for index, col_name in enumerate(column_names): + metric_name = "MLModel/Sklearn/Named/%s/Predict/%s/%s" % (class_, column_type, col_name) + + transaction.record_dimensional_metrics( + [ + ("%s/%s" % (metric_name, "Mean"), float(mean[index]), tags), + ("%s/%s" % (metric_name, "Percentile25"), float(percentile25[index]), tags), + ("%s/%s" % (metric_name, "Percentile50"), float(percentile50[index]), tags), + ("%s/%s" % (metric_name, "Percentile75"), float(percentile75[index]), tags), + ("%s/%s" % (metric_name, "StandardDeviation"), float(standard_deviation[index]), tags), + ("%s/%s" % (metric_name, "Min"), float(_min[index]), tags), + ("%s/%s" % (metric_name, "Max"), float(_max[index]), tags), + ("%s/%s" % (metric_name, "Count"), _count, tags), + ] + ) + + +def _calc_prediction_label_stats(labels, class_, label_column_names, tags): + import numpy as np + + labels = np.array(labels, dtype=np.float64) + _record_stats(labels, label_column_names, class_, "Label", tags) + + +def _get_label_names(user_defined_label_names, prediction_array): + import numpy as np + + if user_defined_label_names is None: + return np.array(range(prediction_array.shape[1])) + if user_defined_label_names and len(user_defined_label_names) != prediction_array.shape[1]: + _logger.warning( + "The number of label names passed to the ml_model wrapper function is not equal to the number of predictions in the data set. Please supply the correct number of label names." + ) + return np.array(range(prediction_array.shape[1])) + else: + return user_defined_label_names + + +def find_type_category(data_set, row_index, column_index): + # If pandas DataFrame, return type of column. + pd = sys.modules.get("pandas", None) + if pd and isinstance(data_set, pd.DataFrame): + value_type = data_set.iloc[:, column_index].dtype.name + if value_type == "category": + return "categorical" + categorized_value_type = categorize_data_type(value_type) + return categorized_value_type + # If it's not a pandas DataFrame then it is a list or numpy array. + python_type = str(type(data_set[column_index][row_index])) + return categorize_data_type(python_type) + + +def categorize_data_type(python_type): + if "int" in python_type or "float" in python_type or "complex" in python_type: + return "numeric" + if "bool" in python_type: + return "bool" + if "str" in python_type or "unicode" in python_type: + return "str" + else: + return python_type + + +def _get_feature_column_names(user_provided_feature_names, features): + import numpy as np + + num_feature_columns = np.array(features).shape[1] + + # If the user provided feature names are the correct size, return the user provided feature + # names. + if user_provided_feature_names and len(user_provided_feature_names) == num_feature_columns: + return np.array(user_provided_feature_names) + + # If the user provided feature names aren't the correct size, log a warning and do not use the user provided feature names. + if user_provided_feature_names: + _logger.warning( + "The number of feature names passed to the ml_model wrapper function is not equal to the number of columns in the data set. Please supply the correct number of feature names." + ) + + # If the user doesn't provide the feature names or they were provided but the size was incorrect and the features are a pandas data frame, return the column names from the pandas data frame. 
+ pd = sys.modules.get("pandas", None) + if pd and isinstance(features, pd.DataFrame): + return features.columns + + # If the user doesn't provide the feature names or they were provided but the size was incorrect and the features are not a pandas data frame, return the column indexes as the feature names. + return np.array(range(num_feature_columns)) + + +def bind_predict(X, *args, **kwargs): + return X + + +def create_prediction_event(transaction, class_, instance, args, kwargs, return_val): + import numpy as np + + data_set = bind_predict(*args, **kwargs) + model_name = getattr(instance, "_nr_wrapped_name", class_) + model_version = getattr(instance, "_nr_wrapped_version", "0.0.0") + user_provided_feature_names = getattr(instance, "_nr_wrapped_feature_names", None) + label_names = getattr(instance, "_nr_wrapped_label_names", None) + metadata = getattr(instance, "_nr_wrapped_metadata", {}) + settings = transaction.settings if transaction.settings is not None else global_settings() + + prediction_id = uuid.uuid4() + + labels = [] + if return_val is not None: + if not hasattr(return_val, "__iter__"): + labels = np.array([return_val]) + else: + labels = np.array(return_val) + if len(labels.shape) == 1: + labels = np.reshape(labels, (len(labels) // 1, 1)) + + label_names_list = _get_label_names(label_names, labels) + _calc_prediction_label_stats( + labels, + class_, + label_names_list, + tags={ + "prediction_id": prediction_id, + "model_version": model_version, + # The following are used for entity synthesis. + "modelName": model_name, + }, + ) + + final_feature_names = _get_feature_column_names(user_provided_feature_names, data_set) + np_casted_data_set = np.array(data_set) + _calc_prediction_feature_stats( + data_set, + class_, + final_feature_names, + tags={ + "prediction_id": prediction_id, + "model_version": model_version, + # The following are used for entity synthesis. + "modelName": model_name, + }, + ) + features, predictions = np_casted_data_set.shape + for prediction_index, prediction in enumerate(np_casted_data_set): + inference_id = uuid.uuid4() + + event = { + "inference_id": inference_id, + "prediction_id": prediction_id, + "model_version": model_version, + "new_relic_data_schema_version": 2, + # The following are used for entity synthesis. + "modelName": model_name, + } + if metadata and isinstance(metadata, dict): + event.update(metadata) + # Don't include the raw value when inference_event_value is disabled. + if settings and settings.machine_learning and settings.machine_learning.inference_events_value.enabled: + event.update( + { + "feature.%s" % str(final_feature_names[feature_col_index]): value + for feature_col_index, value in enumerate(prediction) + } + ) + event.update( + { + "label.%s" % str(label_names_list[index]): str(value) + for index, value in enumerate(labels[prediction_index]) + } + ) + transaction.record_ml_event("InferenceData", event) + + +def _nr_instrument_model(module, model_class): + for method_name in METHODS_TO_WRAP: + if hasattr(getattr(module, model_class), method_name): + # Function/MLModel/Sklearn/Named/. 
+ name = "MLModel/Sklearn/Named/%s.%s" % (model_class, method_name) + _wrap_method_trace(module, model_class, method_name, name=name) + + +def _instrument_sklearn_models(module, model_classes): + for model_cls in model_classes: + if hasattr(module, model_cls): + _nr_instrument_model(module, model_cls) + + +def _bind_scorer(y_true, y_pred, *args, **kwargs): + return y_true, y_pred, args, kwargs + + +def wrap_metric_scorer(wrapped, instance, args, kwargs): + transaction = current_transaction() + # If there is no transaction, do not wrap anything. + if not transaction: + return wrapped(*args, **kwargs) + + settings = transaction.settings if transaction.settings is not None else global_settings() + + if settings and not settings.machine_learning.enabled: + return wrapped(*args, **kwargs) + + score = wrapped(*args, **kwargs) + + y_true, y_pred, args, kwargs = _bind_scorer(*args, **kwargs) + model_name = "Unknown" + training_step = "Unknown" + if hasattr(y_pred, "_nr_model_name"): + model_name = y_pred._nr_model_name + if hasattr(y_pred, "_nr_training_step"): + training_step = y_pred._nr_training_step + # Attribute values must be int, float, str, or boolean. If it's not one of these + # types and an iterable add the values as separate attributes. + if not isinstance(score, (str, int, float, bool)): + if hasattr(score, "__iter__"): + for i, s in enumerate(score): + transaction._add_agent_attribute( + "%s/TrainingStep/%s/%s[%s]" % (model_name, training_step, wrapped.__name__, i), s + ) + else: + transaction._add_agent_attribute("%s/TrainingStep/%s/%s" % (model_name, training_step, wrapped.__name__), score) + return score + + +def instrument_sklearn_tree_models(module): + model_classes = ( + "DecisionTreeClassifier", + "DecisionTreeRegressor", + "ExtraTreeClassifier", + "ExtraTreeRegressor", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_ensemble_bagging_models(module): + model_classes = ( + "BaggingClassifier", + "BaggingRegressor", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_ensemble_forest_models(module): + model_classes = ( + "ExtraTreesClassifier", + "ExtraTreesRegressor", + "RandomForestClassifier", + "RandomForestRegressor", + "RandomTreesEmbedding", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_ensemble_iforest_models(module): + model_classes = ("IsolationForest",) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_ensemble_weight_boosting_models(module): + model_classes = ( + "AdaBoostClassifier", + "AdaBoostRegressor", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_ensemble_gradient_boosting_models(module): + model_classes = ( + "GradientBoostingClassifier", + "GradientBoostingRegressor", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_ensemble_voting_models(module): + model_classes = ( + "VotingClassifier", + "VotingRegressor", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_ensemble_stacking_models(module): + module_classes = ( + "StackingClassifier", + "StackingRegressor", + ) + _instrument_sklearn_models(module, module_classes) + + +def instrument_sklearn_ensemble_hist_models(module): + model_classes = ( + "HistGradientBoostingClassifier", + "HistGradientBoostingRegressor", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_linear_coordinate_descent_models(module): + model_classes = ( + "Lasso", + 
"LassoCV", + "ElasticNet", + "ElasticNetCV", + "MultiTaskLasso", + "MultiTaskLassoCV", + "MultiTaskElasticNet", + "MultiTaskElasticNetCV", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_compose_models(module): + model_classes = ( + "ColumnTransformer", + "TransformedTargetRegressor", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_covariance_shrunk_models(module): + model_classes = ( + "ShrunkCovariance", + "LedoitWolf", + "OAS", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_cross_decomposition_models(module): + model_classes = ( + "PLSRegression", + "PLSSVD", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_covariance_graph_models(module): + model_classes = ( + "GraphicalLasso", + "GraphicalLassoCV", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_discriminant_analysis_models(module): + model_classes = ( + "LinearDiscriminantAnalysis", + "QuadraticDiscriminantAnalysis", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_covariance_models(module): + model_classes = ( + "EmpiricalCovariance", + "MinCovDet", + "EllipticEnvelope", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_gaussian_process_models(module): + model_classes = ( + "GaussianProcessClassifier", + "GaussianProcessRegressor", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_dummy_models(module): + model_classes = ( + "DummyClassifier", + "DummyRegressor", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_feature_selection_rfe_models(module): + model_classes = ( + "RFE", + "RFECV", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_kernel_ridge_models(module): + model_classes = ("KernelRidge",) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_calibration_models(module): + model_classes = ("CalibratedClassifierCV",) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_cluster_models(module): + model_classes = ( + "AffinityPropagation", + "Birch", + "DBSCAN", + "MeanShift", + "OPTICS", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_linear_least_angle_models(module): + model_classes = ( + "Lars", + "LarsCV", + "LassoLars", + "LassoLarsCV", + "LassoLarsIC", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_feature_selection_models(module): + model_classes = ( + "VarianceThreshold", + "SelectFromModel", + "SequentialFeatureSelector", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_cluster_agglomerative_models(module): + model_classes = ( + "AgglomerativeClustering", + "FeatureAgglomeration", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_linear_GLM_models(module): + model_classes = ( + "PoissonRegressor", + "GammaRegressor", + "TweedieRegressor", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_cluster_clustering_models(module): + model_classes = ( + "SpectralBiclustering", + "SpectralCoclustering", + "SpectralClustering", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_linear_stochastic_gradient_models(module): + model_classes = ( + "SGDClassifier", + "SGDRegressor", + "SGDOneClassSVM", + ) + 
_instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_linear_ridge_models(module): + model_classes = ( + "Ridge", + "RidgeCV", + "RidgeClassifier", + "RidgeClassifierCV", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_linear_logistic_models(module): + model_classes = ( + "LogisticRegression", + "LogisticRegressionCV", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_linear_OMP_models(module): + model_classes = ( + "OrthogonalMatchingPursuit", + "OrthogonalMatchingPursuitCV", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_linear_passive_aggressive_models(module): + model_classes = ( + "PassiveAggressiveClassifier", + "PassiveAggressiveRegressor", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_linear_bayes_models(module): + model_classes = ( + "ARDRegression", + "BayesianRidge", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_linear_models(module): + model_classes = ( + "HuberRegressor", + "LinearRegression", + "Perceptron", + "QuantileRegressor", + "TheilSenRegressor", + "RANSACRegressor", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_cluster_kmeans_models(module): + model_classes = ( + "BisectingKMeans", + "KMeans", + "MiniBatchKMeans", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_multiclass_models(module): + model_classes = ( + "OneVsRestClassifier", + "OneVsOneClassifier", + "OutputCodeClassifier", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_multioutput_models(module): + model_classes = ( + "MultiOutputEstimator", + "MultiOutputClassifier", + "ClassifierChain", + "RegressorChain", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_naive_bayes_models(module): + model_classes = ( + "GaussianNB", + "MultinomialNB", + "ComplementNB", + "BernoulliNB", + "CategoricalNB", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_model_selection_models(module): + model_classes = ( + "GridSearchCV", + "RandomizedSearchCV", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_mixture_models(module): + model_classes = ( + "GaussianMixture", + "BayesianGaussianMixture", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_neural_network_models(module): + model_classes = ( + "BernoulliRBM", + "MLPClassifier", + "MLPRegressor", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_neighbors_KRadius_models(module): + model_classes = ( + "KNeighborsClassifier", + "RadiusNeighborsClassifier", + "KNeighborsTransformer", + "RadiusNeighborsTransformer", + "KNeighborsRegressor", + "RadiusNeighborsRegressor", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_svm_models(module): + model_classes = ( + "LinearSVC", + "LinearSVR", + "SVC", + "NuSVC", + "SVR", + "NuSVR", + "OneClassSVM", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_semi_supervised_models(module): + model_classes = ( + "LabelPropagation", + "LabelSpreading", + "SelfTrainingClassifier", + ) + _instrument_sklearn_models(module, model_classes) + + +def instrument_sklearn_pipeline_models(module): + model_classes = ( + "Pipeline", + "FeatureUnion", + ) + _instrument_sklearn_models(module, model_classes) + + +def 
+
+
+def instrument_sklearn_neighbors_models(module):
+    model_classes = (
+        "KernelDensity",
+        "LocalOutlierFactor",
+        "NeighborhoodComponentsAnalysis",
+        "NearestCentroid",
+        "NearestNeighbors",
+    )
+    _instrument_sklearn_models(module, model_classes)
+
+
+def instrument_sklearn_metrics(module):
+    for scorer in METRIC_SCORERS:
+        if hasattr(module, scorer):
+            wrap_function_wrapper(module, scorer, wrap_metric_scorer)
diff --git a/newrelic/packages/opentelemetry_proto/LICENSE.txt b/newrelic/packages/opentelemetry_proto/LICENSE.txt
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/newrelic/packages/opentelemetry_proto/LICENSE.txt
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/newrelic/packages/opentelemetry_proto/__init__.py b/newrelic/packages/opentelemetry_proto/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/newrelic/packages/opentelemetry_proto/common_pb2.py b/newrelic/packages/opentelemetry_proto/common_pb2.py new file mode 100644 index 000000000..a38431a58 --- /dev/null +++ b/newrelic/packages/opentelemetry_proto/common_pb2.py @@ -0,0 +1,87 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: opentelemetry/proto/common/v1/common.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n*opentelemetry/proto/common/v1/common.proto\x12\x1dopentelemetry.proto.common.v1\"\x8c\x02\n\x08\x41nyValue\x12\x16\n\x0cstring_value\x18\x01 \x01(\tH\x00\x12\x14\n\nbool_value\x18\x02 \x01(\x08H\x00\x12\x13\n\tint_value\x18\x03 \x01(\x03H\x00\x12\x16\n\x0c\x64ouble_value\x18\x04 \x01(\x01H\x00\x12@\n\x0b\x61rray_value\x18\x05 \x01(\x0b\x32).opentelemetry.proto.common.v1.ArrayValueH\x00\x12\x43\n\x0ckvlist_value\x18\x06 \x01(\x0b\x32+.opentelemetry.proto.common.v1.KeyValueListH\x00\x12\x15\n\x0b\x62ytes_value\x18\x07 \x01(\x0cH\x00\x42\x07\n\x05value\"E\n\nArrayValue\x12\x37\n\x06values\x18\x01 \x03(\x0b\x32\'.opentelemetry.proto.common.v1.AnyValue\"G\n\x0cKeyValueList\x12\x37\n\x06values\x18\x01 \x03(\x0b\x32\'.opentelemetry.proto.common.v1.KeyValue\"O\n\x08KeyValue\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x36\n\x05value\x18\x02 \x01(\x0b\x32\'.opentelemetry.proto.common.v1.AnyValue\";\n\x16InstrumentationLibrary\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\t:\x02\x18\x01\"5\n\x14InstrumentationScope\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\tB[\n io.opentelemetry.proto.common.v1B\x0b\x43ommonProtoP\x01Z(go.opentelemetry.io/proto/otlp/common/v1b\x06proto3') + + + +_ANYVALUE = DESCRIPTOR.message_types_by_name['AnyValue'] +_ARRAYVALUE = 
DESCRIPTOR.message_types_by_name['ArrayValue'] +_KEYVALUELIST = DESCRIPTOR.message_types_by_name['KeyValueList'] +_KEYVALUE = DESCRIPTOR.message_types_by_name['KeyValue'] +_INSTRUMENTATIONLIBRARY = DESCRIPTOR.message_types_by_name['InstrumentationLibrary'] +_INSTRUMENTATIONSCOPE = DESCRIPTOR.message_types_by_name['InstrumentationScope'] +AnyValue = _reflection.GeneratedProtocolMessageType('AnyValue', (_message.Message,), { + 'DESCRIPTOR' : _ANYVALUE, + '__module__' : 'opentelemetry.proto.common.v1.common_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.common.v1.AnyValue) + }) +_sym_db.RegisterMessage(AnyValue) + +ArrayValue = _reflection.GeneratedProtocolMessageType('ArrayValue', (_message.Message,), { + 'DESCRIPTOR' : _ARRAYVALUE, + '__module__' : 'opentelemetry.proto.common.v1.common_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.common.v1.ArrayValue) + }) +_sym_db.RegisterMessage(ArrayValue) + +KeyValueList = _reflection.GeneratedProtocolMessageType('KeyValueList', (_message.Message,), { + 'DESCRIPTOR' : _KEYVALUELIST, + '__module__' : 'opentelemetry.proto.common.v1.common_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.common.v1.KeyValueList) + }) +_sym_db.RegisterMessage(KeyValueList) + +KeyValue = _reflection.GeneratedProtocolMessageType('KeyValue', (_message.Message,), { + 'DESCRIPTOR' : _KEYVALUE, + '__module__' : 'opentelemetry.proto.common.v1.common_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.common.v1.KeyValue) + }) +_sym_db.RegisterMessage(KeyValue) + +InstrumentationLibrary = _reflection.GeneratedProtocolMessageType('InstrumentationLibrary', (_message.Message,), { + 'DESCRIPTOR' : _INSTRUMENTATIONLIBRARY, + '__module__' : 'opentelemetry.proto.common.v1.common_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.common.v1.InstrumentationLibrary) + }) +_sym_db.RegisterMessage(InstrumentationLibrary) + +InstrumentationScope = _reflection.GeneratedProtocolMessageType('InstrumentationScope', (_message.Message,), { + 'DESCRIPTOR' : _INSTRUMENTATIONSCOPE, + '__module__' : 'opentelemetry.proto.common.v1.common_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.common.v1.InstrumentationScope) + }) +_sym_db.RegisterMessage(InstrumentationScope) + +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n io.opentelemetry.proto.common.v1B\013CommonProtoP\001Z(go.opentelemetry.io/proto/otlp/common/v1' + _INSTRUMENTATIONLIBRARY._options = None + _INSTRUMENTATIONLIBRARY._serialized_options = b'\030\001' + _ANYVALUE._serialized_start=78 + _ANYVALUE._serialized_end=346 + _ARRAYVALUE._serialized_start=348 + _ARRAYVALUE._serialized_end=417 + _KEYVALUELIST._serialized_start=419 + _KEYVALUELIST._serialized_end=490 + _KEYVALUE._serialized_start=492 + _KEYVALUE._serialized_end=571 + _INSTRUMENTATIONLIBRARY._serialized_start=573 + _INSTRUMENTATIONLIBRARY._serialized_end=632 + _INSTRUMENTATIONSCOPE._serialized_start=634 + _INSTRUMENTATIONSCOPE._serialized_end=687 +# @@protoc_insertion_point(module_scope) diff --git a/newrelic/packages/opentelemetry_proto/logs_pb2.py b/newrelic/packages/opentelemetry_proto/logs_pb2.py new file mode 100644 index 000000000..bb6a55d66 --- /dev/null +++ b/newrelic/packages/opentelemetry_proto/logs_pb2.py @@ -0,0 +1,117 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: opentelemetry/proto/logs/v1/logs.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import common_pb2 as opentelemetry_dot_proto_dot_common_dot_v1_dot_common__pb2 +from . import resource_pb2 as opentelemetry_dot_proto_dot_resource_dot_v1_dot_resource__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n&opentelemetry/proto/logs/v1/logs.proto\x12\x1bopentelemetry.proto.logs.v1\x1a*opentelemetry/proto/common/v1/common.proto\x1a.opentelemetry/proto/resource/v1/resource.proto\"L\n\x08LogsData\x12@\n\rresource_logs\x18\x01 \x03(\x0b\x32).opentelemetry.proto.logs.v1.ResourceLogs\"\xff\x01\n\x0cResourceLogs\x12;\n\x08resource\x18\x01 \x01(\x0b\x32).opentelemetry.proto.resource.v1.Resource\x12:\n\nscope_logs\x18\x02 \x03(\x0b\x32&.opentelemetry.proto.logs.v1.ScopeLogs\x12\x62\n\x1cinstrumentation_library_logs\x18\xe8\x07 \x03(\x0b\x32\x37.opentelemetry.proto.logs.v1.InstrumentationLibraryLogsB\x02\x18\x01\x12\x12\n\nschema_url\x18\x03 \x01(\t\"\xa0\x01\n\tScopeLogs\x12\x42\n\x05scope\x18\x01 \x01(\x0b\x32\x33.opentelemetry.proto.common.v1.InstrumentationScope\x12;\n\x0blog_records\x18\x02 \x03(\x0b\x32&.opentelemetry.proto.logs.v1.LogRecord\x12\x12\n\nschema_url\x18\x03 \x01(\t\"\xc9\x01\n\x1aInstrumentationLibraryLogs\x12V\n\x17instrumentation_library\x18\x01 \x01(\x0b\x32\x35.opentelemetry.proto.common.v1.InstrumentationLibrary\x12;\n\x0blog_records\x18\x02 \x03(\x0b\x32&.opentelemetry.proto.logs.v1.LogRecord\x12\x12\n\nschema_url\x18\x03 \x01(\t:\x02\x18\x01\"\xef\x02\n\tLogRecord\x12\x16\n\x0etime_unix_nano\x18\x01 \x01(\x06\x12\x1f\n\x17observed_time_unix_nano\x18\x0b \x01(\x06\x12\x44\n\x0fseverity_number\x18\x02 \x01(\x0e\x32+.opentelemetry.proto.logs.v1.SeverityNumber\x12\x15\n\rseverity_text\x18\x03 \x01(\t\x12\x35\n\x04\x62ody\x18\x05 \x01(\x0b\x32\'.opentelemetry.proto.common.v1.AnyValue\x12;\n\nattributes\x18\x06 \x03(\x0b\x32\'.opentelemetry.proto.common.v1.KeyValue\x12 \n\x18\x64ropped_attributes_count\x18\x07 \x01(\r\x12\r\n\x05\x66lags\x18\x08 \x01(\x07\x12\x10\n\x08trace_id\x18\t \x01(\x0c\x12\x0f\n\x07span_id\x18\n 
\x01(\x0cJ\x04\x08\x04\x10\x05*\xc3\x05\n\x0eSeverityNumber\x12\x1f\n\x1bSEVERITY_NUMBER_UNSPECIFIED\x10\x00\x12\x19\n\x15SEVERITY_NUMBER_TRACE\x10\x01\x12\x1a\n\x16SEVERITY_NUMBER_TRACE2\x10\x02\x12\x1a\n\x16SEVERITY_NUMBER_TRACE3\x10\x03\x12\x1a\n\x16SEVERITY_NUMBER_TRACE4\x10\x04\x12\x19\n\x15SEVERITY_NUMBER_DEBUG\x10\x05\x12\x1a\n\x16SEVERITY_NUMBER_DEBUG2\x10\x06\x12\x1a\n\x16SEVERITY_NUMBER_DEBUG3\x10\x07\x12\x1a\n\x16SEVERITY_NUMBER_DEBUG4\x10\x08\x12\x18\n\x14SEVERITY_NUMBER_INFO\x10\t\x12\x19\n\x15SEVERITY_NUMBER_INFO2\x10\n\x12\x19\n\x15SEVERITY_NUMBER_INFO3\x10\x0b\x12\x19\n\x15SEVERITY_NUMBER_INFO4\x10\x0c\x12\x18\n\x14SEVERITY_NUMBER_WARN\x10\r\x12\x19\n\x15SEVERITY_NUMBER_WARN2\x10\x0e\x12\x19\n\x15SEVERITY_NUMBER_WARN3\x10\x0f\x12\x19\n\x15SEVERITY_NUMBER_WARN4\x10\x10\x12\x19\n\x15SEVERITY_NUMBER_ERROR\x10\x11\x12\x1a\n\x16SEVERITY_NUMBER_ERROR2\x10\x12\x12\x1a\n\x16SEVERITY_NUMBER_ERROR3\x10\x13\x12\x1a\n\x16SEVERITY_NUMBER_ERROR4\x10\x14\x12\x19\n\x15SEVERITY_NUMBER_FATAL\x10\x15\x12\x1a\n\x16SEVERITY_NUMBER_FATAL2\x10\x16\x12\x1a\n\x16SEVERITY_NUMBER_FATAL3\x10\x17\x12\x1a\n\x16SEVERITY_NUMBER_FATAL4\x10\x18*X\n\x0eLogRecordFlags\x12\x1f\n\x1bLOG_RECORD_FLAG_UNSPECIFIED\x10\x00\x12%\n LOG_RECORD_FLAG_TRACE_FLAGS_MASK\x10\xff\x01\x42U\n\x1eio.opentelemetry.proto.logs.v1B\tLogsProtoP\x01Z&go.opentelemetry.io/proto/otlp/logs/v1b\x06proto3') + +_SEVERITYNUMBER = DESCRIPTOR.enum_types_by_name['SeverityNumber'] +SeverityNumber = enum_type_wrapper.EnumTypeWrapper(_SEVERITYNUMBER) +_LOGRECORDFLAGS = DESCRIPTOR.enum_types_by_name['LogRecordFlags'] +LogRecordFlags = enum_type_wrapper.EnumTypeWrapper(_LOGRECORDFLAGS) +SEVERITY_NUMBER_UNSPECIFIED = 0 +SEVERITY_NUMBER_TRACE = 1 +SEVERITY_NUMBER_TRACE2 = 2 +SEVERITY_NUMBER_TRACE3 = 3 +SEVERITY_NUMBER_TRACE4 = 4 +SEVERITY_NUMBER_DEBUG = 5 +SEVERITY_NUMBER_DEBUG2 = 6 +SEVERITY_NUMBER_DEBUG3 = 7 +SEVERITY_NUMBER_DEBUG4 = 8 +SEVERITY_NUMBER_INFO = 9 +SEVERITY_NUMBER_INFO2 = 10 +SEVERITY_NUMBER_INFO3 = 11 +SEVERITY_NUMBER_INFO4 = 12 +SEVERITY_NUMBER_WARN = 13 +SEVERITY_NUMBER_WARN2 = 14 +SEVERITY_NUMBER_WARN3 = 15 +SEVERITY_NUMBER_WARN4 = 16 +SEVERITY_NUMBER_ERROR = 17 +SEVERITY_NUMBER_ERROR2 = 18 +SEVERITY_NUMBER_ERROR3 = 19 +SEVERITY_NUMBER_ERROR4 = 20 +SEVERITY_NUMBER_FATAL = 21 +SEVERITY_NUMBER_FATAL2 = 22 +SEVERITY_NUMBER_FATAL3 = 23 +SEVERITY_NUMBER_FATAL4 = 24 +LOG_RECORD_FLAG_UNSPECIFIED = 0 +LOG_RECORD_FLAG_TRACE_FLAGS_MASK = 255 + + +_LOGSDATA = DESCRIPTOR.message_types_by_name['LogsData'] +_RESOURCELOGS = DESCRIPTOR.message_types_by_name['ResourceLogs'] +_SCOPELOGS = DESCRIPTOR.message_types_by_name['ScopeLogs'] +_INSTRUMENTATIONLIBRARYLOGS = DESCRIPTOR.message_types_by_name['InstrumentationLibraryLogs'] +_LOGRECORD = DESCRIPTOR.message_types_by_name['LogRecord'] +LogsData = _reflection.GeneratedProtocolMessageType('LogsData', (_message.Message,), { + 'DESCRIPTOR' : _LOGSDATA, + '__module__' : 'opentelemetry.proto.logs.v1.logs_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.logs.v1.LogsData) + }) +_sym_db.RegisterMessage(LogsData) + +ResourceLogs = _reflection.GeneratedProtocolMessageType('ResourceLogs', (_message.Message,), { + 'DESCRIPTOR' : _RESOURCELOGS, + '__module__' : 'opentelemetry.proto.logs.v1.logs_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.logs.v1.ResourceLogs) + }) +_sym_db.RegisterMessage(ResourceLogs) + +ScopeLogs = _reflection.GeneratedProtocolMessageType('ScopeLogs', (_message.Message,), { + 'DESCRIPTOR' : _SCOPELOGS, + '__module__' : 'opentelemetry.proto.logs.v1.logs_pb2' 
+ # @@protoc_insertion_point(class_scope:opentelemetry.proto.logs.v1.ScopeLogs) + }) +_sym_db.RegisterMessage(ScopeLogs) + +InstrumentationLibraryLogs = _reflection.GeneratedProtocolMessageType('InstrumentationLibraryLogs', (_message.Message,), { + 'DESCRIPTOR' : _INSTRUMENTATIONLIBRARYLOGS, + '__module__' : 'opentelemetry.proto.logs.v1.logs_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.logs.v1.InstrumentationLibraryLogs) + }) +_sym_db.RegisterMessage(InstrumentationLibraryLogs) + +LogRecord = _reflection.GeneratedProtocolMessageType('LogRecord', (_message.Message,), { + 'DESCRIPTOR' : _LOGRECORD, + '__module__' : 'opentelemetry.proto.logs.v1.logs_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.logs.v1.LogRecord) + }) +_sym_db.RegisterMessage(LogRecord) + +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\036io.opentelemetry.proto.logs.v1B\tLogsProtoP\001Z&go.opentelemetry.io/proto/otlp/logs/v1' + _RESOURCELOGS.fields_by_name['instrumentation_library_logs']._options = None + _RESOURCELOGS.fields_by_name['instrumentation_library_logs']._serialized_options = b'\030\001' + _INSTRUMENTATIONLIBRARYLOGS._options = None + _INSTRUMENTATIONLIBRARYLOGS._serialized_options = b'\030\001' + _SEVERITYNUMBER._serialized_start=1237 + _SEVERITYNUMBER._serialized_end=1944 + _LOGRECORDFLAGS._serialized_start=1946 + _LOGRECORDFLAGS._serialized_end=2034 + _LOGSDATA._serialized_start=163 + _LOGSDATA._serialized_end=239 + _RESOURCELOGS._serialized_start=242 + _RESOURCELOGS._serialized_end=497 + _SCOPELOGS._serialized_start=500 + _SCOPELOGS._serialized_end=660 + _INSTRUMENTATIONLIBRARYLOGS._serialized_start=663 + _INSTRUMENTATIONLIBRARYLOGS._serialized_end=864 + _LOGRECORD._serialized_start=867 + _LOGRECORD._serialized_end=1234 +# @@protoc_insertion_point(module_scope) diff --git a/newrelic/packages/opentelemetry_proto/metrics_pb2.py b/newrelic/packages/opentelemetry_proto/metrics_pb2.py new file mode 100644 index 000000000..dea77c7de --- /dev/null +++ b/newrelic/packages/opentelemetry_proto/metrics_pb2.py @@ -0,0 +1,217 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: opentelemetry/proto/metrics/v1/metrics.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . import common_pb2 as opentelemetry_dot_proto_dot_common_dot_v1_dot_common__pb2 +from . 
import resource_pb2 as opentelemetry_dot_proto_dot_resource_dot_v1_dot_resource__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n,opentelemetry/proto/metrics/v1/metrics.proto\x12\x1eopentelemetry.proto.metrics.v1\x1a*opentelemetry/proto/common/v1/common.proto\x1a.opentelemetry/proto/resource/v1/resource.proto\"X\n\x0bMetricsData\x12I\n\x10resource_metrics\x18\x01 \x03(\x0b\x32/.opentelemetry.proto.metrics.v1.ResourceMetrics\"\x94\x02\n\x0fResourceMetrics\x12;\n\x08resource\x18\x01 \x01(\x0b\x32).opentelemetry.proto.resource.v1.Resource\x12\x43\n\rscope_metrics\x18\x02 \x03(\x0b\x32,.opentelemetry.proto.metrics.v1.ScopeMetrics\x12k\n\x1finstrumentation_library_metrics\x18\xe8\x07 \x03(\x0b\x32=.opentelemetry.proto.metrics.v1.InstrumentationLibraryMetricsB\x02\x18\x01\x12\x12\n\nschema_url\x18\x03 \x01(\t\"\x9f\x01\n\x0cScopeMetrics\x12\x42\n\x05scope\x18\x01 \x01(\x0b\x32\x33.opentelemetry.proto.common.v1.InstrumentationScope\x12\x37\n\x07metrics\x18\x02 \x03(\x0b\x32&.opentelemetry.proto.metrics.v1.Metric\x12\x12\n\nschema_url\x18\x03 \x01(\t\"\xc8\x01\n\x1dInstrumentationLibraryMetrics\x12V\n\x17instrumentation_library\x18\x01 \x01(\x0b\x32\x35.opentelemetry.proto.common.v1.InstrumentationLibrary\x12\x37\n\x07metrics\x18\x02 \x03(\x0b\x32&.opentelemetry.proto.metrics.v1.Metric\x12\x12\n\nschema_url\x18\x03 \x01(\t:\x02\x18\x01\"\x92\x03\n\x06Metric\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x0c\n\x04unit\x18\x03 \x01(\t\x12\x36\n\x05gauge\x18\x05 \x01(\x0b\x32%.opentelemetry.proto.metrics.v1.GaugeH\x00\x12\x32\n\x03sum\x18\x07 \x01(\x0b\x32#.opentelemetry.proto.metrics.v1.SumH\x00\x12>\n\thistogram\x18\t \x01(\x0b\x32).opentelemetry.proto.metrics.v1.HistogramH\x00\x12U\n\x15\x65xponential_histogram\x18\n \x01(\x0b\x32\x34.opentelemetry.proto.metrics.v1.ExponentialHistogramH\x00\x12:\n\x07summary\x18\x0b \x01(\x0b\x32\'.opentelemetry.proto.metrics.v1.SummaryH\x00\x42\x06\n\x04\x64\x61taJ\x04\x08\x04\x10\x05J\x04\x08\x06\x10\x07J\x04\x08\x08\x10\t\"M\n\x05Gauge\x12\x44\n\x0b\x64\x61ta_points\x18\x01 \x03(\x0b\x32/.opentelemetry.proto.metrics.v1.NumberDataPoint\"\xba\x01\n\x03Sum\x12\x44\n\x0b\x64\x61ta_points\x18\x01 \x03(\x0b\x32/.opentelemetry.proto.metrics.v1.NumberDataPoint\x12W\n\x17\x61ggregation_temporality\x18\x02 \x01(\x0e\x32\x36.opentelemetry.proto.metrics.v1.AggregationTemporality\x12\x14\n\x0cis_monotonic\x18\x03 \x01(\x08\"\xad\x01\n\tHistogram\x12G\n\x0b\x64\x61ta_points\x18\x01 \x03(\x0b\x32\x32.opentelemetry.proto.metrics.v1.HistogramDataPoint\x12W\n\x17\x61ggregation_temporality\x18\x02 \x01(\x0e\x32\x36.opentelemetry.proto.metrics.v1.AggregationTemporality\"\xc3\x01\n\x14\x45xponentialHistogram\x12R\n\x0b\x64\x61ta_points\x18\x01 \x03(\x0b\x32=.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint\x12W\n\x17\x61ggregation_temporality\x18\x02 \x01(\x0e\x32\x36.opentelemetry.proto.metrics.v1.AggregationTemporality\"P\n\x07Summary\x12\x45\n\x0b\x64\x61ta_points\x18\x01 \x03(\x0b\x32\x30.opentelemetry.proto.metrics.v1.SummaryDataPoint\"\x86\x02\n\x0fNumberDataPoint\x12;\n\nattributes\x18\x07 \x03(\x0b\x32\'.opentelemetry.proto.common.v1.KeyValue\x12\x1c\n\x14start_time_unix_nano\x18\x02 \x01(\x06\x12\x16\n\x0etime_unix_nano\x18\x03 \x01(\x06\x12\x13\n\tas_double\x18\x04 \x01(\x01H\x00\x12\x10\n\x06\x61s_int\x18\x06 \x01(\x10H\x00\x12;\n\texemplars\x18\x05 \x03(\x0b\x32(.opentelemetry.proto.metrics.v1.Exemplar\x12\r\n\x05\x66lags\x18\x08 
\x01(\rB\x07\n\x05valueJ\x04\x08\x01\x10\x02\"\xe6\x02\n\x12HistogramDataPoint\x12;\n\nattributes\x18\t \x03(\x0b\x32\'.opentelemetry.proto.common.v1.KeyValue\x12\x1c\n\x14start_time_unix_nano\x18\x02 \x01(\x06\x12\x16\n\x0etime_unix_nano\x18\x03 \x01(\x06\x12\r\n\x05\x63ount\x18\x04 \x01(\x06\x12\x10\n\x03sum\x18\x05 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\rbucket_counts\x18\x06 \x03(\x06\x12\x17\n\x0f\x65xplicit_bounds\x18\x07 \x03(\x01\x12;\n\texemplars\x18\x08 \x03(\x0b\x32(.opentelemetry.proto.metrics.v1.Exemplar\x12\r\n\x05\x66lags\x18\n \x01(\r\x12\x10\n\x03min\x18\x0b \x01(\x01H\x01\x88\x01\x01\x12\x10\n\x03max\x18\x0c \x01(\x01H\x02\x88\x01\x01\x42\x06\n\x04_sumB\x06\n\x04_minB\x06\n\x04_maxJ\x04\x08\x01\x10\x02\"\xb5\x04\n\x1d\x45xponentialHistogramDataPoint\x12;\n\nattributes\x18\x01 \x03(\x0b\x32\'.opentelemetry.proto.common.v1.KeyValue\x12\x1c\n\x14start_time_unix_nano\x18\x02 \x01(\x06\x12\x16\n\x0etime_unix_nano\x18\x03 \x01(\x06\x12\r\n\x05\x63ount\x18\x04 \x01(\x06\x12\x0b\n\x03sum\x18\x05 \x01(\x01\x12\r\n\x05scale\x18\x06 \x01(\x11\x12\x12\n\nzero_count\x18\x07 \x01(\x06\x12W\n\x08positive\x18\x08 \x01(\x0b\x32\x45.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets\x12W\n\x08negative\x18\t \x01(\x0b\x32\x45.opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets\x12\r\n\x05\x66lags\x18\n \x01(\r\x12;\n\texemplars\x18\x0b \x03(\x0b\x32(.opentelemetry.proto.metrics.v1.Exemplar\x12\x10\n\x03min\x18\x0c \x01(\x01H\x00\x88\x01\x01\x12\x10\n\x03max\x18\r \x01(\x01H\x01\x88\x01\x01\x1a\x30\n\x07\x42uckets\x12\x0e\n\x06offset\x18\x01 \x01(\x11\x12\x15\n\rbucket_counts\x18\x02 \x03(\x04\x42\x06\n\x04_minB\x06\n\x04_max\"\xc5\x02\n\x10SummaryDataPoint\x12;\n\nattributes\x18\x07 \x03(\x0b\x32\'.opentelemetry.proto.common.v1.KeyValue\x12\x1c\n\x14start_time_unix_nano\x18\x02 \x01(\x06\x12\x16\n\x0etime_unix_nano\x18\x03 \x01(\x06\x12\r\n\x05\x63ount\x18\x04 \x01(\x06\x12\x0b\n\x03sum\x18\x05 \x01(\x01\x12Y\n\x0fquantile_values\x18\x06 \x03(\x0b\x32@.opentelemetry.proto.metrics.v1.SummaryDataPoint.ValueAtQuantile\x12\r\n\x05\x66lags\x18\x08 \x01(\r\x1a\x32\n\x0fValueAtQuantile\x12\x10\n\x08quantile\x18\x01 \x01(\x01\x12\r\n\x05value\x18\x02 \x01(\x01J\x04\x08\x01\x10\x02\"\xc1\x01\n\x08\x45xemplar\x12\x44\n\x13\x66iltered_attributes\x18\x07 \x03(\x0b\x32\'.opentelemetry.proto.common.v1.KeyValue\x12\x16\n\x0etime_unix_nano\x18\x02 \x01(\x06\x12\x13\n\tas_double\x18\x03 \x01(\x01H\x00\x12\x10\n\x06\x61s_int\x18\x06 \x01(\x10H\x00\x12\x0f\n\x07span_id\x18\x04 \x01(\x0c\x12\x10\n\x08trace_id\x18\x05 \x01(\x0c\x42\x07\n\x05valueJ\x04\x08\x01\x10\x02*\x8c\x01\n\x16\x41ggregationTemporality\x12\'\n#AGGREGATION_TEMPORALITY_UNSPECIFIED\x10\x00\x12!\n\x1d\x41GGREGATION_TEMPORALITY_DELTA\x10\x01\x12&\n\"AGGREGATION_TEMPORALITY_CUMULATIVE\x10\x02*;\n\x0e\x44\x61taPointFlags\x12\r\n\tFLAG_NONE\x10\x00\x12\x1a\n\x16\x46LAG_NO_RECORDED_VALUE\x10\x01\x42^\n!io.opentelemetry.proto.metrics.v1B\x0cMetricsProtoP\x01Z)go.opentelemetry.io/proto/otlp/metrics/v1b\x06proto3') + +_AGGREGATIONTEMPORALITY = DESCRIPTOR.enum_types_by_name['AggregationTemporality'] +AggregationTemporality = enum_type_wrapper.EnumTypeWrapper(_AGGREGATIONTEMPORALITY) +_DATAPOINTFLAGS = DESCRIPTOR.enum_types_by_name['DataPointFlags'] +DataPointFlags = enum_type_wrapper.EnumTypeWrapper(_DATAPOINTFLAGS) +AGGREGATION_TEMPORALITY_UNSPECIFIED = 0 +AGGREGATION_TEMPORALITY_DELTA = 1 +AGGREGATION_TEMPORALITY_CUMULATIVE = 2 +FLAG_NONE = 0 +FLAG_NO_RECORDED_VALUE = 1 + + +_METRICSDATA = 
DESCRIPTOR.message_types_by_name['MetricsData'] +_RESOURCEMETRICS = DESCRIPTOR.message_types_by_name['ResourceMetrics'] +_SCOPEMETRICS = DESCRIPTOR.message_types_by_name['ScopeMetrics'] +_INSTRUMENTATIONLIBRARYMETRICS = DESCRIPTOR.message_types_by_name['InstrumentationLibraryMetrics'] +_METRIC = DESCRIPTOR.message_types_by_name['Metric'] +_GAUGE = DESCRIPTOR.message_types_by_name['Gauge'] +_SUM = DESCRIPTOR.message_types_by_name['Sum'] +_HISTOGRAM = DESCRIPTOR.message_types_by_name['Histogram'] +_EXPONENTIALHISTOGRAM = DESCRIPTOR.message_types_by_name['ExponentialHistogram'] +_SUMMARY = DESCRIPTOR.message_types_by_name['Summary'] +_NUMBERDATAPOINT = DESCRIPTOR.message_types_by_name['NumberDataPoint'] +_HISTOGRAMDATAPOINT = DESCRIPTOR.message_types_by_name['HistogramDataPoint'] +_EXPONENTIALHISTOGRAMDATAPOINT = DESCRIPTOR.message_types_by_name['ExponentialHistogramDataPoint'] +_EXPONENTIALHISTOGRAMDATAPOINT_BUCKETS = _EXPONENTIALHISTOGRAMDATAPOINT.nested_types_by_name['Buckets'] +_SUMMARYDATAPOINT = DESCRIPTOR.message_types_by_name['SummaryDataPoint'] +_SUMMARYDATAPOINT_VALUEATQUANTILE = _SUMMARYDATAPOINT.nested_types_by_name['ValueAtQuantile'] +_EXEMPLAR = DESCRIPTOR.message_types_by_name['Exemplar'] +MetricsData = _reflection.GeneratedProtocolMessageType('MetricsData', (_message.Message,), { + 'DESCRIPTOR' : _METRICSDATA, + '__module__' : 'opentelemetry.proto.metrics.v1.metrics_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.metrics.v1.MetricsData) + }) +_sym_db.RegisterMessage(MetricsData) + +ResourceMetrics = _reflection.GeneratedProtocolMessageType('ResourceMetrics', (_message.Message,), { + 'DESCRIPTOR' : _RESOURCEMETRICS, + '__module__' : 'opentelemetry.proto.metrics.v1.metrics_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.metrics.v1.ResourceMetrics) + }) +_sym_db.RegisterMessage(ResourceMetrics) + +ScopeMetrics = _reflection.GeneratedProtocolMessageType('ScopeMetrics', (_message.Message,), { + 'DESCRIPTOR' : _SCOPEMETRICS, + '__module__' : 'opentelemetry.proto.metrics.v1.metrics_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.metrics.v1.ScopeMetrics) + }) +_sym_db.RegisterMessage(ScopeMetrics) + +InstrumentationLibraryMetrics = _reflection.GeneratedProtocolMessageType('InstrumentationLibraryMetrics', (_message.Message,), { + 'DESCRIPTOR' : _INSTRUMENTATIONLIBRARYMETRICS, + '__module__' : 'opentelemetry.proto.metrics.v1.metrics_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.metrics.v1.InstrumentationLibraryMetrics) + }) +_sym_db.RegisterMessage(InstrumentationLibraryMetrics) + +Metric = _reflection.GeneratedProtocolMessageType('Metric', (_message.Message,), { + 'DESCRIPTOR' : _METRIC, + '__module__' : 'opentelemetry.proto.metrics.v1.metrics_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.metrics.v1.Metric) + }) +_sym_db.RegisterMessage(Metric) + +Gauge = _reflection.GeneratedProtocolMessageType('Gauge', (_message.Message,), { + 'DESCRIPTOR' : _GAUGE, + '__module__' : 'opentelemetry.proto.metrics.v1.metrics_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.metrics.v1.Gauge) + }) +_sym_db.RegisterMessage(Gauge) + +Sum = _reflection.GeneratedProtocolMessageType('Sum', (_message.Message,), { + 'DESCRIPTOR' : _SUM, + '__module__' : 'opentelemetry.proto.metrics.v1.metrics_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.metrics.v1.Sum) + }) +_sym_db.RegisterMessage(Sum) + +Histogram = _reflection.GeneratedProtocolMessageType('Histogram', (_message.Message,), { + 
'DESCRIPTOR' : _HISTOGRAM, + '__module__' : 'opentelemetry.proto.metrics.v1.metrics_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.metrics.v1.Histogram) + }) +_sym_db.RegisterMessage(Histogram) + +ExponentialHistogram = _reflection.GeneratedProtocolMessageType('ExponentialHistogram', (_message.Message,), { + 'DESCRIPTOR' : _EXPONENTIALHISTOGRAM, + '__module__' : 'opentelemetry.proto.metrics.v1.metrics_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.metrics.v1.ExponentialHistogram) + }) +_sym_db.RegisterMessage(ExponentialHistogram) + +Summary = _reflection.GeneratedProtocolMessageType('Summary', (_message.Message,), { + 'DESCRIPTOR' : _SUMMARY, + '__module__' : 'opentelemetry.proto.metrics.v1.metrics_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.metrics.v1.Summary) + }) +_sym_db.RegisterMessage(Summary) + +NumberDataPoint = _reflection.GeneratedProtocolMessageType('NumberDataPoint', (_message.Message,), { + 'DESCRIPTOR' : _NUMBERDATAPOINT, + '__module__' : 'opentelemetry.proto.metrics.v1.metrics_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.metrics.v1.NumberDataPoint) + }) +_sym_db.RegisterMessage(NumberDataPoint) + +HistogramDataPoint = _reflection.GeneratedProtocolMessageType('HistogramDataPoint', (_message.Message,), { + 'DESCRIPTOR' : _HISTOGRAMDATAPOINT, + '__module__' : 'opentelemetry.proto.metrics.v1.metrics_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.metrics.v1.HistogramDataPoint) + }) +_sym_db.RegisterMessage(HistogramDataPoint) + +ExponentialHistogramDataPoint = _reflection.GeneratedProtocolMessageType('ExponentialHistogramDataPoint', (_message.Message,), { + + 'Buckets' : _reflection.GeneratedProtocolMessageType('Buckets', (_message.Message,), { + 'DESCRIPTOR' : _EXPONENTIALHISTOGRAMDATAPOINT_BUCKETS, + '__module__' : 'opentelemetry.proto.metrics.v1.metrics_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets) + }) + , + 'DESCRIPTOR' : _EXPONENTIALHISTOGRAMDATAPOINT, + '__module__' : 'opentelemetry.proto.metrics.v1.metrics_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint) + }) +_sym_db.RegisterMessage(ExponentialHistogramDataPoint) +_sym_db.RegisterMessage(ExponentialHistogramDataPoint.Buckets) + +SummaryDataPoint = _reflection.GeneratedProtocolMessageType('SummaryDataPoint', (_message.Message,), { + + 'ValueAtQuantile' : _reflection.GeneratedProtocolMessageType('ValueAtQuantile', (_message.Message,), { + 'DESCRIPTOR' : _SUMMARYDATAPOINT_VALUEATQUANTILE, + '__module__' : 'opentelemetry.proto.metrics.v1.metrics_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.metrics.v1.SummaryDataPoint.ValueAtQuantile) + }) + , + 'DESCRIPTOR' : _SUMMARYDATAPOINT, + '__module__' : 'opentelemetry.proto.metrics.v1.metrics_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.metrics.v1.SummaryDataPoint) + }) +_sym_db.RegisterMessage(SummaryDataPoint) +_sym_db.RegisterMessage(SummaryDataPoint.ValueAtQuantile) + +Exemplar = _reflection.GeneratedProtocolMessageType('Exemplar', (_message.Message,), { + 'DESCRIPTOR' : _EXEMPLAR, + '__module__' : 'opentelemetry.proto.metrics.v1.metrics_pb2' + # @@protoc_insertion_point(class_scope:opentelemetry.proto.metrics.v1.Exemplar) + }) +_sym_db.RegisterMessage(Exemplar) + +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = 
b'\n!io.opentelemetry.proto.metrics.v1B\014MetricsProtoP\001Z)go.opentelemetry.io/proto/otlp/metrics/v1' + _RESOURCEMETRICS.fields_by_name['instrumentation_library_metrics']._options = None + _RESOURCEMETRICS.fields_by_name['instrumentation_library_metrics']._serialized_options = b'\030\001' + _INSTRUMENTATIONLIBRARYMETRICS._options = None + _INSTRUMENTATIONLIBRARYMETRICS._serialized_options = b'\030\001' + _AGGREGATIONTEMPORALITY._serialized_start=3754 + _AGGREGATIONTEMPORALITY._serialized_end=3894 + _DATAPOINTFLAGS._serialized_start=3896 + _DATAPOINTFLAGS._serialized_end=3955 + _METRICSDATA._serialized_start=172 + _METRICSDATA._serialized_end=260 + _RESOURCEMETRICS._serialized_start=263 + _RESOURCEMETRICS._serialized_end=539 + _SCOPEMETRICS._serialized_start=542 + _SCOPEMETRICS._serialized_end=701 + _INSTRUMENTATIONLIBRARYMETRICS._serialized_start=704 + _INSTRUMENTATIONLIBRARYMETRICS._serialized_end=904 + _METRIC._serialized_start=907 + _METRIC._serialized_end=1309 + _GAUGE._serialized_start=1311 + _GAUGE._serialized_end=1388 + _SUM._serialized_start=1391 + _SUM._serialized_end=1577 + _HISTOGRAM._serialized_start=1580 + _HISTOGRAM._serialized_end=1753 + _EXPONENTIALHISTOGRAM._serialized_start=1756 + _EXPONENTIALHISTOGRAM._serialized_end=1951 + _SUMMARY._serialized_start=1953 + _SUMMARY._serialized_end=2033 + _NUMBERDATAPOINT._serialized_start=2036 + _NUMBERDATAPOINT._serialized_end=2298 + _HISTOGRAMDATAPOINT._serialized_start=2301 + _HISTOGRAMDATAPOINT._serialized_end=2659 + _EXPONENTIALHISTOGRAMDATAPOINT._serialized_start=2662 + _EXPONENTIALHISTOGRAMDATAPOINT._serialized_end=3227 + _EXPONENTIALHISTOGRAMDATAPOINT_BUCKETS._serialized_start=3163 + _EXPONENTIALHISTOGRAMDATAPOINT_BUCKETS._serialized_end=3211 + _SUMMARYDATAPOINT._serialized_start=3230 + _SUMMARYDATAPOINT._serialized_end=3555 + _SUMMARYDATAPOINT_VALUEATQUANTILE._serialized_start=3499 + _SUMMARYDATAPOINT_VALUEATQUANTILE._serialized_end=3549 + _EXEMPLAR._serialized_start=3558 + _EXEMPLAR._serialized_end=3751 +# @@protoc_insertion_point(module_scope) \ No newline at end of file diff --git a/newrelic/packages/opentelemetry_proto/resource_pb2.py b/newrelic/packages/opentelemetry_proto/resource_pb2.py new file mode 100644 index 000000000..8cc64e352 --- /dev/null +++ b/newrelic/packages/opentelemetry_proto/resource_pb2.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: opentelemetry/proto/resource/v1/resource.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from . 
import common_pb2 as opentelemetry_dot_proto_dot_common_dot_v1_dot_common__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n.opentelemetry/proto/resource/v1/resource.proto\x12\x1fopentelemetry.proto.resource.v1\x1a*opentelemetry/proto/common/v1/common.proto\"i\n\x08Resource\x12;\n\nattributes\x18\x01 \x03(\x0b\x32\'.opentelemetry.proto.common.v1.KeyValue\x12 \n\x18\x64ropped_attributes_count\x18\x02 \x01(\rBa\n\"io.opentelemetry.proto.resource.v1B\rResourceProtoP\x01Z*go.opentelemetry.io/proto/otlp/resource/v1b\x06proto3')
+
+
+
+_RESOURCE = DESCRIPTOR.message_types_by_name['Resource']
+Resource = _reflection.GeneratedProtocolMessageType('Resource', (_message.Message,), {
+  'DESCRIPTOR' : _RESOURCE,
+  '__module__' : 'opentelemetry.proto.resource.v1.resource_pb2'
+  # @@protoc_insertion_point(class_scope:opentelemetry.proto.resource.v1.Resource)
+  })
+_sym_db.RegisterMessage(Resource)
+
+if _descriptor._USE_C_DESCRIPTORS == False:
+
+  DESCRIPTOR._options = None
+  DESCRIPTOR._serialized_options = b'\n\"io.opentelemetry.proto.resource.v1B\rResourceProtoP\001Z*go.opentelemetry.io/proto/otlp/resource/v1'
+  _RESOURCE._serialized_start=127
+  _RESOURCE._serialized_end=232
+# @@protoc_insertion_point(module_scope)
diff --git a/setup.py b/setup.py
index 2b1e5191e..ed8dbfb84 100644
--- a/setup.py
+++ b/setup.py
@@ -111,6 +111,7 @@ def build_extension(self, ext):
     "newrelic/packages/urllib3/packages",
     "newrelic/packages/urllib3/packages/backports",
     "newrelic/packages/wrapt",
+    "newrelic/packages/opentelemetry_proto",
     "newrelic.samplers",
 ]
diff --git a/tests/agent_features/conftest.py b/tests/agent_features/conftest.py
index 57263238b..bd6aa6c2a 100644
--- a/tests/agent_features/conftest.py
+++ b/tests/agent_features/conftest.py
@@ -30,6 +30,7 @@
     "debug.record_transaction_failure": True,
     "debug.log_autorum_middleware": True,
     "agent_limits.errors_per_harvest": 100,
+    "ml_insights_events.enabled": True
 }

 collector_agent_registration = collector_agent_registration_fixture(
diff --git a/tests/agent_features/test_configuration.py b/tests/agent_features/test_configuration.py
index 79f2a41f1..1a311e693 100644
--- a/tests/agent_features/test_configuration.py
+++ b/tests/agent_features/test_configuration.py
@@ -591,6 +591,8 @@ def test_translate_deprecated_ignored_params_with_new_setting():
         ("agent_run_id", None),
         ("entity_guid", None),
         ("distributed_tracing.exclude_newrelic_header", False),
+        ("otlp_host", "otlp.nr-data.net"),
+        ("otlp_port", 0),
     ),
 )
 def test_default_values(name, expected_value):
diff --git a/tests/agent_features/test_dimensional_metrics.py b/tests/agent_features/test_dimensional_metrics.py
new file mode 100644
index 000000000..ef9e98418
--- /dev/null
+++ b/tests/agent_features/test_dimensional_metrics.py
@@ -0,0 +1,224 @@
+# Copyright 2010 New Relic, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+from testing_support.fixtures import reset_core_stats_engine
+from testing_support.validators.validate_dimensional_metric_payload import (
+    validate_dimensional_metric_payload,
+)
+from testing_support.validators.validate_dimensional_metrics_outside_transaction import (
+    validate_dimensional_metrics_outside_transaction,
+)
+from testing_support.validators.validate_transaction_metrics import (
+    validate_transaction_metrics,
+)
+
+import newrelic.core.otlp_utils
+from newrelic.api.application import application_instance
+from newrelic.api.background_task import background_task
+from newrelic.api.transaction import (
+    record_dimensional_metric,
+    record_dimensional_metrics,
+)
+from newrelic.common.metric_utils import create_metric_identity
+from newrelic.core.config import global_settings
+from newrelic.packages import six
+
+try:
+    # python 2.x
+    reload
+except NameError:
+    # python 3.x
+    from importlib import reload
+
+
+@pytest.fixture(scope="module", autouse=True, params=["protobuf", "json"])
+def otlp_content_encoding(request):
+    if six.PY2 and request.param == "protobuf":
+        pytest.skip("OTLP protos are not compatible with Python 2.")
+
+    _settings = global_settings()
+    prev = _settings.debug.otlp_content_encoding
+    _settings.debug.otlp_content_encoding = request.param
+    reload(newrelic.core.otlp_utils)
+    assert newrelic.core.otlp_utils.otlp_content_setting == request.param, "Content encoding mismatch."
+
+    yield
+
+    _settings.debug.otlp_content_encoding = prev
+
+
+_test_tags_examples = [
+    (None, None),
+    ({}, None),
+    ([], None),
+    ({"str": "a"}, frozenset({("str", "a")})),
+    ({"int": 1}, frozenset({("int", 1)})),
+    ({"float": 1.0}, frozenset({("float", 1.0)})),
+    ({"bool": True}, frozenset({("bool", True)})),
+    ({"list": [1]}, frozenset({("list", "[1]")})),
+    ({"dict": {"subtag": 1}}, frozenset({("dict", "{'subtag': 1}")})),
+    ([("tags-as-list", 1)], frozenset({("tags-as-list", 1)})),
+]
+
+
+@pytest.mark.parametrize("tags,expected", _test_tags_examples)
+def test_create_metric_identity(tags, expected):
+    name = "Metric"
+    output_name, output_tags = create_metric_identity(name, tags=tags)
+    assert output_name == name, "Name does not match."
+    assert output_tags == expected, "Output tags do not match."
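+
+
+# NOTE: as the parametrized cases above illustrate, create_metric_identity
+# reduces a (name, tags) pair to a hashable identity: dict or list tags
+# normalize to a frozenset of (key, value) pairs, non-primitive tag values
+# (lists, dicts) are stringified, and empty or missing tags normalize to None.
+# For example, create_metric_identity("Metric", tags={"str": "a"}) is expected
+# to return ("Metric", frozenset({("str", "a")})).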
+
+
+@pytest.mark.parametrize("tags,expected", _test_tags_examples)
+@reset_core_stats_engine()
+def test_record_dimensional_metric_inside_transaction(tags, expected):
+    @validate_transaction_metrics(
+        "test_record_dimensional_metric_inside_transaction",
+        background_task=True,
+        dimensional_metrics=[
+            ("Metric", expected, 1),
+        ],
+    )
+    @background_task(name="test_record_dimensional_metric_inside_transaction")
+    def _test():
+        record_dimensional_metric("Metric", 1, tags=tags)
+
+    _test()
+
+
+@pytest.mark.parametrize("tags,expected", _test_tags_examples)
+@reset_core_stats_engine()
+def test_record_dimensional_metric_outside_transaction(tags, expected):
+    @validate_dimensional_metrics_outside_transaction([("Metric", expected, 1)])
+    def _test():
+        app = application_instance()
+        record_dimensional_metric("Metric", 1, tags=tags, application=app)
+
+    _test()
+
+
+@pytest.mark.parametrize("tags,expected", _test_tags_examples)
+@reset_core_stats_engine()
+def test_record_dimensional_metrics_inside_transaction(tags, expected):
+    @validate_transaction_metrics(
+        "test_record_dimensional_metrics_inside_transaction",
+        background_task=True,
+        dimensional_metrics=[("Metric.1", expected, 1), ("Metric.2", expected, 1)],
+    )
+    @background_task(name="test_record_dimensional_metrics_inside_transaction")
+    def _test():
+        record_dimensional_metrics([("Metric.1", 1, tags), ("Metric.2", 1, tags)])
+
+    _test()
+
+
+@pytest.mark.parametrize("tags,expected", _test_tags_examples)
+@reset_core_stats_engine()
+def test_record_dimensional_metrics_outside_transaction(tags, expected):
+    @validate_dimensional_metrics_outside_transaction([("Metric.1", expected, 1), ("Metric.2", expected, 1)])
+    def _test():
+        app = application_instance()
+        record_dimensional_metrics([("Metric.1", 1, tags), ("Metric.2", 1, tags)], application=app)
+
+    _test()
+
+
+@reset_core_stats_engine()
+def test_dimensional_metrics_different_tags():
+    @validate_transaction_metrics(
+        "test_dimensional_metrics_different_tags",
+        background_task=True,
+        dimensional_metrics=[
+            ("Metric", frozenset({("tag", 1)}), 1),
+            ("Metric", frozenset({("tag", 2)}), 2),
+        ],
+    )
+    @background_task(name="test_dimensional_metrics_different_tags")
+    def _test():
+        record_dimensional_metrics(
+            [
+                ("Metric", 1, {"tag": 1}),
+                ("Metric", 1, {"tag": 2}),
+            ]
+        )
+        record_dimensional_metric("Metric", 1, {"tag": 2})
+
+    _test()
+
+
+@reset_core_stats_engine()
+@validate_dimensional_metric_payload(
+    summary_metrics=[
+        ("Metric.Summary", {"tag": 1}, 1),
+        ("Metric.Summary", {"tag": 2}, 1),
+        ("Metric.Summary", None, 1),
+        ("Metric.Mixed", {"tag": 1}, 1),
+        ("Metric.NotPresent", None, None),
+    ],
+    count_metrics=[
+        ("Metric.Count", {"tag": 1}, 1),
+        ("Metric.Count", {"tag": 2}, 2),
+        ("Metric.Count", None, 3),
+        ("Metric.Mixed", {"tag": 2}, 2),
+        ("Metric.NotPresent", None, None),
+    ],
+)
+def test_dimensional_metrics_payload():
+    @background_task(name="test_dimensional_metric_payload")
+    def _test():
+        record_dimensional_metrics(
+            [
+                ("Metric.Summary", 1, {"tag": 1}),
+                ("Metric.Summary", 2, {"tag": 2}),
+                ("Metric.Summary", 3),  # No tags
+                ("Metric.Count", {"count": 1}, {"tag": 1}),
+                ("Metric.Count", {"count": 2}, {"tag": 2}),
+                ("Metric.Count", {"count": 3}),  # No tags
+                ("Metric.Mixed", 1, {"tag": 1}),
+                ("Metric.Mixed", {"count": 2}, {"tag": 2}),
+            ]
+        )
+
+    _test()
+    app = application_instance()
+    core_app = app._agent.application(app.name)
+    core_app.harvest()
+
+
+@reset_core_stats_engine()
+@validate_dimensional_metric_payload(
+    summary_metrics=[
("Metric.Summary", None, 1), + ("Metric.Count", None, None), # Should NOT be present + ], + count_metrics=[ + ("Metric.Count", None, 1), + ("Metric.Summary", None, None), # Should NOT be present + ], +) +def test_dimensional_metrics_no_duplicate_encodings(): + @background_task(name="test_dimensional_metric_payload") + def _test(): + record_dimensional_metrics( + [ + ("Metric.Summary", 1), + ("Metric.Count", {"count": 1}), + ] + ) + + _test() + app = application_instance() + core_app = app._agent.application(app.name) + core_app.harvest() diff --git a/tests/agent_features/test_high_security_mode.py b/tests/agent_features/test_high_security_mode.py index 20d997837..d2ded9308 100644 --- a/tests/agent_features/test_high_security_mode.py +++ b/tests/agent_features/test_high_security_mode.py @@ -79,8 +79,10 @@ def test_hsm_configuration_default(): "transaction_tracer.record_sql": "raw", "strip_exception_messages.enabled": False, "custom_insights_events.enabled": True, + "ml_insights_events.enabled": True, "message_tracer.segment_parameters_enabled": True, "application_logging.forwarding.enabled": True, + "machine_learning.inference_events_value.enabled": True, }, { "high_security": False, @@ -88,8 +90,10 @@ def test_hsm_configuration_default(): "transaction_tracer.record_sql": "raw", "strip_exception_messages.enabled": False, "custom_insights_events.enabled": False, + "ml_insights_events.enabled": False, "message_tracer.segment_parameters_enabled": True, "application_logging.forwarding.enabled": True, + "machine_learning.inference_events_value.enabled": True, }, { "high_security": False, @@ -97,8 +101,10 @@ def test_hsm_configuration_default(): "transaction_tracer.record_sql": "obfuscated", "strip_exception_messages.enabled": True, "custom_insights_events.enabled": True, + "ml_insights_events.enabled": True, "message_tracer.segment_parameters_enabled": False, "application_logging.forwarding.enabled": False, + "machine_learning.inference_events_value.enabled": False, }, { "high_security": False, @@ -106,8 +112,10 @@ def test_hsm_configuration_default(): "transaction_tracer.record_sql": "off", "strip_exception_messages.enabled": True, "custom_insights_events.enabled": False, + "ml_insights_events.enabled": False, "message_tracer.segment_parameters_enabled": False, "application_logging.forwarding.enabled": False, + "machine_learning.inference_events_value.enabled": False, }, ] @@ -118,8 +126,10 @@ def test_hsm_configuration_default(): "transaction_tracer.record_sql": "raw", "strip_exception_messages.enabled": True, "custom_insights_events.enabled": True, + "ml_insights_events.enabled": True, "message_tracer.segment_parameters_enabled": True, "application_logging.forwarding.enabled": False, + "machine_learning.inference_events_value.enabled": False, }, { "high_security": True, @@ -127,8 +137,10 @@ def test_hsm_configuration_default(): "transaction_tracer.record_sql": "raw", "strip_exception_messages.enabled": True, "custom_insights_events.enabled": True, + "ml_insights_events.enabled": True, "message_tracer.segment_parameters_enabled": True, "application_logging.forwarding.enabled": False, + "machine_learning.inference_events_value.enabled": False, }, { "high_security": True, @@ -136,8 +148,10 @@ def test_hsm_configuration_default(): "transaction_tracer.record_sql": "raw", "strip_exception_messages.enabled": True, "custom_insights_events.enabled": True, + "ml_insights_events.enabled": True, "message_tracer.segment_parameters_enabled": True, "application_logging.forwarding.enabled": False, + 
"machine_learning.inference_events_value.enabled": False, }, { "high_security": True, @@ -145,8 +159,10 @@ def test_hsm_configuration_default(): "transaction_tracer.record_sql": "raw", "strip_exception_messages.enabled": True, "custom_insights_events.enabled": True, + "ml_insights_events.enabled": True, "message_tracer.segment_parameters_enabled": True, "application_logging.forwarding.enabled": True, + "machine_learning.inference_events_value.enabled": True, }, { "high_security": True, @@ -154,8 +170,10 @@ def test_hsm_configuration_default(): "transaction_tracer.record_sql": "obfuscated", "strip_exception_messages.enabled": True, "custom_insights_events.enabled": True, + "ml_insights_events.enabled": True, "message_tracer.segment_parameters_enabled": True, "application_logging.forwarding.enabled": True, + "machine_learning.inference_events_value.enabled": True, }, { "high_security": True, @@ -163,8 +181,10 @@ def test_hsm_configuration_default(): "transaction_tracer.record_sql": "off", "strip_exception_messages.enabled": True, "custom_insights_events.enabled": True, + "ml_insights_events.enabled": True, "message_tracer.segment_parameters_enabled": False, "application_logging.forwarding.enabled": True, + "machine_learning.inference_events_value.enabled": True, }, { "high_security": True, @@ -172,8 +192,10 @@ def test_hsm_configuration_default(): "transaction_tracer.record_sql": "raw", "strip_exception_messages.enabled": False, "custom_insights_events.enabled": False, + "ml_insights_events.enabled": False, "message_tracer.segment_parameters_enabled": False, "application_logging.forwarding.enabled": True, + "machine_learning.inference_events_value.enabled": True, }, ] @@ -196,8 +218,10 @@ def test_local_config_file_override_hsm_disabled(settings): original_record_sql = settings.transaction_tracer.record_sql original_strip_messages = settings.strip_exception_messages.enabled original_custom_events = settings.custom_insights_events.enabled + original_ml_events = settings.ml_insights_events.enabled original_message_segment_params_enabled = settings.message_tracer.segment_parameters_enabled original_application_logging_forwarding_enabled = settings.application_logging.forwarding.enabled + original_machine_learning_inference_event_value_enabled = settings.machine_learning.inference_events_value.enabled apply_local_high_security_mode_setting(settings) @@ -205,8 +229,13 @@ def test_local_config_file_override_hsm_disabled(settings): assert settings.transaction_tracer.record_sql == original_record_sql assert settings.strip_exception_messages.enabled == original_strip_messages assert settings.custom_insights_events.enabled == original_custom_events + assert settings.ml_insights_events.enabled == original_ml_events assert settings.message_tracer.segment_parameters_enabled == original_message_segment_params_enabled assert settings.application_logging.forwarding.enabled == original_application_logging_forwarding_enabled + assert ( + settings.machine_learning.inference_events_value.enabled + == original_machine_learning_inference_event_value_enabled + ) @parameterize_hsm_local_config(_hsm_local_config_file_settings_enabled) @@ -217,8 +246,10 @@ def test_local_config_file_override_hsm_enabled(settings): assert settings.transaction_tracer.record_sql in ("off", "obfuscated") assert settings.strip_exception_messages.enabled assert settings.custom_insights_events.enabled is False + assert settings.ml_insights_events.enabled is False assert settings.message_tracer.segment_parameters_enabled is False assert 
settings.application_logging.forwarding.enabled is False + assert settings.machine_learning.inference_events_value.enabled is False _server_side_config_settings_hsm_disabled = [ @@ -229,7 +260,9 @@ def test_local_config_file_override_hsm_enabled(settings): "transaction_tracer.record_sql": "obfuscated", "strip_exception_messages.enabled": True, "custom_insights_events.enabled": False, + "ml_insights_events.enabled": False, "application_logging.forwarding.enabled": False, + "machine_learning.inference_events_value.enabled": False, }, { "agent_config": { @@ -237,7 +270,9 @@ def test_local_config_file_override_hsm_enabled(settings): "transaction_tracer.record_sql": "raw", "strip_exception_messages.enabled": False, "custom_insights_events.enabled": True, + "ml_insights_events.enabled": True, "application_logging.forwarding.enabled": True, + "machine_learning.inference_events_value.enabled": True, }, }, ), @@ -248,7 +283,9 @@ def test_local_config_file_override_hsm_enabled(settings): "transaction_tracer.record_sql": "raw", "strip_exception_messages.enabled": False, "custom_insights_events.enabled": True, + "ml_insights_events.enabled": True, "application_logging.forwarding.enabled": True, + "machine_learning.inference_events_value.enabled": True, }, { "agent_config": { @@ -256,7 +293,9 @@ def test_local_config_file_override_hsm_enabled(settings): "transaction_tracer.record_sql": "off", "strip_exception_messages.enabled": True, "custom_insights_events.enabled": False, + "ml_insights_events.enabled": False, "application_logging.forwarding.enabled": False, + "machine_learning.inference_events_value.enabled": False, }, }, ), @@ -270,7 +309,9 @@ def test_local_config_file_override_hsm_enabled(settings): "transaction_tracer.record_sql": "obfuscated", "strip_exception_messages.enabled": True, "custom_insights_events.enabled": False, + "ml_insights_events.enabled": False, "application_logging.forwarding.enabled": False, + "machine_learning.inference_events_value.enabled": False, }, { "high_security": True, @@ -278,13 +319,17 @@ def test_local_config_file_override_hsm_enabled(settings): "transaction_tracer.record_sql": "obfuscated", "strip_exception_messages.enabled": True, "custom_insights_events.enabled": False, + "ml_insights_events.enabled": False, "application_logging.forwarding.enabled": False, + "machine_learning.inference_events_value.enabled": False, "agent_config": { "capture_params": False, "transaction_tracer.record_sql": "obfuscated", "strip_exception_messages.enabled": True, "custom_insights_events.enabled": False, + "ml_insights_events.enabled": False, "application_logging.forwarding.enabled": False, + "machine_learning.inference_events_value.enabled": False, }, }, ), @@ -295,7 +340,9 @@ def test_local_config_file_override_hsm_enabled(settings): "transaction_tracer.record_sql": "obfuscated", "strip_exception_messages.enabled": True, "custom_insights_events.enabled": False, + "ml_insights_events.enabled": False, "application_logging.forwarding.enabled": False, + "machine_learning.inference_events_value.enabled": False, }, { "high_security": True, @@ -303,13 +350,17 @@ def test_local_config_file_override_hsm_enabled(settings): "transaction_tracer.record_sql": "obfuscated", "strip_exception_messages.enabled": True, "custom_insights_events.enabled": False, + "ml_insights_events.enabled": False, "application_logging.forwarding.enabled": False, + "machine_learning.inference_events_value.enabled": False, "agent_config": { "capture_params": True, "transaction_tracer.record_sql": "raw", 
"strip_exception_messages.enabled": False, "custom_insights_events.enabled": True, + "ml_insights_events.enabled": True, "application_logging.forwarding.enabled": True, + "machine_learning.inference_events_value.enabled": True, }, }, ), @@ -329,7 +380,9 @@ def test_remote_config_fixups_hsm_disabled(local_settings, server_settings): original_record_sql = agent_config["transaction_tracer.record_sql"] original_strip_messages = agent_config["strip_exception_messages.enabled"] original_custom_events = agent_config["custom_insights_events.enabled"] + original_ml_events = agent_config["ml_insights_events.enabled"] original_log_forwarding = agent_config["application_logging.forwarding.enabled"] + original_machine_learning_events = agent_config["machine_learning.inference_events_value.enabled"] _settings = global_settings() settings = override_generic_settings(_settings, local_settings)(AgentProtocol._apply_high_security_mode_fixups)( @@ -344,7 +397,9 @@ def test_remote_config_fixups_hsm_disabled(local_settings, server_settings): assert agent_config["transaction_tracer.record_sql"] == original_record_sql assert agent_config["strip_exception_messages.enabled"] == original_strip_messages assert agent_config["custom_insights_events.enabled"] == original_custom_events + assert agent_config["ml_insights_events.enabled"] == original_ml_events assert agent_config["application_logging.forwarding.enabled"] == original_log_forwarding + assert agent_config["machine_learning.inference_events_value.enabled"] == original_machine_learning_events @pytest.mark.parametrize("local_settings,server_settings", _server_side_config_settings_hsm_enabled) @@ -366,13 +421,17 @@ def test_remote_config_fixups_hsm_enabled(local_settings, server_settings): assert "transaction_tracer.record_sql" not in settings assert "strip_exception_messages.enabled" not in settings assert "custom_insights_events.enabled" not in settings + assert "ml_insights_events.enabled" not in settings assert "application_logging.forwarding.enabled" not in settings + assert "machine_learning.inference_events_value.enabled" not in settings assert "capture_params" not in agent_config assert "transaction_tracer.record_sql" not in agent_config assert "strip_exception_messages.enabled" not in agent_config assert "custom_insights_events.enabled" not in agent_config + assert "ml_insights_events.enabled" not in agent_config assert "application_logging.forwarding.enabled" not in agent_config + assert "machine_learning.inference_events_value.enabled" not in agent_config def test_remote_config_hsm_fixups_server_side_disabled(): @@ -397,6 +456,7 @@ def test_remote_config_hsm_fixups_server_side_disabled(): "high_security": True, "strip_exception_messages.enabled": True, "custom_insights_events.enabled": False, + "ml_insights_events.enabled": False, } diff --git a/tests/agent_features/test_metric_normalization.py b/tests/agent_features/test_metric_normalization.py new file mode 100644 index 000000000..65f2903ae --- /dev/null +++ b/tests/agent_features/test_metric_normalization.py @@ -0,0 +1,78 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +from testing_support.fixtures import reset_core_stats_engine +from testing_support.validators.validate_dimensional_metric_payload import ( + validate_dimensional_metric_payload, +) +from testing_support.validators.validate_metric_payload import validate_metric_payload + +from newrelic.api.application import application_instance +from newrelic.api.background_task import background_task +from newrelic.api.transaction import record_custom_metric, record_dimensional_metric +from newrelic.core.rules_engine import NormalizationRule, RulesEngine + +RULES = [{"match_expression": "(replace)", "replacement": "expected", "ignore": False, "eval_order": 0}] +EXPECTED_TAGS = frozenset({"tag": 1}.items()) + + +def _prepare_rules(test_rules): + # ensure all keys are present, if not present set to an empty string + for rule in test_rules: + for key in NormalizationRule._fields: + rule[key] = rule.get(key, "") + return test_rules + + +@pytest.fixture(scope="session") +def core_app(collector_agent_registration): + app = collector_agent_registration + return app._agent.application(app.name) + + +@pytest.fixture(scope="function") +def rules_engine_fixture(core_app): + rules_engine = core_app._rules_engine + previous_rules = rules_engine["metric"] + + rules_engine["metric"] = RulesEngine(_prepare_rules(RULES)) + yield + rules_engine["metric"] = previous_rules # Restore after test run + + +@validate_dimensional_metric_payload(summary_metrics=[("Metric/expected", EXPECTED_TAGS, 1)]) +@validate_metric_payload([("Metric/expected", 1)]) +@reset_core_stats_engine() +def test_metric_normalization_inside_transaction(core_app, rules_engine_fixture): + @background_task(name="test_record_dimensional_metric_inside_transaction") + def _test(): + record_dimensional_metric("Metric/replace", 1, tags={"tag": 1}) + record_custom_metric("Metric/replace", 1) + + _test() + core_app.harvest() + + +@validate_dimensional_metric_payload(summary_metrics=[("Metric/expected", EXPECTED_TAGS, 1)]) +@validate_metric_payload([("Metric/expected", 1)]) +@reset_core_stats_engine() +def test_metric_normalization_outside_transaction(core_app, rules_engine_fixture): + def _test(): + app = application_instance() + record_dimensional_metric("Metric/replace", 1, tags={"tag": 1}, application=app) + record_custom_metric("Metric/replace", 1, application=app) + + _test() + core_app.harvest() diff --git a/tests/agent_features/test_ml_events.py b/tests/agent_features/test_ml_events.py new file mode 100644 index 000000000..5720224bb --- /dev/null +++ b/tests/agent_features/test_ml_events.py @@ -0,0 +1,199 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
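+ +# These tests exercise the record_ml_event API: OTLP payload generation inside +# and outside a transaction, event type and attribute validation, and gating +# via the ml_insights_events.enabled setting.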
+ +import time + +import pytest +from testing_support.fixtures import ( + function_not_called, + override_application_settings, + reset_core_stats_engine, +) +from testing_support.validators.validate_ml_event_count import validate_ml_event_count +from testing_support.validators.validate_ml_event_payload import ( + validate_ml_event_payload, +) +from testing_support.validators.validate_ml_events import validate_ml_events +from testing_support.validators.validate_ml_events_outside_transaction import ( + validate_ml_events_outside_transaction, +) + +import newrelic.core.otlp_utils +from newrelic.api.application import application_instance as application +from newrelic.api.background_task import background_task +from newrelic.api.transaction import record_ml_event +from newrelic.core.config import global_settings +from newrelic.packages import six + +try: + # python 2.x + reload +except NameError: + # python 3.x + from importlib import reload + +_now = time.time() + +_intrinsics = { + "type": "LabelEvent", + "timestamp": _now, +} + + +@pytest.fixture(scope="session") +def core_app(collector_agent_registration): + app = collector_agent_registration + return app._agent.application(app.name) + + +@validate_ml_event_payload( + [{"foo": "bar", "real_agent_id": "1234567", "event.domain": "newrelic.ml_events", "event.name": "InferenceEvent"}] +) +@reset_core_stats_engine() +def test_ml_event_payload_inside_transaction(core_app): + @background_task(name="test_ml_event_payload_inside_transaction") + def _test(): + record_ml_event("InferenceEvent", {"foo": "bar"}) + + _test() + core_app.harvest() + + +@validate_ml_event_payload( + [{"foo": "bar", "real_agent_id": "1234567", "event.domain": "newrelic.ml_events", "event.name": "InferenceEvent"}] +) +@reset_core_stats_engine() +def test_ml_event_payload_outside_transaction(core_app): + def _test(): + app = application() + record_ml_event("InferenceEvent", {"foo": "bar"}, application=app) + + _test() + core_app.harvest() + + +@pytest.mark.parametrize( + "params,expected", + [ + ({"foo": "bar"}, [(_intrinsics, {"foo": "bar"})]), + ({"foo": "bar", 123: "bad key"}, [(_intrinsics, {"foo": "bar"})]), + ({"foo": "bar", "*" * 256: "too long"}, [(_intrinsics, {"foo": "bar"})]), + ], + ids=["Valid key/value", "Bad key", "Value too long"], +) +@reset_core_stats_engine() +def test_record_ml_event_inside_transaction(params, expected): + @validate_ml_events(expected) + @background_task() + def _test(): + record_ml_event("LabelEvent", params) + + _test() + + +@pytest.mark.parametrize( + "params,expected", + [ + ({"foo": "bar"}, [(_intrinsics, {"foo": "bar"})]), + ({"foo": "bar", 123: "bad key"}, [(_intrinsics, {"foo": "bar"})]), + ({"foo": "bar", "*" * 256: "too long"}, [(_intrinsics, {"foo": "bar"})]), + ], + ids=["Valid key/value", "Bad key", "Value too long"], +) +@reset_core_stats_engine() +def test_record_ml_event_outside_transaction(params, expected): + @validate_ml_events_outside_transaction(expected) + def _test(): + app = application() + record_ml_event("LabelEvent", params, application=app) + + _test() + + +@reset_core_stats_engine() +@validate_ml_event_count(count=0) +@background_task() +def test_record_ml_event_inside_transaction_bad_event_type(): + record_ml_event("!@#$%^&*()", {"foo": "bar"}) + + +@reset_core_stats_engine() +@validate_ml_event_count(count=0) +def test_record_ml_event_outside_transaction_bad_event_type(): + app = application() + record_ml_event("!@#$%^&*()", {"foo": "bar"},
application=app) + + +@reset_core_stats_engine() +@validate_ml_event_count(count=0) +@background_task() +def test_record_ml_event_inside_transaction_params_not_a_dict(): + record_ml_event("ParamsListEvent", ["not", "a", "dict"]) + + +@reset_core_stats_engine() +@validate_ml_event_count(count=0) +def test_record_ml_event_outside_transaction_params_not_a_dict(): + app = application() + record_ml_event("ParamsListEvent", ["not", "a", "dict"], application=app) + + +# Tests for ML Events configuration settings + +@override_application_settings({"ml_insights_events.enabled": False}) +@reset_core_stats_engine() +@validate_ml_event_count(count=0) +@background_task() +def test_ml_event_settings_check_ml_insights_disabled(): + record_ml_event("FooEvent", {"foo": "bar"}) + + +# Test that record_ml_event() methods will short-circuit. +# +# If the ml_insights_events setting is False, verify that the +# `create_custom_event()` function is not called, in order to avoid the +# event_type and attribute processing. + + +@override_application_settings({"ml_insights_events.enabled": False}) +@reset_core_stats_engine() +@function_not_called("newrelic.api.transaction", "create_custom_event") +@background_task() +def test_transaction_create_ml_event_not_called(): + record_ml_event("FooEvent", {"foo": "bar"}) + + +@override_application_settings({"ml_insights_events.enabled": False}) +@reset_core_stats_engine() +@function_not_called("newrelic.core.application", "create_custom_event") +@background_task() +def test_application_create_ml_event_not_called(): + app = application() + record_ml_event("FooEvent", {"foo": "bar"}, application=app) + + +@pytest.fixture(scope="module", autouse=True, params=["protobuf", "json"]) +def otlp_content_encoding(request): + if six.PY2 and request.param == "protobuf": + pytest.skip("OTLP protos are not compatible with Python 2.") + + _settings = global_settings() + prev = _settings.debug.otlp_content_encoding + _settings.debug.otlp_content_encoding = request.param + reload(newrelic.core.otlp_utils) + assert newrelic.core.otlp_utils.otlp_content_setting == request.param, "Content encoding mismatch."
+ + yield + + _settings.debug.otlp_content_encoding = prev diff --git a/tests/agent_unittests/test_harvest_loop.py b/tests/agent_unittests/test_harvest_loop.py index 305622107..15b67a81e 100644 --- a/tests/agent_unittests/test_harvest_loop.py +++ b/tests/agent_unittests/test_harvest_loop.py @@ -32,7 +32,7 @@ from newrelic.core.function_node import FunctionNode from newrelic.core.log_event_node import LogEventNode from newrelic.core.root_node import RootNode -from newrelic.core.stats_engine import CustomMetrics, SampledDataSet +from newrelic.core.stats_engine import CustomMetrics, SampledDataSet, DimensionalMetrics from newrelic.core.transaction_node import TransactionNode from newrelic.network.exceptions import RetryDataForRequest @@ -49,6 +49,11 @@ def transaction_node(request): event = create_custom_event("Custom", {}) custom_events.add(event) + ml_events = SampledDataSet(capacity=num_events) + for _ in range(num_events): + event = create_custom_event("Custom", {}) + ml_events.add(event) + log_events = SampledDataSet(capacity=num_events) for _ in range(num_events): event = LogEventNode(1653609717, "WARNING", "A", {}) @@ -122,10 +127,12 @@ def transaction_node(request): errors=errors, slow_sql=(), custom_events=custom_events, + ml_events=ml_events, log_events=log_events, apdex_t=0.5, suppress_apdex=False, custom_metrics=CustomMetrics(), + dimensional_metrics=DimensionalMetrics(), guid="4485b89db608aece", cpu_time=0.0, suppress_transaction_trace=False, @@ -818,6 +825,7 @@ def test_flexible_events_harvested(allowlist_event): app._stats_engine.log_events.add(LogEventNode(1653609717, "WARNING", "A", {})) app._stats_engine.span_events.add("span event") app._stats_engine.record_custom_metric("CustomMetric/Int", 1) + app._stats_engine.record_dimensional_metric("DimensionalMetric/Int", 1, tags={"tag": "tag"}) assert app._stats_engine.transaction_events.num_seen == 1 assert app._stats_engine.error_events.num_seen == 1 @@ -825,6 +833,7 @@ def test_flexible_events_harvested(allowlist_event): assert app._stats_engine.log_events.num_seen == 1 assert app._stats_engine.span_events.num_seen == 1 assert app._stats_engine.record_custom_metric("CustomMetric/Int", 1) + assert app._stats_engine.record_dimensional_metric("DimensionalMetric/Int", 1, tags={"tag": "tag"}) app.harvest(flexible=True) @@ -844,7 +853,8 @@ def test_flexible_events_harvested(allowlist_event): assert app._stats_engine.span_events.num_seen == num_seen assert ("CustomMetric/Int", "") in app._stats_engine.stats_table - assert app._stats_engine.metrics_count() > 1 + assert ("DimensionalMetric/Int", frozenset({("tag", "tag")})) in app._stats_engine.dimensional_stats_table + assert app._stats_engine.metrics_count() > 3 @pytest.mark.parametrize( diff --git a/tests/agent_unittests/test_utilization_settings.py b/tests/agent_unittests/test_utilization_settings.py index 8af4bcbf1..96cf47669 100644 --- a/tests/agent_unittests/test_utilization_settings.py +++ b/tests/agent_unittests/test_utilization_settings.py @@ -118,6 +118,22 @@ def reset(wrapped, instance, args, kwargs): return reset +@reset_agent_config(INI_FILE_WITHOUT_UTIL_CONF, ENV_WITHOUT_UTIL_CONF) +def test_otlp_host_port_default(): + settings = global_settings() + assert settings.otlp_host == "otlp.nr-data.net" + assert settings.otlp_port == 0 + + +@reset_agent_config( + INI_FILE_WITHOUT_UTIL_CONF, {"NEW_RELIC_OTLP_HOST": "custom-otlp.nr-data.net", "NEW_RELIC_OTLP_PORT": 443} +) +def test_otlp_port_override(): + settings = global_settings() + assert settings.otlp_host == 
"custom-otlp.nr-data.net" + assert settings.otlp_port == 443 + + @reset_agent_config(INI_FILE_WITHOUT_UTIL_CONF, ENV_WITHOUT_UTIL_CONF) def test_heroku_default(): settings = global_settings() diff --git a/tests/mlmodel_sklearn/conftest.py b/tests/mlmodel_sklearn/conftest.py new file mode 100644 index 000000000..d91eb549a --- /dev/null +++ b/tests/mlmodel_sklearn/conftest.py @@ -0,0 +1,34 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from testing_support.fixtures import ( # noqa: F401, pylint: disable=W0611 + collector_agent_registration_fixture, + collector_available_fixture, +) + +_default_settings = { + "transaction_tracer.explain_threshold": 0.0, + "transaction_tracer.transaction_threshold": 0.0, + "transaction_tracer.stack_trace_threshold": 0.0, + "debug.log_data_collector_payloads": True, + "debug.record_transaction_failure": True, + "machine_learning.enabled": True, + "machine_learning.inference_events_value.enabled": True, + "ml_insights_events.enabled": True +} +collector_agent_registration = collector_agent_registration_fixture( + app_name="Python Agent Test (mlmodel_sklearn)", + default_settings=_default_settings, + linked_applications=["Python Agent Test (mlmodel_sklearn)"], +) diff --git a/tests/mlmodel_sklearn/test_calibration_models.py b/tests/mlmodel_sklearn/test_calibration_models.py new file mode 100644 index 000000000..39ac34cb2 --- /dev/null +++ b/tests/mlmodel_sklearn/test_calibration_models.py @@ -0,0 +1,76 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.packages import six + + +def test_model_methods_wrapped_in_function_trace(calibration_model_name, run_calibration_model): + expected_scoped_metrics = { + "CalibratedClassifierCV": [ + ("Function/MLModel/Sklearn/Named/CalibratedClassifierCV.fit", 1), + ("Function/MLModel/Sklearn/Named/CalibratedClassifierCV.predict", 1), + ("Function/MLModel/Sklearn/Named/CalibratedClassifierCV.predict_proba", 2), + ], + } + + expected_transaction_name = "test_calibration_models:_test" + if six.PY3: + expected_transaction_name = ( + "test_calibration_models:test_model_methods_wrapped_in_function_trace.<locals>._test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[calibration_model_name], + rollup_metrics=expected_scoped_metrics[calibration_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_calibration_model() + + _test() + + +@pytest.fixture(params=["CalibratedClassifierCV"]) +def calibration_model_name(request): + return request.param + + +@pytest.fixture +def run_calibration_model(calibration_model_name): + def _run(): + import sklearn.calibration + from sklearn.datasets import load_iris + from sklearn.model_selection import train_test_split + + X, y = load_iris(return_X_y=True) + x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) + + clf = getattr(sklearn.calibration, calibration_model_name)() + + model = clf.fit(x_train, y_train) + model.predict(x_test) + + model.predict_proba(x_test) + + return model + + return _run diff --git a/tests/mlmodel_sklearn/test_cluster_models.py b/tests/mlmodel_sklearn/test_cluster_models.py new file mode 100644 index 000000000..906995c22 --- /dev/null +++ b/tests/mlmodel_sklearn/test_cluster_models.py @@ -0,0 +1,186 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
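+ +# Verifies function trace instrumentation of sklearn.cluster models; the +# expected call counts vary with the installed sklearn version.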
+ +import pytest +from sklearn import __version__ # noqa: this is needed for get_package_version +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.common.package_version_utils import get_package_version +from newrelic.packages import six + +SKLEARN_VERSION = tuple(map(int, get_package_version("sklearn").split("."))) + + +@pytest.mark.parametrize( + "cluster_model_name", + [ + "AffinityPropagation", + "AgglomerativeClustering", + "Birch", + "DBSCAN", + "FeatureAgglomeration", + "KMeans", + "MeanShift", + "MiniBatchKMeans", + "SpectralBiclustering", + "SpectralCoclustering", + "SpectralClustering", + ], +) +def test_below_v1_1_model_methods_wrapped_in_function_trace(cluster_model_name, run_cluster_model): + expected_scoped_metrics = { + "AffinityPropagation": [ + ("Function/MLModel/Sklearn/Named/AffinityPropagation.fit", 2), + ("Function/MLModel/Sklearn/Named/AffinityPropagation.predict", 1), + ("Function/MLModel/Sklearn/Named/AffinityPropagation.fit_predict", 1), + ], + "AgglomerativeClustering": [ + ("Function/MLModel/Sklearn/Named/AgglomerativeClustering.fit", 2), + ("Function/MLModel/Sklearn/Named/AgglomerativeClustering.fit_predict", 1), + ], + "Birch": [ + ("Function/MLModel/Sklearn/Named/Birch.fit", 2), + ( + "Function/MLModel/Sklearn/Named/Birch.predict", + 1 if SKLEARN_VERSION >= (1, 0, 0) else 3, + ), + ("Function/MLModel/Sklearn/Named/Birch.fit_predict", 1), + ("Function/MLModel/Sklearn/Named/Birch.transform", 1), + ], + "DBSCAN": [ + ("Function/MLModel/Sklearn/Named/DBSCAN.fit", 2), + ("Function/MLModel/Sklearn/Named/DBSCAN.fit_predict", 1), + ], + "FeatureAgglomeration": [ + ("Function/MLModel/Sklearn/Named/FeatureAgglomeration.fit", 1), + ("Function/MLModel/Sklearn/Named/FeatureAgglomeration.transform", 1), + ], + "KMeans": [ + ("Function/MLModel/Sklearn/Named/KMeans.fit", 2), + ("Function/MLModel/Sklearn/Named/KMeans.predict", 1), + ("Function/MLModel/Sklearn/Named/KMeans.fit_predict", 1), + ("Function/MLModel/Sklearn/Named/KMeans.transform", 1), + ], + "MeanShift": [ + ("Function/MLModel/Sklearn/Named/MeanShift.fit", 2), + ("Function/MLModel/Sklearn/Named/MeanShift.predict", 1), + ("Function/MLModel/Sklearn/Named/MeanShift.fit_predict", 1), + ], + "MiniBatchKMeans": [ + ("Function/MLModel/Sklearn/Named/MiniBatchKMeans.fit", 2), + ("Function/MLModel/Sklearn/Named/MiniBatchKMeans.predict", 1), + ("Function/MLModel/Sklearn/Named/MiniBatchKMeans.fit_predict", 1), + ], + "SpectralBiclustering": [ + ("Function/MLModel/Sklearn/Named/SpectralBiclustering.fit", 1), + ], + "SpectralCoclustering": [ + ("Function/MLModel/Sklearn/Named/SpectralCoclustering.fit", 1), + ], + "SpectralClustering": [ + ("Function/MLModel/Sklearn/Named/SpectralClustering.fit", 2), + ("Function/MLModel/Sklearn/Named/SpectralClustering.fit_predict", 1), + ], + } + expected_transaction_name = "test_cluster_models:_test" + if six.PY3: + expected_transaction_name = ( + "test_cluster_models:test_below_v1_1_model_methods_wrapped_in_function_trace.<locals>._test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[cluster_model_name], + rollup_metrics=expected_scoped_metrics[cluster_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_cluster_model(cluster_model_name) + + _test() + + +@pytest.mark.skipif(SKLEARN_VERSION < (1, 1, 0), reason="Requires sklearn >= 1.1") +@pytest.mark.parametrize( +
"cluster_model_name", + [ + "BisectingKMeans", + "OPTICS", + ], +) +def test_above_v1_1_model_methods_wrapped_in_function_trace(cluster_model_name, run_cluster_model): + expected_scoped_metrics = { + "BisectingKMeans": [ + ("Function/MLModel/Sklearn/Named/BisectingKMeans.fit", 2), + ("Function/MLModel/Sklearn/Named/BisectingKMeans.predict", 1), + ("Function/MLModel/Sklearn/Named/BisectingKMeans.fit_predict", 1), + ], + "OPTICS": [ + ("Function/MLModel/Sklearn/Named/OPTICS.fit", 2), + ("Function/MLModel/Sklearn/Named/OPTICS.fit_predict", 1), + ], + } + expected_transaction_name = "test_cluster_models:_test" + if six.PY3: + expected_transaction_name = ( + "test_cluster_models:test_above_v1_1_model_methods_wrapped_in_function_trace.._test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[cluster_model_name], + rollup_metrics=expected_scoped_metrics[cluster_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_cluster_model(cluster_model_name) + + _test() + + +@pytest.fixture +def run_cluster_model(): + def _run(cluster_model_name): + import sklearn.cluster + from sklearn.datasets import load_iris + from sklearn.model_selection import train_test_split + + X, y = load_iris(return_X_y=True) + x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) + + clf = getattr(sklearn.cluster, cluster_model_name)() + + model = clf.fit(x_train, y_train) + + if hasattr(model, "predict"): + model.predict(x_test) + if hasattr(model, "score"): + model.score(x_test, y_test) + if hasattr(model, "fit_predict"): + model.fit_predict(x_test) + if hasattr(model, "predict_log_proba"): + model.predict_log_proba(x_test) + if hasattr(model, "predict_proba"): + model.predict_proba(x_test) + if hasattr(model, "transform"): + model.transform(x_test) + + return model + + return _run diff --git a/tests/mlmodel_sklearn/test_compose_models.py b/tests/mlmodel_sklearn/test_compose_models.py new file mode 100644 index 000000000..eab076fc3 --- /dev/null +++ b/tests/mlmodel_sklearn/test_compose_models.py @@ -0,0 +1,94 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest +from sklearn.linear_model import LinearRegression +from sklearn.preprocessing import Normalizer +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.packages import six + + +@pytest.mark.parametrize( + "compose_model_name", + [ + "ColumnTransformer", + "TransformedTargetRegressor", + ], +) +def test_model_methods_wrapped_in_function_trace(compose_model_name, run_compose_model): + expected_scoped_metrics = { + "ColumnTransformer": [ + ("Function/MLModel/Sklearn/Named/ColumnTransformer.fit", 1), + ("Function/MLModel/Sklearn/Named/ColumnTransformer.transform", 1), + ], + "TransformedTargetRegressor": [ + ("Function/MLModel/Sklearn/Named/TransformedTargetRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/TransformedTargetRegressor.predict", 1), + ], + } + + expected_transaction_name = ( + "test_compose_models:test_model_methods_wrapped_in_function_trace.<locals>._test" + if six.PY3 + else "test_compose_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[compose_model_name], + rollup_metrics=expected_scoped_metrics[compose_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_compose_model(compose_model_name) + + _test() + + +@pytest.fixture +def run_compose_model(): + def _run(compose_model_name): + import numpy as np + import sklearn.compose + + if compose_model_name == "TransformedTargetRegressor": + kwargs = {"regressor": LinearRegression()} + X = np.arange(4).reshape(-1, 1) + y = np.exp(2 * X).ravel() + else: + X = [[0.0, 1.0, 2.0, 2.0], [1.0, 1.0, 0.0, 1.0]] + y = None + kwargs = { + "transformers": [ + ("norm1", Normalizer(norm="l1"), [0, 1]), + ("norm2", Normalizer(norm="l1"), slice(2, 4)), + ] + } + + clf = getattr(sklearn.compose, compose_model_name)(**kwargs) + + model = clf.fit(X, y) + if hasattr(model, "predict"): + model.predict(X) + if hasattr(model, "transform"): + model.transform(X) + + return model + + return _run diff --git a/tests/mlmodel_sklearn/test_covariance_models.py b/tests/mlmodel_sklearn/test_covariance_models.py new file mode 100644 index 000000000..afa5c31c2 --- /dev/null +++ b/tests/mlmodel_sklearn/test_covariance_models.py @@ -0,0 +1,110 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
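+ +# Verifies function trace instrumentation of sklearn.covariance models.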
+ +import pytest +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.packages import six + + +@pytest.mark.parametrize( + "covariance_model_name", + [ + "EllipticEnvelope", + "EmpiricalCovariance", + "GraphicalLasso", + "GraphicalLassoCV", + "MinCovDet", + "ShrunkCovariance", + "LedoitWolf", + "OAS", + ], +) +def test_model_methods_wrapped_in_function_trace(covariance_model_name, run_covariance_model): + expected_scoped_metrics = { + "EllipticEnvelope": [ + ("Function/MLModel/Sklearn/Named/EllipticEnvelope.fit", 1), + ("Function/MLModel/Sklearn/Named/EllipticEnvelope.predict", 2), + ("Function/MLModel/Sklearn/Named/EllipticEnvelope.score", 1), + ], + "EmpiricalCovariance": [ + ("Function/MLModel/Sklearn/Named/EmpiricalCovariance.fit", 1), + ("Function/MLModel/Sklearn/Named/EmpiricalCovariance.score", 1), + ], + "GraphicalLasso": [ + ("Function/MLModel/Sklearn/Named/GraphicalLasso.fit", 1), + ], + "GraphicalLassoCV": [ + ("Function/MLModel/Sklearn/Named/GraphicalLassoCV.fit", 1), + ], + "MinCovDet": [ + ("Function/MLModel/Sklearn/Named/MinCovDet.fit", 1), + ], + "ShrunkCovariance": [ + ("Function/MLModel/Sklearn/Named/ShrunkCovariance.fit", 1), + ], + "LedoitWolf": [ + ("Function/MLModel/Sklearn/Named/LedoitWolf.fit", 1), + ], + "OAS": [ + ("Function/MLModel/Sklearn/Named/OAS.fit", 1), + ], + } + expected_transaction_name = ( + "test_covariance_models:test_model_methods_wrapped_in_function_trace.<locals>._test" + if six.PY3 + else "test_covariance_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[covariance_model_name], + rollup_metrics=expected_scoped_metrics[covariance_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_covariance_model(covariance_model_name) + + _test() + + +@pytest.fixture +def run_covariance_model(): + def _run(covariance_model_name): + import sklearn.covariance + from sklearn.datasets import load_iris + from sklearn.model_selection import train_test_split + + X, y = load_iris(return_X_y=True) + x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) + + kwargs = {} + if covariance_model_name in ["EllipticEnvelope", "MinCovDet"]: + kwargs = {"random_state": 0} + + clf = getattr(sklearn.covariance, covariance_model_name)(**kwargs) + + model = clf.fit(x_train, y_train) + if hasattr(model, "predict"): + model.predict(x_test) + if hasattr(model, "score"): + model.score(x_test, y_test) + + return model + + return _run diff --git a/tests/mlmodel_sklearn/test_cross_decomposition_models.py b/tests/mlmodel_sklearn/test_cross_decomposition_models.py new file mode 100644 index 000000000..6a053350f --- /dev/null +++ b/tests/mlmodel_sklearn/test_cross_decomposition_models.py @@ -0,0 +1,81 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
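+ +# Verifies function trace instrumentation of sklearn.cross_decomposition models +# (PLSRegression and PLSSVD).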
+ +import pytest +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.packages import six + + +@pytest.mark.parametrize( + "cross_decomposition_model_name", + [ + "PLSRegression", + "PLSSVD", + ], +) +def test_model_methods_wrapped_in_function_trace(cross_decomposition_model_name, run_cross_decomposition_model): + expected_scoped_metrics = { + "PLSRegression": [ + ("Function/MLModel/Sklearn/Named/PLSRegression.fit", 1), + ], + "PLSSVD": [ + ("Function/MLModel/Sklearn/Named/PLSSVD.fit", 1), + ("Function/MLModel/Sklearn/Named/PLSSVD.transform", 1), + ], + } + expected_transaction_name = ( + "test_cross_decomposition_models:test_model_methods_wrapped_in_function_trace.<locals>._test" + if six.PY3 + else "test_cross_decomposition_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[cross_decomposition_model_name], + rollup_metrics=expected_scoped_metrics[cross_decomposition_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_cross_decomposition_model(cross_decomposition_model_name) + + _test() + + +@pytest.fixture +def run_cross_decomposition_model(): + def _run(cross_decomposition_model_name): + import sklearn.cross_decomposition + from sklearn.datasets import load_iris + from sklearn.model_selection import train_test_split + + X, y = load_iris(return_X_y=True) + x_train, x_test, y_train, _ = train_test_split(X, y, stratify=y, random_state=0) + + kwargs = {} + if cross_decomposition_model_name == "PLSSVD": + kwargs = {"n_components": 1} + clf = getattr(sklearn.cross_decomposition, cross_decomposition_model_name)(**kwargs) + + model = clf.fit(x_train, y_train) + if hasattr(model, "transform"): + model.transform(x_test) + + return model + + return _run diff --git a/tests/mlmodel_sklearn/test_discriminant_analysis_models.py b/tests/mlmodel_sklearn/test_discriminant_analysis_models.py new file mode 100644 index 000000000..de1182696 --- /dev/null +++ b/tests/mlmodel_sklearn/test_discriminant_analysis_models.py @@ -0,0 +1,91 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
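+ +# Verifies function trace instrumentation of sklearn.discriminant_analysis models.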
+ +import pytest +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.packages import six + + +@pytest.mark.parametrize( + "discriminant_analysis_model_name", + [ + "LinearDiscriminantAnalysis", + "QuadraticDiscriminantAnalysis", + ], +) +def test_model_methods_wrapped_in_function_trace(discriminant_analysis_model_name, run_discriminant_analysis_model): + expected_scoped_metrics = { + "LinearDiscriminantAnalysis": [ + ("Function/MLModel/Sklearn/Named/LinearDiscriminantAnalysis.fit", 1), + ("Function/MLModel/Sklearn/Named/LinearDiscriminantAnalysis.predict_log_proba", 1), + ("Function/MLModel/Sklearn/Named/LinearDiscriminantAnalysis.predict_proba", 2), + ("Function/MLModel/Sklearn/Named/LinearDiscriminantAnalysis.transform", 1), + ], + "QuadraticDiscriminantAnalysis": [ + ("Function/MLModel/Sklearn/Named/QuadraticDiscriminantAnalysis.fit", 1), + ("Function/MLModel/Sklearn/Named/QuadraticDiscriminantAnalysis.predict", 1), + ("Function/MLModel/Sklearn/Named/QuadraticDiscriminantAnalysis.predict_proba", 2), + ("Function/MLModel/Sklearn/Named/QuadraticDiscriminantAnalysis.predict_log_proba", 1), + ], + } + + expected_transaction_name = ( + "test_discriminant_analysis_models:test_model_methods_wrapped_in_function_trace.<locals>._test" + if six.PY3 + else "test_discriminant_analysis_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[discriminant_analysis_model_name], + rollup_metrics=expected_scoped_metrics[discriminant_analysis_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_discriminant_analysis_model(discriminant_analysis_model_name) + + _test() + + +@pytest.fixture +def run_discriminant_analysis_model(): + def _run(discriminant_analysis_model_name): + import sklearn.discriminant_analysis + from sklearn.datasets import load_iris + from sklearn.model_selection import train_test_split + + X, y = load_iris(return_X_y=True) + x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) + + kwargs = {} + clf = getattr(sklearn.discriminant_analysis, discriminant_analysis_model_name)(**kwargs) + + model = clf.fit(x_train, y_train) + if hasattr(model, "predict"): + model.predict(x_test) + if hasattr(model, "predict_log_proba"): + model.predict_log_proba(x_test) + if hasattr(model, "predict_proba"): + model.predict_proba(x_test) + if hasattr(model, "transform"): + model.transform(x_test) + + return model + + return _run diff --git a/tests/mlmodel_sklearn/test_dummy_models.py b/tests/mlmodel_sklearn/test_dummy_models.py new file mode 100644 index 000000000..d1059add1 --- /dev/null +++ b/tests/mlmodel_sklearn/test_dummy_models.py @@ -0,0 +1,94 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
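+ +# Verifies function trace instrumentation of sklearn.dummy models +# (DummyClassifier and DummyRegressor).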
+ +import pytest +from sklearn import __version__ # noqa: needed for get_package_version +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.common.package_version_utils import get_package_version +from newrelic.packages import six + +SKLEARN_VERSION = tuple(map(int, get_package_version("sklearn").split("."))) + + +@pytest.mark.parametrize( + "dummy_model_name", + [ + "DummyClassifier", + "DummyRegressor", + ], +) +def test_model_methods_wrapped_in_function_trace(dummy_model_name, run_dummy_model): + expected_scoped_metrics = { + "DummyClassifier": [ + ("Function/MLModel/Sklearn/Named/DummyClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/DummyClassifier.predict", 2), + ("Function/MLModel/Sklearn/Named/DummyClassifier.predict_log_proba", 1), + ("Function/MLModel/Sklearn/Named/DummyClassifier.predict_proba", 2 if SKLEARN_VERSION > (1, 0, 0) else 4), + ("Function/MLModel/Sklearn/Named/DummyClassifier.score", 1), + ], + "DummyRegressor": [ + ("Function/MLModel/Sklearn/Named/DummyRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/DummyRegressor.predict", 2), + ("Function/MLModel/Sklearn/Named/DummyRegressor.score", 1), + ], + } + + expected_transaction_name = ( + "test_dummy_models:test_model_methods_wrapped_in_function_trace.<locals>._test" + if six.PY3 + else "test_dummy_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[dummy_model_name], + rollup_metrics=expected_scoped_metrics[dummy_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_dummy_model(dummy_model_name) + + _test() + + +@pytest.fixture +def run_dummy_model(): + def _run(dummy_model_name): + import sklearn.dummy + from sklearn.datasets import load_iris + from sklearn.model_selection import train_test_split + + X, y = load_iris(return_X_y=True) + x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) + + clf = getattr(sklearn.dummy, dummy_model_name)() + + model = clf.fit(x_train, y_train) + if hasattr(model, "predict"): + model.predict(x_test) + if hasattr(model, "score"): + model.score(x_test, y_test) + if hasattr(model, "predict_log_proba"): + model.predict_log_proba(x_test) + if hasattr(model, "predict_proba"): + model.predict_proba(x_test) + + return model + + return _run diff --git a/tests/mlmodel_sklearn/test_ensemble_models.py b/tests/mlmodel_sklearn/test_ensemble_models.py new file mode 100644 index 000000000..4093edf76 --- /dev/null +++ b/tests/mlmodel_sklearn/test_ensemble_models.py @@ -0,0 +1,303 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
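+ +# Verifies function trace instrumentation of sklearn.ensemble models across +# the sklearn <1.0, 1.0-1.1, and >=1.1 API ranges.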
+ +import pytest +from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.common.package_version_utils import get_package_version +from newrelic.packages import six + +SKLEARN_VERSION = tuple(map(int, get_package_version("sklearn").split("."))) + + +@pytest.mark.parametrize( + "ensemble_model_name", + [ + "AdaBoostClassifier", + "AdaBoostRegressor", + "BaggingClassifier", + "BaggingRegressor", + "ExtraTreesClassifier", + "ExtraTreesRegressor", + "GradientBoostingClassifier", + "GradientBoostingRegressor", + "IsolationForest", + "RandomForestClassifier", + "RandomForestRegressor", + "RandomTreesEmbedding", + "VotingClassifier", + ], +) +def test_below_v1_0_model_methods_wrapped_in_function_trace(ensemble_model_name, run_ensemble_model): + expected_scoped_metrics = { + "AdaBoostClassifier": [ + ("Function/MLModel/Sklearn/Named/AdaBoostClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/AdaBoostClassifier.predict", 2), + ("Function/MLModel/Sklearn/Named/AdaBoostClassifier.predict_log_proba", 1), + ("Function/MLModel/Sklearn/Named/AdaBoostClassifier.predict_proba", 2), + ("Function/MLModel/Sklearn/Named/AdaBoostClassifier.score", 1), + ], + "AdaBoostRegressor": [ + ("Function/MLModel/Sklearn/Named/AdaBoostRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/AdaBoostRegressor.predict", 2), + ("Function/MLModel/Sklearn/Named/AdaBoostRegressor.score", 1), + ], + "BaggingClassifier": [ + ("Function/MLModel/Sklearn/Named/BaggingClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/BaggingClassifier.predict", 2), + ("Function/MLModel/Sklearn/Named/BaggingClassifier.score", 1), + ("Function/MLModel/Sklearn/Named/BaggingClassifier.predict_log_proba", 1), + ("Function/MLModel/Sklearn/Named/BaggingClassifier.predict_proba", 3), + ], + "BaggingRegressor": [ + ("Function/MLModel/Sklearn/Named/BaggingRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/BaggingRegressor.predict", 2), + ("Function/MLModel/Sklearn/Named/BaggingRegressor.score", 1), + ], + "ExtraTreesClassifier": [ + ("Function/MLModel/Sklearn/Named/ExtraTreesClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/ExtraTreesClassifier.predict", 2), + ("Function/MLModel/Sklearn/Named/ExtraTreesClassifier.score", 1), + ("Function/MLModel/Sklearn/Named/ExtraTreesClassifier.predict_log_proba", 1), + ("Function/MLModel/Sklearn/Named/ExtraTreesClassifier.predict_proba", 4), + ], + "ExtraTreesRegressor": [ + ("Function/MLModel/Sklearn/Named/ExtraTreesRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/ExtraTreesRegressor.predict", 2), + ("Function/MLModel/Sklearn/Named/ExtraTreesRegressor.score", 1), + ], + "GradientBoostingClassifier": [ + ("Function/MLModel/Sklearn/Named/GradientBoostingClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/GradientBoostingClassifier.predict", 2), + ("Function/MLModel/Sklearn/Named/GradientBoostingClassifier.score", 1), + ("Function/MLModel/Sklearn/Named/GradientBoostingClassifier.predict_log_proba", 1), + ("Function/MLModel/Sklearn/Named/GradientBoostingClassifier.predict_proba", 2), + ], + "GradientBoostingRegressor": [ + ("Function/MLModel/Sklearn/Named/GradientBoostingRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/GradientBoostingRegressor.predict", 2), + ("Function/MLModel/Sklearn/Named/GradientBoostingRegressor.score", 1), + ], + "IsolationForest": [ + 
("Function/MLModel/Sklearn/Named/IsolationForest.fit", 1), + ("Function/MLModel/Sklearn/Named/IsolationForest.predict", 1), + ], + "RandomForestClassifier": [ + ("Function/MLModel/Sklearn/Named/RandomForestClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/RandomForestClassifier.predict", 2), + ("Function/MLModel/Sklearn/Named/RandomForestClassifier.score", 1), + ("Function/MLModel/Sklearn/Named/RandomForestClassifier.predict_log_proba", 1), + ("Function/MLModel/Sklearn/Named/RandomForestClassifier.predict_proba", 4), + ], + "RandomForestRegressor": [ + ("Function/MLModel/Sklearn/Named/RandomForestRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/RandomForestRegressor.predict", 2), + ("Function/MLModel/Sklearn/Named/RandomForestRegressor.score", 1), + ], + "RandomTreesEmbedding": [ + ("Function/MLModel/Sklearn/Named/RandomTreesEmbedding.fit", 1), + ("Function/MLModel/Sklearn/Named/RandomTreesEmbedding.transform", 1), + ], + "VotingClassifier": [ + ("Function/MLModel/Sklearn/Named/VotingClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/VotingClassifier.predict", 2), + ("Function/MLModel/Sklearn/Named/VotingClassifier.score", 1), + ("Function/MLModel/Sklearn/Named/VotingClassifier.transform", 1), + ("Function/MLModel/Sklearn/Named/VotingClassifier.predict_proba", 3), + ], + } + + expected_transaction_name = ( + "test_ensemble_models:test_below_v1_0_model_methods_wrapped_in_function_trace.._test" + if six.PY3 + else "test_ensemble_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[ensemble_model_name], + rollup_metrics=expected_scoped_metrics[ensemble_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_ensemble_model(ensemble_model_name) + + _test() + + +@pytest.mark.skipif(SKLEARN_VERSION < (1, 0, 0) or SKLEARN_VERSION >= (1, 1, 0), reason="Requires 1.0 <= sklearn < 1.1") +@pytest.mark.parametrize( + "ensemble_model_name", + [ + "HistGradientBoostingClassifier", + "HistGradientBoostingRegressor", + "StackingClassifier", + "StackingRegressor", + "VotingRegressor", + ], +) +def test_between_v1_0_and_v1_1_model_methods_wrapped_in_function_trace(ensemble_model_name, run_ensemble_model): + expected_scoped_metrics = { + "HistGradientBoostingClassifier": [ + ("Function/MLModel/Sklearn/Named/HistGradientBoostingClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/HistGradientBoostingClassifier.predict", 2), + ("Function/MLModel/Sklearn/Named/HistGradientBoostingClassifier.score", 1), + ("Function/MLModel/Sklearn/Named/HistGradientBoostingClassifier.predict_proba", 3), + ], + "HistGradientBoostingRegressor": [ + ("Function/MLModel/Sklearn/Named/HistGradientBoostingRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/HistGradientBoostingRegressor.predict", 2), + ("Function/MLModel/Sklearn/Named/HistGradientBoostingRegressor.score", 1), + ], + "StackingClassifier": [ + ("Function/MLModel/Sklearn/Named/StackingClassifier.fit", 1), + ], + "StackingRegressor": [ + ("Function/MLModel/Sklearn/Named/StackingRegressor.fit", 1), + ], + "VotingRegressor": [ + ("Function/MLModel/Sklearn/Named/VotingRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/VotingRegressor.predict", 2), + ("Function/MLModel/Sklearn/Named/VotingRegressor.score", 1), + ("Function/MLModel/Sklearn/Named/VotingRegressor.transform", 1), + ], + } + expected_transaction_name = ( + "test_ensemble_models:test_between_v1_0_and_v1_1_model_methods_wrapped_in_function_trace.._test" + if six.PY3 + else 
"test_ensemble_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[ensemble_model_name], + rollup_metrics=expected_scoped_metrics[ensemble_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_ensemble_model(ensemble_model_name) + + _test() + + + +@pytest.mark.skipif(SKLEARN_VERSION < (1, 1, 0), reason="Requires sklearn >= 1.1") +@pytest.mark.parametrize( + "ensemble_model_name", + [ + "HistGradientBoostingClassifier", + "HistGradientBoostingRegressor", + "StackingClassifier", + "StackingRegressor", + "VotingRegressor", + ], +) +def test_above_v1_1_model_methods_wrapped_in_function_trace(ensemble_model_name, run_ensemble_model): + expected_scoped_metrics = { + "StackingClassifier": [ + ("Function/MLModel/Sklearn/Named/StackingClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/StackingClassifier.predict", 2), + ("Function/MLModel/Sklearn/Named/StackingClassifier.score", 1), + ("Function/MLModel/Sklearn/Named/StackingClassifier.predict_proba", 1), + ("Function/MLModel/Sklearn/Named/StackingClassifier.transform", 4), + ], + "StackingRegressor": [ + ("Function/MLModel/Sklearn/Named/StackingRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/StackingRegressor.predict", 2), + ("Function/MLModel/Sklearn/Named/StackingRegressor.score", 1), + ], + "VotingRegressor": [ + ("Function/MLModel/Sklearn/Named/VotingRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/VotingRegressor.predict", 2), + ("Function/MLModel/Sklearn/Named/VotingRegressor.score", 1), + ("Function/MLModel/Sklearn/Named/VotingRegressor.transform", 1), + ], + "HistGradientBoostingClassifier": [ + ("Function/MLModel/Sklearn/Named/HistGradientBoostingClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/HistGradientBoostingClassifier.predict", 2), + ("Function/MLModel/Sklearn/Named/HistGradientBoostingClassifier.score", 1), + ("Function/MLModel/Sklearn/Named/HistGradientBoostingClassifier.predict_proba", 3), + ], + "HistGradientBoostingRegressor": [ + ("Function/MLModel/Sklearn/Named/HistGradientBoostingRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/HistGradientBoostingRegressor.predict", 2), + ("Function/MLModel/Sklearn/Named/HistGradientBoostingRegressor.score", 1), + ], + } + expected_transaction_name = ( + "test_ensemble_models:test_above_v1_1_model_methods_wrapped_in_function_trace.._test" + if six.PY3 + else "test_ensemble_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[ensemble_model_name], + rollup_metrics=expected_scoped_metrics[ensemble_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_ensemble_model(ensemble_model_name) + + _test() + + +@pytest.fixture +def run_ensemble_model(): + def _run(ensemble_model_name): + import sklearn.ensemble + from sklearn.datasets import load_iris + from sklearn.model_selection import train_test_split + + X, y = load_iris(return_X_y=True) + x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) + + kwargs = {"random_state": 0} + if ensemble_model_name == "StackingClassifier": + kwargs = {"estimators": [("rf", RandomForestClassifier())], "final_estimator": RandomForestClassifier()} + elif ensemble_model_name == "VotingClassifier": + kwargs = { + "estimators": [("rf", RandomForestClassifier())], + "voting": "soft", + } + elif ensemble_model_name == "VotingRegressor": + x_train = x_test = [[1, 1]] + y_train = y_test = [0] + kwargs = 
{"estimators": [("rf", RandomForestRegressor())]} + elif ensemble_model_name == "StackingRegressor": + kwargs = {"estimators": [("rf", RandomForestRegressor())]} + clf = getattr(sklearn.ensemble, ensemble_model_name)(**kwargs) + + model = clf.fit(x_train, y_train) + if hasattr(model, "predict"): + model.predict(x_test) + if hasattr(model, "score"): + model.score(x_test, y_test) + if hasattr(model, "predict_log_proba"): + model.predict_log_proba(x_test) + if hasattr(model, "predict_proba"): + model.predict_proba(x_test) + if hasattr(model, "transform"): + model.transform(x_test) + + return model + + return _run diff --git a/tests/mlmodel_sklearn/test_feature_selection_models.py b/tests/mlmodel_sklearn/test_feature_selection_models.py new file mode 100644 index 000000000..f4d601d32 --- /dev/null +++ b/tests/mlmodel_sklearn/test_feature_selection_models.py @@ -0,0 +1,138 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +from sklearn.ensemble import AdaBoostClassifier +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.common.package_version_utils import get_package_version +from newrelic.packages import six + +SKLEARN_VERSION = tuple(map(int, get_package_version("sklearn").split("."))) + + +@pytest.mark.parametrize( + "feature_selection_model_name", + [ + "VarianceThreshold", + "RFE", + "RFECV", + "SelectFromModel", + ], +) +def test_below_v1_0_model_methods_wrapped_in_function_trace(feature_selection_model_name, run_feature_selection_model): + expected_scoped_metrics = { + "VarianceThreshold": [ + ("Function/MLModel/Sklearn/Named/VarianceThreshold.fit", 1), + ], + "RFE": [ + ("Function/MLModel/Sklearn/Named/RFE.fit", 1), + ("Function/MLModel/Sklearn/Named/RFE.predict", 1), + ("Function/MLModel/Sklearn/Named/RFE.score", 1), + ("Function/MLModel/Sklearn/Named/RFE.predict_log_proba", 1), + ("Function/MLModel/Sklearn/Named/RFE.predict_proba", 1), + ], + "RFECV": [ + ("Function/MLModel/Sklearn/Named/RFECV.fit", 1), + ], + "SelectFromModel": [ + ("Function/MLModel/Sklearn/Named/SelectFromModel.fit", 1), + ], + } + + expected_transaction_name = ( + "test_feature_selection_models:test_below_v1_0_model_methods_wrapped_in_function_trace.._test" + if six.PY3 + else "test_feature_selection_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[feature_selection_model_name], + rollup_metrics=expected_scoped_metrics[feature_selection_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_feature_selection_model(feature_selection_model_name) + + _test() + + +@pytest.mark.skipif(SKLEARN_VERSION < (1, 0, 0), reason="Requires sklearn >= 1.0") +@pytest.mark.parametrize( + "feature_selection_model_name", + [ + "SequentialFeatureSelector", + ], +) +def test_above_v1_0_model_methods_wrapped_in_function_trace(feature_selection_model_name, 
run_feature_selection_model): + expected_scoped_metrics = { + "SequentialFeatureSelector": [ + ("Function/MLModel/Sklearn/Named/SequentialFeatureSelector.fit", 1), + ], + } + expected_transaction_name = ( + "test_feature_selection_models:test_above_v1_0_model_methods_wrapped_in_function_trace.<locals>._test" + if six.PY3 + else "test_feature_selection_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[feature_selection_model_name], + rollup_metrics=expected_scoped_metrics[feature_selection_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_feature_selection_model(feature_selection_model_name) + + _test() + + +@pytest.fixture +def run_feature_selection_model(): + def _run(feature_selection_model_name): + import sklearn.feature_selection + from sklearn.datasets import load_iris + from sklearn.model_selection import train_test_split + + X, y = load_iris(return_X_y=True) + x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) + + kwargs = {} + if feature_selection_model_name in ["RFE", "SequentialFeatureSelector", "SelectFromModel", "RFECV"]: + # This is an example of a model that has all the available attributes + # We could have chosen any estimator that has predict, score, + # predict_log_proba, and predict_proba + kwargs = {"estimator": AdaBoostClassifier()} + clf = getattr(sklearn.feature_selection, feature_selection_model_name)(**kwargs) + + model = clf.fit(x_train, y_train) + if hasattr(model, "predict"): + model.predict(x_test) + if hasattr(model, "score"): + model.score(x_test, y_test) + if hasattr(model, "predict_log_proba"): + model.predict_log_proba(x_test) + if hasattr(model, "predict_proba"): + model.predict_proba(x_test) + + return model + + return _run diff --git a/tests/mlmodel_sklearn/test_gaussian_process_models.py b/tests/mlmodel_sklearn/test_gaussian_process_models.py new file mode 100644 index 000000000..7a78fc703 --- /dev/null +++ b/tests/mlmodel_sklearn/test_gaussian_process_models.py @@ -0,0 +1,83 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
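+# Like the other model-family suites in this directory, the tests below train a
+# small iris-based model inside a background task and assert that each instrumented
+# call is wrapped in a function trace recorded as a
+# "Function/MLModel/Sklearn/Named/<class>.<method>" scoped and rollup metric.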
+ +import pytest +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.packages import six + + +@pytest.mark.parametrize( + "gaussian_process_model_name", + [ + "GaussianProcessClassifier", + "GaussianProcessRegressor", + ], +) +def test_model_methods_wrapped_in_function_trace(gaussian_process_model_name, run_gaussian_process_model): + expected_scoped_metrics = { + "GaussianProcessClassifier": [ + ("Function/MLModel/Sklearn/Named/GaussianProcessClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/GaussianProcessClassifier.predict", 1), + ("Function/MLModel/Sklearn/Named/GaussianProcessClassifier.predict_proba", 1), + ], + "GaussianProcessRegressor": [ + ("Function/MLModel/Sklearn/Named/GaussianProcessRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/GaussianProcessRegressor.predict", 1), + ], + } + + expected_transaction_name = ( + "test_gaussian_process_models:test_model_methods_wrapped_in_function_trace.<locals>._test" + if six.PY3 + else "test_gaussian_process_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[gaussian_process_model_name], + rollup_metrics=expected_scoped_metrics[gaussian_process_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_gaussian_process_model(gaussian_process_model_name) + + _test() + + +@pytest.fixture +def run_gaussian_process_model(): + def _run(gaussian_process_model_name): + import sklearn.gaussian_process + from sklearn.datasets import load_iris + from sklearn.model_selection import train_test_split + + X, y = load_iris(return_X_y=True) + x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) + + clf = getattr(sklearn.gaussian_process, gaussian_process_model_name)(random_state=0) + + model = clf.fit(x_train, y_train) + if hasattr(model, "predict"): + model.predict(x_test) + if hasattr(model, "predict_proba"): + model.predict_proba(x_test) + + return model + + return _run diff --git a/tests/mlmodel_sklearn/test_inference_events.py b/tests/mlmodel_sklearn/test_inference_events.py new file mode 100644 index 000000000..0a3677019 --- /dev/null +++ b/tests/mlmodel_sklearn/test_inference_events.py @@ -0,0 +1,429 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
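+# Unlike the metric-focused suites, the tests below validate the InferenceData ML
+# events emitted when predict() runs: one event per inference row, carrying
+# feature.<name> and label.<name> attributes plus modelName, model_version, and
+# new_relic_data_schema_version.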
+ +import sys + +import numpy as np +import pandas +from testing_support.fixtures import ( + override_application_settings, + reset_core_stats_engine, +) +from testing_support.validators.validate_ml_event_count import validate_ml_event_count +from testing_support.validators.validate_ml_events import validate_ml_events + +from newrelic.api.background_task import background_task + +pandas_df_category_recorded_custom_events = [ + ( + {"type": "InferenceData"}, + { + "inference_id": None, + "prediction_id": None, + "modelName": "DecisionTreeClassifier", + "model_version": "0.0.0", + "feature.col1": 2.0, + "feature.col2": 4.0, + "label.0": "27.0", + "new_relic_data_schema_version": 2, + }, + ), +] + + +@reset_core_stats_engine() +def test_pandas_df_categorical_feature_event(): + @validate_ml_events(pandas_df_category_recorded_custom_events) + @validate_ml_event_count(count=1) + @background_task() + def _test(): + import sklearn.tree + + clf = getattr(sklearn.tree, "DecisionTreeClassifier")(random_state=0) + model = clf.fit( + pandas.DataFrame({"col1": [27.0, 24.0], "col2": [23.0, 25.0]}, dtype="category"), + pandas.DataFrame({"label": [27.0, 28.0]}), + ) + + labels = model.predict(pandas.DataFrame({"col1": [2.0], "col2": [4.0]}, dtype="category")) + return model + + _test() + + +label_type = "bool" if sys.version_info < (3, 8) else "numeric" +true_label_value = "True" if sys.version_info < (3, 8) else "1.0" +false_label_value = "False" if sys.version_info < (3, 8) else "0.0" +pandas_df_bool_recorded_custom_events = [ + ( + {"type": "InferenceData"}, + { + "inference_id": None, + "prediction_id": None, + "modelName": "DecisionTreeClassifier", + "model_version": "0.0.0", + "feature.col1": True, + "feature.col2": True, + "label.0": true_label_value, + "new_relic_data_schema_version": 2, + }, + ), +] + + +@reset_core_stats_engine() +def test_pandas_df_bool_feature_event(): + @validate_ml_events(pandas_df_bool_recorded_custom_events) + @validate_ml_event_count(count=1) + @background_task() + def _test(): + import sklearn.tree + + dtype_name = "bool" if sys.version_info < (3, 8) else "boolean" + x_train = pandas.DataFrame({"col1": [True, False], "col2": [True, False]}, dtype=dtype_name) + y_train = pandas.DataFrame({"label": [True, False]}, dtype=dtype_name) + x_test = pandas.DataFrame({"col1": [True], "col2": [True]}, dtype=dtype_name) + + clf = getattr(sklearn.tree, "DecisionTreeClassifier")(random_state=0) + model = clf.fit(x_train, y_train) + + labels = model.predict(x_test) + return model + + _test() + + +pandas_df_float_recorded_custom_events = [ + ( + {"type": "InferenceData"}, + { + "inference_id": None, + "prediction_id": None, + "modelName": "DecisionTreeRegressor", + "model_version": "0.0.0", + "feature.col1": 100.0, + "feature.col2": 300.0, + "label.0": "345.6", + "new_relic_data_schema_version": 2, + }, + ), +] + + +@reset_core_stats_engine() +def test_pandas_df_float_feature_event(): + @validate_ml_events(pandas_df_float_recorded_custom_events) + @validate_ml_event_count(count=1) + @background_task() + def _test(): + import sklearn.tree + + x_train = pandas.DataFrame({"col1": [120.0, 254.0], "col2": [236.9, 234.5]}, dtype="float64") + y_train = pandas.DataFrame({"label": [345.6, 456.7]}, dtype="float64") + x_test = pandas.DataFrame({"col1": [100.0], "col2": [300.0]}, dtype="float64") + + clf = getattr(sklearn.tree, "DecisionTreeRegressor")(random_state=0) + + model = clf.fit(x_train, y_train) + labels =
model.predict(x_test) + + return model + + _test() + + +int_list_recorded_custom_events = [ + ( + {"type": "InferenceData"}, + { + "inference_id": None, + "prediction_id": None, + "modelName": "ExtraTreeRegressor", + "model_version": "0.0.0", + "feature.0": 1, + "feature.1": 2, + "label.0": "1.0", + "new_relic_data_schema_version": 2, + }, + ), +] + + +@reset_core_stats_engine() +def test_int_list(): + @validate_ml_events(int_list_recorded_custom_events) + @validate_ml_event_count(count=1) + @background_task() + def _test(): + import sklearn.tree + + x_train = [[0, 0], [1, 1]] + y_train = [0, 1] + x_test = [[1, 2]] + + clf = getattr(sklearn.tree, "ExtraTreeRegressor")(random_state=0) + model = clf.fit(x_train, y_train) + + labels = model.predict(x_test) + return model + + _test() + + +numpy_int_recorded_custom_events = [ + ( + {"type": "InferenceData"}, + { + "inference_id": None, + "prediction_id": None, + "modelName": "ExtraTreeRegressor", + "model_version": "0.0.0", + "feature.0": 12, + "feature.1": 13, + "label.0": "11.0", + "new_relic_data_schema_version": 2, + }, + ), +] + + +@reset_core_stats_engine() +def test_numpy_int_array(): + @validate_ml_events(numpy_int_recorded_custom_events) + @validate_ml_event_count(count=1) + @background_task() + def _test(): + import sklearn.tree + + x_train = np.array([[10, 10], [11, 11]], dtype="int") + y_train = np.array([10, 11], dtype="int") + x_test = np.array([[12, 13]], dtype="int") + + clf = getattr(sklearn.tree, "ExtraTreeRegressor")(random_state=0) + model = clf.fit(x_train, y_train) + + labels = model.predict(x_test) + return model + + _test() + + +numpy_str_recorded_custom_events = [ + ( + {"type": "InferenceData"}, + { + "inference_id": None, + "prediction_id": None, + "modelName": "DecisionTreeClassifier", + "model_version": "0.0.0", + "feature.0": "20", + "feature.1": "21", + "label.0": "21", + "new_relic_data_schema_version": 2, + }, + ), + ( + {"type": "InferenceData"}, + { + "inference_id": None, + "prediction_id": None, + "modelName": "DecisionTreeClassifier", + "model_version": "0.0.0", + "feature.0": "22", + "feature.1": "23", + "label.0": "21", + "new_relic_data_schema_version": 2, + }, + ), +] + + +@reset_core_stats_engine() +def test_numpy_str_array_multiple_features(): + @validate_ml_events(numpy_str_recorded_custom_events) + @validate_ml_event_count(count=2) + @background_task() + def _test(): + import sklearn.tree + + x_train = np.array([[20, 20], [21, 21]], dtype="._test" + if six.PY3 + else "test_kernel_ridge_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[kernel_ridge_model_name], + rollup_metrics=expected_scoped_metrics[kernel_ridge_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_kernel_ridge_model(kernel_ridge_model_name) + + _test() + + +@pytest.fixture +def run_kernel_ridge_model(): + def _run(kernel_ridge_model_name): + import sklearn.kernel_ridge + from sklearn.datasets import load_iris + from sklearn.model_selection import train_test_split + + X, y = load_iris(return_X_y=True) + x_train, x_test, y_train, _ = train_test_split(X, y, stratify=y, random_state=0) + + clf = getattr(sklearn.kernel_ridge, kernel_ridge_model_name)() + + model = clf.fit(x_train, y_train) + model.predict(x_test) + + return model + + return _run diff --git a/tests/mlmodel_sklearn/test_linear_models.py b/tests/mlmodel_sklearn/test_linear_models.py new file mode 100644 index 000000000..582a4750e --- /dev/null +++ 
b/tests/mlmodel_sklearn/test_linear_models.py @@ -0,0 +1,335 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.common.package_version_utils import get_package_version +from newrelic.packages import six + +SKLEARN_VERSION = tuple(map(int, get_package_version("sklearn").split("."))) + + +@pytest.mark.parametrize( + "linear_model_name", + [ + "ARDRegression", + "BayesianRidge", + "ElasticNet", + "ElasticNetCV", + "HuberRegressor", + "Lars", + "LarsCV", + "Lasso", + "LassoCV", + "LassoLars", + "LassoLarsCV", + "LassoLarsIC", + "LinearRegression", + "LogisticRegression", + "LogisticRegressionCV", + "MultiTaskElasticNet", + "MultiTaskElasticNetCV", + "MultiTaskLasso", + "MultiTaskLassoCV", + "OrthogonalMatchingPursuit", + "OrthogonalMatchingPursuitCV", + "PassiveAggressiveClassifier", + "PassiveAggressiveRegressor", + "Perceptron", + "Ridge", + "RidgeCV", + "RidgeClassifier", + "RidgeClassifierCV", + "TheilSenRegressor", + "RANSACRegressor", + ], +) +def test_model_methods_wrapped_in_function_trace(linear_model_name, run_linear_model): + expected_scoped_metrics = { + "ARDRegression": [ + ("Function/MLModel/Sklearn/Named/ARDRegression.fit", 1), + ("Function/MLModel/Sklearn/Named/ARDRegression.predict", 2), + ("Function/MLModel/Sklearn/Named/ARDRegression.score", 1), + ], + "BayesianRidge": [ + ("Function/MLModel/Sklearn/Named/BayesianRidge.fit", 1), + ("Function/MLModel/Sklearn/Named/BayesianRidge.predict", 2), + ("Function/MLModel/Sklearn/Named/BayesianRidge.score", 1), + ], + "ElasticNet": [ + ("Function/MLModel/Sklearn/Named/ElasticNet.fit", 1), + ("Function/MLModel/Sklearn/Named/ElasticNet.predict", 2), + ("Function/MLModel/Sklearn/Named/ElasticNet.score", 1), + ], + "ElasticNetCV": [ + ("Function/MLModel/Sklearn/Named/ElasticNetCV.fit", 1), + ("Function/MLModel/Sklearn/Named/ElasticNetCV.predict", 2), + ("Function/MLModel/Sklearn/Named/ElasticNetCV.score", 1), + ], + "HuberRegressor": [ + ("Function/MLModel/Sklearn/Named/HuberRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/HuberRegressor.predict", 2), + ("Function/MLModel/Sklearn/Named/HuberRegressor.score", 1), + ], + "Lars": [ + ("Function/MLModel/Sklearn/Named/Lars.fit", 1), + ("Function/MLModel/Sklearn/Named/Lars.predict", 2), + ("Function/MLModel/Sklearn/Named/Lars.score", 1), + ], + "LarsCV": [ + ("Function/MLModel/Sklearn/Named/LarsCV.fit", 1), + ("Function/MLModel/Sklearn/Named/LarsCV.predict", 2), + ("Function/MLModel/Sklearn/Named/LarsCV.score", 1), + ], + "Lasso": [ + ("Function/MLModel/Sklearn/Named/Lasso.fit", 1), + ("Function/MLModel/Sklearn/Named/Lasso.predict", 2), + ("Function/MLModel/Sklearn/Named/Lasso.score", 1), + ], + "LassoCV": [ + ("Function/MLModel/Sklearn/Named/LassoCV.fit", 1), + ("Function/MLModel/Sklearn/Named/LassoCV.predict", 2), + ("Function/MLModel/Sklearn/Named/LassoCV.score", 
1), + ], + "LassoLars": [ + ("Function/MLModel/Sklearn/Named/LassoLars.fit", 1), + ("Function/MLModel/Sklearn/Named/LassoLars.predict", 2), + ("Function/MLModel/Sklearn/Named/LassoLars.score", 1), + ], + "LassoLarsCV": [ + ("Function/MLModel/Sklearn/Named/LassoLarsCV.fit", 1), + ("Function/MLModel/Sklearn/Named/LassoLarsCV.predict", 2), + ("Function/MLModel/Sklearn/Named/LassoLarsCV.score", 1), + ], + "LassoLarsIC": [ + ("Function/MLModel/Sklearn/Named/LassoLarsIC.fit", 1), + ("Function/MLModel/Sklearn/Named/LassoLarsIC.predict", 2), + ("Function/MLModel/Sklearn/Named/LassoLarsIC.score", 1), + ], + "LinearRegression": [ + ("Function/MLModel/Sklearn/Named/LinearRegression.fit", 1), + ("Function/MLModel/Sklearn/Named/LinearRegression.predict", 2), + ("Function/MLModel/Sklearn/Named/LinearRegression.score", 1), + ], + "LogisticRegression": [ + ("Function/MLModel/Sklearn/Named/LogisticRegression.fit", 1), + ("Function/MLModel/Sklearn/Named/LogisticRegression.predict", 2), + ("Function/MLModel/Sklearn/Named/LogisticRegression.score", 1), + ], + "LogisticRegressionCV": [ + ("Function/MLModel/Sklearn/Named/LogisticRegressionCV.fit", 1), + ("Function/MLModel/Sklearn/Named/LogisticRegressionCV.predict", 2), + ("Function/MLModel/Sklearn/Named/LogisticRegressionCV.score", 1), + ], + "MultiTaskElasticNet": [ + ("Function/MLModel/Sklearn/Named/MultiTaskElasticNet.fit", 1), + ("Function/MLModel/Sklearn/Named/MultiTaskElasticNet.predict", 2), + ("Function/MLModel/Sklearn/Named/MultiTaskElasticNet.score", 1), + ], + "MultiTaskElasticNetCV": [ + ("Function/MLModel/Sklearn/Named/MultiTaskElasticNetCV.fit", 1), + ("Function/MLModel/Sklearn/Named/MultiTaskElasticNetCV.predict", 2), + ("Function/MLModel/Sklearn/Named/MultiTaskElasticNetCV.score", 1), + ], + "MultiTaskLasso": [ + ("Function/MLModel/Sklearn/Named/MultiTaskLasso.fit", 1), + ("Function/MLModel/Sklearn/Named/MultiTaskLasso.predict", 2), + ("Function/MLModel/Sklearn/Named/MultiTaskLasso.score", 1), + ], + "MultiTaskLassoCV": [ + ("Function/MLModel/Sklearn/Named/MultiTaskLassoCV.fit", 1), + ("Function/MLModel/Sklearn/Named/MultiTaskLassoCV.predict", 2), + ("Function/MLModel/Sklearn/Named/MultiTaskLassoCV.score", 1), + ], + "OrthogonalMatchingPursuit": [ + ("Function/MLModel/Sklearn/Named/OrthogonalMatchingPursuit.fit", 1), + ("Function/MLModel/Sklearn/Named/OrthogonalMatchingPursuit.predict", 2), + ("Function/MLModel/Sklearn/Named/OrthogonalMatchingPursuit.score", 1), + ], + "OrthogonalMatchingPursuitCV": [ + ("Function/MLModel/Sklearn/Named/OrthogonalMatchingPursuitCV.fit", 1), + ("Function/MLModel/Sklearn/Named/OrthogonalMatchingPursuitCV.predict", 2), + ("Function/MLModel/Sklearn/Named/OrthogonalMatchingPursuitCV.score", 1), + ], + "PassiveAggressiveClassifier": [ + ("Function/MLModel/Sklearn/Named/PassiveAggressiveClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/PassiveAggressiveClassifier.predict", 2), + ("Function/MLModel/Sklearn/Named/PassiveAggressiveClassifier.score", 1), + ], + "PassiveAggressiveRegressor": [ + ("Function/MLModel/Sklearn/Named/PassiveAggressiveRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/PassiveAggressiveRegressor.predict", 2), + ("Function/MLModel/Sklearn/Named/PassiveAggressiveRegressor.score", 1), + ], + "Perceptron": [ + ("Function/MLModel/Sklearn/Named/Perceptron.fit", 1), + ("Function/MLModel/Sklearn/Named/Perceptron.predict", 2), + ("Function/MLModel/Sklearn/Named/Perceptron.score", 1), + ], + "Ridge": [ + ("Function/MLModel/Sklearn/Named/Ridge.fit", 1), + 
("Function/MLModel/Sklearn/Named/Ridge.predict", 2), + ("Function/MLModel/Sklearn/Named/Ridge.score", 1), + ], + "RidgeCV": [ + ("Function/MLModel/Sklearn/Named/RidgeCV.fit", 1), + ("Function/MLModel/Sklearn/Named/RidgeCV.predict", 2), + ("Function/MLModel/Sklearn/Named/RidgeCV.score", 1), + ], + "RidgeClassifier": [ + ("Function/MLModel/Sklearn/Named/RidgeClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/RidgeClassifier.predict", 2), + ("Function/MLModel/Sklearn/Named/RidgeClassifier.score", 1), + ], + "RidgeClassifierCV": [ + ("Function/MLModel/Sklearn/Named/RidgeClassifierCV.fit", 1), + ("Function/MLModel/Sklearn/Named/RidgeClassifierCV.predict", 2), + ("Function/MLModel/Sklearn/Named/RidgeClassifierCV.score", 1), + ], + "TheilSenRegressor": [ + ("Function/MLModel/Sklearn/Named/TheilSenRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/TheilSenRegressor.predict", 2), + ("Function/MLModel/Sklearn/Named/TheilSenRegressor.score", 1), + ], + "RANSACRegressor": [ + ("Function/MLModel/Sklearn/Named/RANSACRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/RANSACRegressor.predict", 1), + ("Function/MLModel/Sklearn/Named/RANSACRegressor.score", 1), + ], + } + expected_transaction_name = "test_linear_models:_test" + if six.PY3: + expected_transaction_name = "test_linear_models:test_model_methods_wrapped_in_function_trace.._test" + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[linear_model_name], + rollup_metrics=expected_scoped_metrics[linear_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_linear_model(linear_model_name) + + _test() + + +@pytest.mark.skipif(SKLEARN_VERSION < (1, 1, 0), reason="Requires sklearn >= v1.1") +@pytest.mark.parametrize( + "linear_model_name", + [ + "PoissonRegressor", + "GammaRegressor", + "TweedieRegressor", + "QuantileRegressor", + "SGDClassifier", + "SGDRegressor", + "SGDOneClassSVM", + ], +) +def test_above_v1_1_model_methods_wrapped_in_function_trace(linear_model_name, run_linear_model): + expected_scoped_metrics = { + "PoissonRegressor": [ + ("Function/MLModel/Sklearn/Named/PoissonRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/PoissonRegressor.predict", 1), + ("Function/MLModel/Sklearn/Named/PoissonRegressor.score", 1), + ], + "GammaRegressor": [ + ("Function/MLModel/Sklearn/Named/GammaRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/GammaRegressor.predict", 1), + ("Function/MLModel/Sklearn/Named/GammaRegressor.score", 1), + ], + "TweedieRegressor": [ + ("Function/MLModel/Sklearn/Named/TweedieRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/TweedieRegressor.predict", 1), + ("Function/MLModel/Sklearn/Named/TweedieRegressor.score", 1), + ], + "QuantileRegressor": [ + ("Function/MLModel/Sklearn/Named/QuantileRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/QuantileRegressor.predict", 2), + ("Function/MLModel/Sklearn/Named/QuantileRegressor.score", 1), + ], + "SGDClassifier": [ + ("Function/MLModel/Sklearn/Named/SGDClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/SGDClassifier.predict", 2), + ("Function/MLModel/Sklearn/Named/SGDClassifier.score", 1), + ], + "SGDRegressor": [ + ("Function/MLModel/Sklearn/Named/SGDRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/SGDRegressor.predict", 2), + ("Function/MLModel/Sklearn/Named/SGDRegressor.score", 1), + ], + "SGDOneClassSVM": [ + ("Function/MLModel/Sklearn/Named/SGDOneClassSVM.fit", 1), + ("Function/MLModel/Sklearn/Named/SGDOneClassSVM.predict", 1), + ], + } + 
expected_transaction_name = "test_linear_models:_test" + if six.PY3: + expected_transaction_name = ( + "test_linear_models:test_above_v1_1_model_methods_wrapped_in_function_trace.._test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[linear_model_name], + rollup_metrics=expected_scoped_metrics[linear_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_linear_model(linear_model_name) + + _test() + + +@pytest.fixture +def run_linear_model(): + def _run(linear_model_name): + import sklearn.linear_model + from sklearn.datasets import load_iris + from sklearn.model_selection import train_test_split + + X, y = load_iris(return_X_y=True) + x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) + + if linear_model_name == "GammaRegressor": + x_train = [[1, 2], [2, 3], [3, 4], [4, 3]] + y_train = [19, 26, 33, 30] + x_test = [[1, 2], [2, 3], [3, 4], [4, 3]] + y_test = [19, 26, 33, 30] + elif linear_model_name in [ + "MultiTaskElasticNet", + "MultiTaskElasticNetCV", + "MultiTaskLasso", + "MultiTaskLassoCV", + ]: + y_train = x_train + y_test = x_test + + clf = getattr(sklearn.linear_model, linear_model_name)() + + model = clf.fit(x_train, y_train) + model.predict(x_test) + + if hasattr(model, "score"): + model.score(x_test, y_test) + + return model + + return _run diff --git a/tests/mlmodel_sklearn/test_metric_scorers.py b/tests/mlmodel_sklearn/test_metric_scorers.py new file mode 100644 index 000000000..50557b882 --- /dev/null +++ b/tests/mlmodel_sklearn/test_metric_scorers.py @@ -0,0 +1,150 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import numpy as np +import pytest +from testing_support.fixtures import validate_attributes + +from newrelic.api.background_task import background_task +from newrelic.hooks.mlmodel_sklearn import PredictReturnTypeProxy + + +@pytest.mark.parametrize( + "metric_scorer_name", + ( + "accuracy_score", + "balanced_accuracy_score", + "f1_score", + "precision_score", + "recall_score", + "roc_auc_score", + "r2_score", + ), +) +def test_metric_scorer_attributes(metric_scorer_name, run_metric_scorer): + @validate_attributes("agent", ["DecisionTreeClassifier/TrainingStep/0/%s" % metric_scorer_name]) + @background_task() + def _test(): + run_metric_scorer(metric_scorer_name) + + _test() + + +@pytest.mark.parametrize( + "metric_scorer_name", + ( + "accuracy_score", + "balanced_accuracy_score", + "f1_score", + "precision_score", + "recall_score", + "roc_auc_score", + "r2_score", + ), +) +def test_metric_scorer_training_steps_attributes(metric_scorer_name, run_metric_scorer): + @validate_attributes( + "agent", + [ + "DecisionTreeClassifier/TrainingStep/0/%s" % metric_scorer_name, + "DecisionTreeClassifier/TrainingStep/1/%s" % metric_scorer_name, + ], + ) + @background_task() + def _test(): + run_metric_scorer(metric_scorer_name, training_steps=[0, 1]) + + _test() + + +@pytest.mark.parametrize( + "metric_scorer_name,kwargs", + [ + ("f1_score", {"average": None}), + ("precision_score", {"average": None}), + ("recall_score", {"average": None}), + ], +) +def test_metric_scorer_iterable_score_attributes(metric_scorer_name, kwargs, run_metric_scorer): + @validate_attributes( + "agent", + [ + "DecisionTreeClassifier/TrainingStep/0/%s[0]" % metric_scorer_name, + "DecisionTreeClassifier/TrainingStep/0/%s[1]" % metric_scorer_name, + ], + ) + @background_task() + def _test(): + run_metric_scorer(metric_scorer_name, kwargs) + + _test() + + +@pytest.mark.parametrize( + "metric_scorer_name", + [ + "accuracy_score", + "balanced_accuracy_score", + "f1_score", + "precision_score", + "recall_score", + "roc_auc_score", + "r2_score", + ], +) +def test_metric_scorer_attributes_unknown_model(metric_scorer_name): + @validate_attributes("agent", ["Unknown/TrainingStep/Unknown/%s" % metric_scorer_name]) + @background_task() + def _test(): + from sklearn import metrics + + y_pred = [1, 0] + y_test = [1, 0] + + getattr(metrics, metric_scorer_name)(y_test, y_pred) + + _test() + + +@pytest.mark.parametrize("data", (np.array([0, 1]), "foo", 1, 1.0, True, [0, 1], {"foo": "bar"}, (0, 1), np.str_("F"))) +def test_PredictReturnTypeProxy(data): + wrapped_data = PredictReturnTypeProxy(data, "ModelName", 0) + + assert wrapped_data._nr_model_name == "ModelName" + assert wrapped_data._nr_training_step == 0 + + +@pytest.fixture +def run_metric_scorer(): + def _run(metric_scorer_name, metric_scorer_kwargs=None, training_steps=None): + from sklearn import metrics, tree + + x_train = [[0, 0], [1, 1]] + y_train = [0, 1] + x_test = [[2.0, 2.0], [0, 0.5]] + y_test = [1, 0] + + if not training_steps: + training_steps = [0] + + clf = tree.DecisionTreeClassifier(random_state=0) + for step in training_steps: + model = clf.fit(x_train, y_train) + + labels = model.predict(x_test) + + metric_scorer_kwargs = metric_scorer_kwargs or {} + getattr(metrics, metric_scorer_name)(y_test, labels, **metric_scorer_kwargs) + + return _run diff --git a/tests/mlmodel_sklearn/test_mixture_models.py b/tests/mlmodel_sklearn/test_mixture_models.py new file mode 100644 index 000000000..7ef838126 --- /dev/null +++ b/tests/mlmodel_sklearn/test_mixture_models.py @@ -0,0 +1,85 
@@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.packages import six + + +@pytest.mark.parametrize( + "mixture_model_name", + [ + "GaussianMixture", + "BayesianGaussianMixture", + ], +) +def test_model_methods_wrapped_in_function_trace(mixture_model_name, run_mixture_model): + expected_scoped_metrics = { + "GaussianMixture": [ + ("Function/MLModel/Sklearn/Named/GaussianMixture.fit", 1), + ("Function/MLModel/Sklearn/Named/GaussianMixture.predict", 1), + ("Function/MLModel/Sklearn/Named/GaussianMixture.predict_proba", 1), + ("Function/MLModel/Sklearn/Named/GaussianMixture.score", 1), + ], + "BayesianGaussianMixture": [ + ("Function/MLModel/Sklearn/Named/BayesianGaussianMixture.fit", 1), + ("Function/MLModel/Sklearn/Named/BayesianGaussianMixture.predict", 1), + ("Function/MLModel/Sklearn/Named/BayesianGaussianMixture.predict_proba", 1), + ("Function/MLModel/Sklearn/Named/BayesianGaussianMixture.score", 1), + ], + } + + expected_transaction_name = ( + "test_mixture_models:test_model_methods_wrapped_in_function_trace.<locals>._test" + if six.PY3 + else "test_mixture_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[mixture_model_name], + rollup_metrics=expected_scoped_metrics[mixture_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_mixture_model(mixture_model_name) + + _test() + + +@pytest.fixture +def run_mixture_model(): + def _run(mixture_model_name): + import sklearn.mixture + from sklearn.datasets import load_iris + from sklearn.model_selection import train_test_split + + X, y = load_iris(return_X_y=True) + x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) + + clf = getattr(sklearn.mixture, mixture_model_name)() + + model = clf.fit(x_train, y_train) + model.predict(x_test) + model.score(x_test, y_test) + model.predict_proba(x_test) + + return model + + return _run diff --git a/tests/mlmodel_sklearn/test_ml_model.py b/tests/mlmodel_sklearn/test_ml_model.py new file mode 100644 index 000000000..cfb8e79a6 --- /dev/null +++ b/tests/mlmodel_sklearn/test_ml_model.py @@ -0,0 +1,337 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
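+# The wrap_mlmodel API surface exercised below takes a fitted model plus optional
+# name, version, feature_names, label_names, and metadata keyword arguments, all of
+# which are reflected in the InferenceData events asserted by these tests.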
+ +import logging + +import pandas +from testing_support.fixtures import reset_core_stats_engine +from testing_support.validators.validate_ml_event_count import validate_ml_event_count +from testing_support.validators.validate_ml_events import validate_ml_events + +from newrelic.api.background_task import background_task +from newrelic.api.ml_model import wrap_mlmodel + +try: + from sklearn.tree._classes import BaseDecisionTree +except ImportError: + from sklearn.tree.tree import BaseDecisionTree + +_logger = logging.getLogger(__name__) + + +# Create custom model that isn't auto-instrumented to validate ml_model wrapper functionality +class CustomTestModel(BaseDecisionTree): + def __init__( + self, + criterion="poisson", + splitter="random", + max_depth=None, + min_samples_split=2, + min_samples_leaf=1, + min_weight_fraction_leaf=0.0, + max_features=None, + random_state=0, + max_leaf_nodes=None, + min_impurity_decrease=0.0, + class_weight=None, + ccp_alpha=0.0, + ): + super().__init__( + criterion=criterion, + splitter=splitter, + max_depth=max_depth, + min_samples_split=min_samples_split, + min_samples_leaf=min_samples_leaf, + min_weight_fraction_leaf=min_weight_fraction_leaf, + max_features=max_features, + max_leaf_nodes=max_leaf_nodes, + class_weight=class_weight, + random_state=random_state, + min_impurity_decrease=min_impurity_decrease, + ccp_alpha=ccp_alpha, + ) + + def fit(self, X, y, sample_weight=None, check_input=True): + if hasattr(super(CustomTestModel, self), "_fit"): + return self._fit( + X, + y, + sample_weight=sample_weight, + check_input=check_input, + ) + else: + return super(CustomTestModel, self).fit( + X, + y, + sample_weight=sample_weight, + check_input=check_input, + ) + + def predict(self, X, check_input=True): + return super(CustomTestModel, self).predict(X, check_input=check_input) + + +int_list_recorded_custom_events = [ + ( + {"type": "InferenceData"}, + { + "inference_id": None, + "prediction_id": None, + "modelName": "MyCustomModel", + "model_version": "1.2.3", + "feature.0": 1.0, + "feature.1": 2.0, + "label.0": "0.5", + "new_relic_data_schema_version": 2, + }, + ), +] + + +@reset_core_stats_engine() +def test_custom_model_int_list_no_features_and_labels(): + @validate_ml_event_count(count=1) + @validate_ml_events(int_list_recorded_custom_events) + @background_task() + def _test(): + x_train = [[0, 0], [1, 1]] + y_train = [0, 1] + x_test = [[1.0, 2.0]] + + model = CustomTestModel().fit(x_train, y_train) + wrap_mlmodel(model, name="MyCustomModel", version="1.2.3") + + labels = model.predict(x_test) + + return model + + _test() + + +int_list_recorded_custom_events_with_metadata = [ + ( + {"type": "InferenceData"}, + { + "inference_id": None, + "prediction_id": None, + "modelName": "MyCustomModel", + "model_version": "1.2.3", + "feature.0": 1.0, + "feature.1": 2.0, + "label.0": "0.5", + "new_relic_data_schema_version": 2, + "metadata1": "value1", + "metadata2": "value2", + }, + ), +] + + +@reset_core_stats_engine() +def test_custom_model_int_list_with_metadata(): + @validate_ml_event_count(count=1) + @validate_ml_events(int_list_recorded_custom_events_with_metadata) + @background_task() + def _test(): + x_train = [[0, 0], [1, 1]] + y_train = [0, 1] + x_test = [[1.0, 2.0]] + + model = CustomTestModel().fit(x_train, y_train) + wrap_mlmodel( + model, + name="MyCustomModel", + version="1.2.3", + metadata={"metadata1": "value1", "metadata2": "value2"}, + ) + + labels = model.predict(x_test) + + return model + + _test() + + +pandas_df_recorded_custom_events = [ + ( + 
{"type": "InferenceData"}, + { + "inference_id": None, + "prediction_id": None, + "modelName": "PandasTestModel", + "model_version": "1.5.0b1", + "feature.feature1": 0, + "feature.feature2": 0, + "feature.feature3": 1, + "label.label1": "0.5", + "new_relic_data_schema_version": 2, + }, + ), +] + + +@reset_core_stats_engine() +def test_wrapper_attrs_custom_model_pandas_df(): + @validate_ml_event_count(count=1) + @validate_ml_events(pandas_df_recorded_custom_events) + @background_task() + def _test(): + x_train = pandas.DataFrame({"col1": [0, 1], "col2": [0, 1], "col3": [1, 2]}, dtype="category") + y_train = [0, 1] + x_test = pandas.DataFrame({"col1": [0], "col2": [0], "col3": [1]}, dtype="category") + + model = CustomTestModel(random_state=0).fit(x_train, y_train) + wrap_mlmodel( + model, + name="PandasTestModel", + version="1.5.0b1", + feature_names=["feature1", "feature2", "feature3"], + label_names=["label1"], + ) + model.predict(x_test) + return model + + _test() + + +pandas_df_recorded_builtin_events = [ + ( + {"type": "InferenceData"}, + { + "inference_id": None, + "prediction_id": None, + "modelName": "MyDecisionTreeClassifier", + "model_version": "1.5.0b1", + "feature.feature1": 12, + "feature.feature2": 14, + "label.label1": "0", + "new_relic_data_schema_version": 2, + }, + ), +] + + +@reset_core_stats_engine() +def test_wrapper_attrs_builtin_model(): + @validate_ml_event_count(count=1) + @validate_ml_events(pandas_df_recorded_builtin_events) + @background_task() + def _test(): + import sklearn.tree + + x_train = pandas.DataFrame({"col1": [0, 0], "col2": [1, 1]}, dtype="int") + y_train = pandas.DataFrame({"label": [0, 1]}, dtype="int") + x_test = pandas.DataFrame({"col1": [12], "col2": [14]}, dtype="int") + + clf = getattr(sklearn.tree, "DecisionTreeClassifier")(random_state=0) + + model = clf.fit(x_train, y_train) + wrap_mlmodel( + model, + name="MyDecisionTreeClassifier", + version="1.5.0b1", + feature_names=["feature1", "feature2"], + label_names=["label1"], + ) + model.predict(x_test) + + return model + + _test() + + +pandas_df_mismatched_custom_events = [ + ( + {"type": "InferenceData"}, + { + "inference_id": None, + "prediction_id": None, + "modelName": "MyDecisionTreeClassifier", + "model_version": "1.5.0b1", + "feature.col1": 12, + "feature.col2": 14, + "feature.col3": 16, + "label.0": "1", + "new_relic_data_schema_version": 2, + }, + ), +] + + +@reset_core_stats_engine() +def test_wrapper_mismatched_features_and_labels_df(): + @validate_ml_event_count(count=1) + @validate_ml_events(pandas_df_mismatched_custom_events) + @background_task() + def _test(): + import sklearn.tree + + x_train = pandas.DataFrame({"col1": [7, 8], "col2": [9, 10], "col3": [24, 25]}, dtype="int") + y_train = pandas.DataFrame({"label": [0, 1]}, dtype="int") + x_test = pandas.DataFrame({"col1": [12], "col2": [14], "col3": [16]}, dtype="int") + + clf = getattr(sklearn.tree, "DecisionTreeClassifier")(random_state=0) + + model = clf.fit(x_train, y_train) + wrap_mlmodel( + model, + name="MyDecisionTreeClassifier", + version="1.5.0b1", + feature_names=["feature1", "feature2"], + label_names=["label1", "label2"], + ) + model.predict(x_test) + return model + + _test() + + +numpy_str_mismatched_custom_events = [ + ( + {"type": "InferenceData"}, + { + "inference_id": None, + "prediction_id": None, + "modelName": "MyDecisionTreeClassifier", + "model_version": "0.0.1", + "feature.0": "20", + "feature.1": "21", + "label.0": "21", + "new_relic_data_schema_version": 2, + }, + ), +] + + +@reset_core_stats_engine() 
+def test_wrapper_mismatched_features_and_labels_np_array(): + @validate_ml_events(numpy_str_mismatched_custom_events) + @validate_ml_event_count(count=1) + @background_task() + def _test(): + import numpy as np + import sklearn.tree + + x_train = np.array([[20, 20], [21, 21]], dtype="._test" + if six.PY3 + else "test_model_selection_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[model_selection_model_name], + rollup_metrics=expected_scoped_metrics[model_selection_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_model_selection_model(model_selection_model_name) + + _test() + + +@pytest.fixture +def run_model_selection_model(): + def _run(model_selection_model_name): + import sklearn.model_selection + from sklearn.datasets import load_iris + from sklearn.model_selection import train_test_split + + X, y = load_iris(return_X_y=True) + x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) + + if model_selection_model_name == "GridSearchCV": + kwargs = {"estimator": AdaBoostClassifier(), "param_grid": {}} + else: + kwargs = {"estimator": AdaBoostClassifier(), "param_distributions": {}} + clf = getattr(sklearn.model_selection, model_selection_model_name)(**kwargs) + + model = clf.fit(x_train, y_train) + if hasattr(model, "predict"): + model.predict(x_test) + if hasattr(model, "score"): + model.score(x_test, y_test) + if hasattr(model, "predict_log_proba"): + model.predict_log_proba(x_test) + if hasattr(model, "predict_proba"): + model.predict_proba(x_test) + if hasattr(model, "transform"): + model.transform(x_test) + + return model + + return _run diff --git a/tests/mlmodel_sklearn/test_multiclass_models.py b/tests/mlmodel_sklearn/test_multiclass_models.py new file mode 100644 index 000000000..dd10d76f1 --- /dev/null +++ b/tests/mlmodel_sklearn/test_multiclass_models.py @@ -0,0 +1,91 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
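+# The multiclass meta-estimators below are instantiated around an AdaBoostClassifier
+# base estimator; only fit and predict (plus predict_proba where available) are
+# expected to be traced.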
+ +import pytest +from sklearn.ensemble import AdaBoostClassifier +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.packages import six + + +@pytest.mark.parametrize( + "multiclass_model_name", + [ + "OneVsRestClassifier", + "OneVsOneClassifier", + "OutputCodeClassifier", + ], +) +def test_model_methods_wrapped_in_function_trace(multiclass_model_name, run_multiclass_model): + expected_scoped_metrics = { + "OneVsRestClassifier": [ + ("Function/MLModel/Sklearn/Named/OneVsRestClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/OneVsRestClassifier.predict", 1), + ("Function/MLModel/Sklearn/Named/OneVsRestClassifier.predict_proba", 1), + ], + "OneVsOneClassifier": [ + ("Function/MLModel/Sklearn/Named/OneVsOneClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/OneVsOneClassifier.predict", 1), + ], + "OutputCodeClassifier": [ + ("Function/MLModel/Sklearn/Named/OutputCodeClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/OutputCodeClassifier.predict", 1), + ], + } + + expected_transaction_name = ( + "test_multiclass_models:test_model_methods_wrapped_in_function_trace.<locals>._test" + if six.PY3 + else "test_multiclass_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[multiclass_model_name], + rollup_metrics=expected_scoped_metrics[multiclass_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_multiclass_model(multiclass_model_name) + + _test() + + +@pytest.fixture +def run_multiclass_model(): + def _run(multiclass_model_name): + import sklearn.multiclass + from sklearn.datasets import load_iris + from sklearn.model_selection import train_test_split + + X, y = load_iris(return_X_y=True) + x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) + + # This is an example of a model that has all the available attributes + # We could have chosen any estimator that has predict, score, + # predict_log_proba, and predict_proba + clf = getattr(sklearn.multiclass, multiclass_model_name)(estimator=AdaBoostClassifier()) + + model = clf.fit(x_train, y_train) + model.predict(x_test) + if hasattr(model, "predict_proba"): + model.predict_proba(x_test) + + return model + + return _run diff --git a/tests/mlmodel_sklearn/test_multioutput_models.py b/tests/mlmodel_sklearn/test_multioutput_models.py new file mode 100644 index 000000000..392328f28 --- /dev/null +++ b/tests/mlmodel_sklearn/test_multioutput_models.py @@ -0,0 +1,129 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
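+# Per the skipif gate below, MultiOutputEstimator is only exercised on sklearn < 1.0
+# under Python 3 (Python 2 cannot instantiate the abstract class); the chain
+# estimators are run against a synthetic multilabel dataset rather than iris.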
+ +import pytest +from sklearn import __init__ # noqa: Needed for get_package_version +from sklearn.ensemble import AdaBoostClassifier +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.common.package_version_utils import get_package_version +from newrelic.packages import six + +SKLEARN_VERSION = tuple(map(int, get_package_version("sklearn").split("."))) + + +# Python 2 will not allow instantiation of abstract class +# (abstract method is __init__ here) +@pytest.mark.skipif(SKLEARN_VERSION >= (1, 0, 0) or six.PY2, reason="Requires sklearn < 1.0 and Python3") +@pytest.mark.parametrize( + "multioutput_model_name", + [ + "MultiOutputEstimator", + ], +) +def test_below_v1_0_model_methods_wrapped_in_function_trace(multioutput_model_name, run_multioutput_model): + expected_scoped_metrics = { + "MultiOutputEstimator": [ + ("Function/MLModel/Sklearn/Named/MultiOutputEstimator.fit", 1), + ("Function/MLModel/Sklearn/Named/MultiOutputEstimator.predict", 2), + ], + } + expected_transaction_name = ( + "test_multioutput_models:test_below_v1_0_model_methods_wrapped_in_function_trace.<locals>._test" + if six.PY3 + else "test_multioutput_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[multioutput_model_name], + rollup_metrics=expected_scoped_metrics[multioutput_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_multioutput_model(multioutput_model_name) + + _test() + + +@pytest.mark.parametrize( + "multioutput_model_name", + [ + "MultiOutputClassifier", + "ClassifierChain", + "RegressorChain", + ], +) +def test_above_v1_0_model_methods_wrapped_in_function_trace(multioutput_model_name, run_multioutput_model): + expected_scoped_metrics = { + "MultiOutputClassifier": [ + ("Function/MLModel/Sklearn/Named/MultiOutputClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/MultiOutputClassifier.predict_proba", 1), + ("Function/MLModel/Sklearn/Named/MultiOutputClassifier.score", 1), + ], + "ClassifierChain": [ + ("Function/MLModel/Sklearn/Named/ClassifierChain.fit", 1), + ("Function/MLModel/Sklearn/Named/ClassifierChain.predict_proba", 1), + ], + "RegressorChain": [ + ("Function/MLModel/Sklearn/Named/RegressorChain.fit", 1), + ], + } + expected_transaction_name = ( + "test_multioutput_models:test_above_v1_0_model_methods_wrapped_in_function_trace.<locals>._test" + if six.PY3 + else "test_multioutput_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[multioutput_model_name], + rollup_metrics=expected_scoped_metrics[multioutput_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_multioutput_model(multioutput_model_name) + + _test() + + +@pytest.fixture +def run_multioutput_model(): + def _run(multioutput_model_name): + import sklearn.multioutput + from sklearn.datasets import make_multilabel_classification + + X, y = make_multilabel_classification(n_classes=3, random_state=0) + + kwargs = {"estimator": AdaBoostClassifier()} + if multioutput_model_name in ["RegressorChain", "ClassifierChain"]: + kwargs = {"base_estimator": AdaBoostClassifier()} + clf = getattr(sklearn.multioutput, multioutput_model_name)(**kwargs) + + model = clf.fit(X, y) + if hasattr(model, "predict"): + model.predict(X) + if hasattr(model, "score"): + model.score(X, y) + if hasattr(model, "predict_proba"): + model.predict_proba(X)
+ + return model + + return _run diff --git a/tests/mlmodel_sklearn/test_naive_bayes_models.py b/tests/mlmodel_sklearn/test_naive_bayes_models.py new file mode 100644 index 000000000..22dc6db1b --- /dev/null +++ b/tests/mlmodel_sklearn/test_naive_bayes_models.py @@ -0,0 +1,141 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +from sklearn import __init__ # noqa: needed for get_package_version +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.common.package_version_utils import get_package_version +from newrelic.packages import six + +SKLEARN_VERSION = tuple(map(int, get_package_version("sklearn").split("."))) + + +@pytest.mark.skipif(SKLEARN_VERSION < (1, 0, 0), reason="Requires sklearn >= 1.0") +@pytest.mark.parametrize( + "naive_bayes_model_name", + [ + "CategoricalNB", + ], +) +def test_above_v1_0_model_methods_wrapped_in_function_trace(naive_bayes_model_name, run_naive_bayes_model): + expected_scoped_metrics = { + "CategoricalNB": [ + ("Function/MLModel/Sklearn/Named/CategoricalNB.fit", 1), + ("Function/MLModel/Sklearn/Named/CategoricalNB.predict", 1), + ("Function/MLModel/Sklearn/Named/CategoricalNB.predict_log_proba", 2), + ("Function/MLModel/Sklearn/Named/CategoricalNB.predict_proba", 1), + ], + } + expected_transaction_name = ( + "test_naive_bayes_models:test_above_v1_0_model_methods_wrapped_in_function_trace.<locals>._test" + if six.PY3 + else "test_naive_bayes_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[naive_bayes_model_name], + rollup_metrics=expected_scoped_metrics[naive_bayes_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_naive_bayes_model(naive_bayes_model_name) + + _test() + + +@pytest.mark.parametrize( + "naive_bayes_model_name", + [ + "GaussianNB", + "MultinomialNB", + "ComplementNB", + "BernoulliNB", + ], +) +def test_model_methods_wrapped_in_function_trace(naive_bayes_model_name, run_naive_bayes_model): + expected_scoped_metrics = { + "GaussianNB": [ + ("Function/MLModel/Sklearn/Named/GaussianNB.fit", 1), + ("Function/MLModel/Sklearn/Named/GaussianNB.predict", 1), + ("Function/MLModel/Sklearn/Named/GaussianNB.predict_log_proba", 2), + ("Function/MLModel/Sklearn/Named/GaussianNB.predict_proba", 1), + ], + "MultinomialNB": [ + ("Function/MLModel/Sklearn/Named/MultinomialNB.fit", 1), + ("Function/MLModel/Sklearn/Named/MultinomialNB.predict", 1), + ("Function/MLModel/Sklearn/Named/MultinomialNB.predict_log_proba", 2), + ("Function/MLModel/Sklearn/Named/MultinomialNB.predict_proba", 1), + ], + "ComplementNB": [ + ("Function/MLModel/Sklearn/Named/ComplementNB.fit", 1), + ("Function/MLModel/Sklearn/Named/ComplementNB.predict", 1), + ("Function/MLModel/Sklearn/Named/ComplementNB.predict_log_proba", 2), + ("Function/MLModel/Sklearn/Named/ComplementNB.predict_proba", 1), + ], + "BernoulliNB": [ +
("Function/MLModel/Sklearn/Named/BernoulliNB.fit", 1), + ("Function/MLModel/Sklearn/Named/BernoulliNB.predict", 1), + ("Function/MLModel/Sklearn/Named/BernoulliNB.predict_log_proba", 2), + ("Function/MLModel/Sklearn/Named/BernoulliNB.predict_proba", 1), + ], + } + + expected_transaction_name = ( + "test_naive_bayes_models:test_model_methods_wrapped_in_function_trace.._test" + if six.PY3 + else "test_naive_bayes_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[naive_bayes_model_name], + rollup_metrics=expected_scoped_metrics[naive_bayes_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_naive_bayes_model(naive_bayes_model_name) + + _test() + + +@pytest.fixture +def run_naive_bayes_model(): + def _run(naive_bayes_model_name): + import sklearn.naive_bayes + from sklearn.datasets import load_iris + from sklearn.model_selection import train_test_split + + X, y = load_iris(return_X_y=True) + x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) + + clf = getattr(sklearn.naive_bayes, naive_bayes_model_name)() + + model = clf.fit(x_train, y_train) + if hasattr(model, "predict"): + model.predict(x_test) + if hasattr(model, "predict_log_proba"): + model.predict_log_proba(x_test) + if hasattr(model, "predict_proba"): + model.predict_proba(x_test) + + return model + + return _run diff --git a/tests/mlmodel_sklearn/test_neighbors_models.py b/tests/mlmodel_sklearn/test_neighbors_models.py new file mode 100644 index 000000000..53a521157 --- /dev/null +++ b/tests/mlmodel_sklearn/test_neighbors_models.py @@ -0,0 +1,172 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest +from sklearn.neighbors import __init__  # noqa: Needed for get_package_version +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.common.package_version_utils import get_package_version +from newrelic.packages import six + +SKLEARN_VERSION = tuple(map(int, get_package_version("sklearn").split("."))) + + +@pytest.mark.parametrize( + "neighbors_model_name", + [ + "KNeighborsClassifier", + "RadiusNeighborsClassifier", + "KernelDensity", + "LocalOutlierFactor", + "NearestCentroid", + "KNeighborsRegressor", + "RadiusNeighborsRegressor", + "NearestNeighbors", + ], +) +def test_model_methods_wrapped_in_function_trace(neighbors_model_name, run_neighbors_model): + expected_scoped_metrics = { + "KNeighborsClassifier": [ + ("Function/MLModel/Sklearn/Named/KNeighborsClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/KNeighborsClassifier.predict", 2), + ("Function/MLModel/Sklearn/Named/KNeighborsClassifier.predict_proba", 1), + ], + "RadiusNeighborsClassifier": [ + ("Function/MLModel/Sklearn/Named/RadiusNeighborsClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/RadiusNeighborsClassifier.predict", 2), + ], + "KernelDensity": [ + ("Function/MLModel/Sklearn/Named/KernelDensity.fit", 1), + ("Function/MLModel/Sklearn/Named/KernelDensity.score", 1), + ], + "LocalOutlierFactor": [ + ("Function/MLModel/Sklearn/Named/LocalOutlierFactor.fit", 1), + ("Function/MLModel/Sklearn/Named/LocalOutlierFactor.predict", 1), + ], + "NearestCentroid": [ + ("Function/MLModel/Sklearn/Named/NearestCentroid.fit", 1), + ("Function/MLModel/Sklearn/Named/NearestCentroid.predict", 2), + ], + "KNeighborsRegressor": [ + ("Function/MLModel/Sklearn/Named/KNeighborsRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/KNeighborsRegressor.predict", 2), + ], + "RadiusNeighborsRegressor": [ + ("Function/MLModel/Sklearn/Named/RadiusNeighborsRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/RadiusNeighborsRegressor.predict", 2), + ], + "NearestNeighbors": [ + ("Function/MLModel/Sklearn/Named/NearestNeighbors.fit", 1), + ], + } + + expected_transaction_name = ( + "test_neighbors_models:test_model_methods_wrapped_in_function_trace.<locals>._test" + if six.PY3 + else "test_neighbors_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[neighbors_model_name], + rollup_metrics=expected_scoped_metrics[neighbors_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_neighbors_model(neighbors_model_name) + + _test() + + +@pytest.mark.skipif(SKLEARN_VERSION < (1, 0, 0), reason="Requires sklearn >= 1.0") +@pytest.mark.parametrize( + "neighbors_model_name", + [ + "KNeighborsTransformer", + "RadiusNeighborsTransformer", + "NeighborhoodComponentsAnalysis", + "RadiusNeighborsClassifier", + ], +) +def test_above_v1_0_model_methods_wrapped_in_function_trace(neighbors_model_name, run_neighbors_model): + expected_scoped_metrics = { + "KNeighborsTransformer": [ + ("Function/MLModel/Sklearn/Named/KNeighborsTransformer.fit", 1), + ("Function/MLModel/Sklearn/Named/KNeighborsTransformer.transform", 1), + ], + "RadiusNeighborsTransformer": [ + ("Function/MLModel/Sklearn/Named/RadiusNeighborsTransformer.fit", 1), + ("Function/MLModel/Sklearn/Named/RadiusNeighborsTransformer.transform", 1), + ], + "NeighborhoodComponentsAnalysis": [ + ("Function/MLModel/Sklearn/Named/NeighborhoodComponentsAnalysis.fit", 1), +
("Function/MLModel/Sklearn/Named/NeighborhoodComponentsAnalysis.transform", 1), + ], + "RadiusNeighborsClassifier": [ + ("Function/MLModel/Sklearn/Named/RadiusNeighborsClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/RadiusNeighborsClassifier.predict", 2), + ("Function/MLModel/Sklearn/Named/RadiusNeighborsClassifier.predict_proba", 3), # Added in v1.0 + ], + } + expected_transaction_name = ( + "test_neighbors_models:test_above_v1_0_model_methods_wrapped_in_function_trace.._test" + if six.PY3 + else "test_neighbors_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[neighbors_model_name], + rollup_metrics=expected_scoped_metrics[neighbors_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_neighbors_model(neighbors_model_name) + + _test() + + +@pytest.fixture +def run_neighbors_model(): + def _run(neighbors_model_name): + import sklearn.neighbors + from sklearn.datasets import load_iris + from sklearn.model_selection import train_test_split + + X, y = load_iris(return_X_y=True) + x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) + + kwargs = {} + if neighbors_model_name == "LocalOutlierFactor": + kwargs = {"novelty": True} + clf = getattr(sklearn.neighbors, neighbors_model_name)(**kwargs) + + model = clf.fit(x_train, y_train) + if hasattr(model, "predict"): + model.predict(x_test) + if hasattr(model, "score"): + model.score(x_test, y_test) + if hasattr(model, "predict_proba"): + model.predict_proba(x_test) + if hasattr(model, "transform"): + model.transform(x_test) + + return model + + return _run diff --git a/tests/mlmodel_sklearn/test_neural_network_models.py b/tests/mlmodel_sklearn/test_neural_network_models.py new file mode 100644 index 000000000..468bfb4b9 --- /dev/null +++ b/tests/mlmodel_sklearn/test_neural_network_models.py @@ -0,0 +1,96 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.common.package_version_utils import get_package_version +from newrelic.packages import six + +SKLEARN_VERSION = tuple(map(int, get_package_version("sklearn").split("."))) + + +@pytest.mark.parametrize( + "neural_network_model_name", + [ + "MLPClassifier", + "MLPRegressor", + "BernoulliRBM", + ], +) +def test_model_methods_wrapped_in_function_trace(neural_network_model_name, run_neural_network_model): + expected_scoped_metrics = { + "MLPClassifier": [ + ("Function/MLModel/Sklearn/Named/MLPClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/MLPClassifier.predict", 1), + ("Function/MLModel/Sklearn/Named/MLPClassifier.predict_log_proba", 1), + ("Function/MLModel/Sklearn/Named/MLPClassifier.predict_proba", 2), + ], + "MLPRegressor": [ + ("Function/MLModel/Sklearn/Named/MLPRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/MLPRegressor.predict", 1), + ], + "BernoulliRBM": [ + ("Function/MLModel/Sklearn/Named/BernoulliRBM.fit", 1), + ("Function/MLModel/Sklearn/Named/BernoulliRBM.transform", 1), + ], + } + + expected_transaction_name = ( + "test_neural_network_models:test_model_methods_wrapped_in_function_trace.<locals>._test" + if six.PY3 + else "test_neural_network_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[neural_network_model_name], + rollup_metrics=expected_scoped_metrics[neural_network_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_neural_network_model(neural_network_model_name) + + _test() + + +@pytest.fixture +def run_neural_network_model(): + def _run(neural_network_model_name): + import sklearn.neural_network + from sklearn.datasets import load_iris + from sklearn.model_selection import train_test_split + + X, y = load_iris(return_X_y=True) + x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) + + clf = getattr(sklearn.neural_network, neural_network_model_name)() + + model = clf.fit(x_train, y_train) + if hasattr(model, "predict"): + model.predict(x_test) + if hasattr(model, "predict_log_proba"): + model.predict_log_proba(x_test) + if hasattr(model, "predict_proba"): + model.predict_proba(x_test) + if hasattr(model, "transform"): + model.transform(x_test) + + return model + + return _run diff --git a/tests/mlmodel_sklearn/test_pipeline_models.py b/tests/mlmodel_sklearn/test_pipeline_models.py new file mode 100644 index 000000000..ac9b918f4 --- /dev/null +++ b/tests/mlmodel_sklearn/test_pipeline_models.py @@ -0,0 +1,95 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +import pytest +from sklearn.decomposition import TruncatedSVD +from sklearn.preprocessing import StandardScaler +from sklearn.svm import SVC +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.common.package_version_utils import get_package_version +from newrelic.packages import six + +SKLEARN_VERSION = tuple(map(int, get_package_version("sklearn").split("."))) + + +@pytest.mark.parametrize( + "pipeline_model_name", + [ + "Pipeline", + "FeatureUnion", + ], +) +def test_model_methods_wrapped_in_function_trace(pipeline_model_name, run_pipeline_model): + expected_scoped_metrics = { + "Pipeline": [ + ("Function/MLModel/Sklearn/Named/Pipeline.fit", 1), + ("Function/MLModel/Sklearn/Named/Pipeline.predict", 1), + ("Function/MLModel/Sklearn/Named/Pipeline.score", 1), + ], + "FeatureUnion": [ + ("Function/MLModel/Sklearn/Named/FeatureUnion.fit", 1), + ("Function/MLModel/Sklearn/Named/FeatureUnion.transform", 1), + ], + } + + expected_transaction_name = ( + "test_pipeline_models:test_model_methods_wrapped_in_function_trace.<locals>._test" + if six.PY3 + else "test_pipeline_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[pipeline_model_name], + rollup_metrics=expected_scoped_metrics[pipeline_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_pipeline_model(pipeline_model_name) + + _test() + + +@pytest.fixture +def run_pipeline_model(): + def _run(pipeline_model_name): + import sklearn.pipeline + from sklearn.datasets import load_iris + from sklearn.model_selection import train_test_split + + X, y = load_iris(return_X_y=True) + x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) + + if pipeline_model_name == "Pipeline": + kwargs = {"steps": [("scaler", StandardScaler()), ("svc", SVC())]} + else: + kwargs = {"transformer_list": [("scaler", StandardScaler()), ("svd", TruncatedSVD(n_components=2))]} + clf = getattr(sklearn.pipeline, pipeline_model_name)(**kwargs) + + model = clf.fit(x_train, y_train) + if hasattr(model, "predict"): + model.predict(x_test) + if hasattr(model, "score"): + model.score(x_test, y_test) + if hasattr(model, "transform"): + model.transform(x_test) + + return model + + return _run diff --git a/tests/mlmodel_sklearn/test_prediction_stats.py b/tests/mlmodel_sklearn/test_prediction_stats.py new file mode 100644 index 000000000..5538119e7 --- /dev/null +++ b/tests/mlmodel_sklearn/test_prediction_stats.py @@ -0,0 +1,519 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
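The prediction-stats tests that follow key their expected dimensional metrics by a frozenset of tag pairs rather than by a dict. A short illustration of why (the values here are hypothetical): dicts are unhashable and cannot serve as lookup keys, while frozenset(tags.items()) is hashable and order-insensitive.

tags = {"modelName": "DummyClassifier", "model_version": "0.0.0"}
key = frozenset(tags.items())
data_points = {key: {"count": 1}}
# The same tags in a different order produce the same key, unlike tuple(tags.items()).
assert data_points[frozenset({"model_version": "0.0.0", "modelName": "DummyClassifier"}.items())]["count"] == 1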
+ +import uuid + +import numpy as np +import pandas as pd +import pytest +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task + +# This will act as the UUID for `prediction_id` +ML_METRIC_FORCED_UUID = "0b59992f-2349-4a46-8de1-696d3fe1088b" + + +@pytest.fixture(scope="function") +def force_uuid(monkeypatch): + monkeypatch.setattr(uuid, "uuid4", lambda *a, **k: ML_METRIC_FORCED_UUID) + + +_test_prediction_stats_tags = frozenset( + {("modelName", "DummyClassifier"), ("prediction_id", ML_METRIC_FORCED_UUID), ("model_version", "0.0.0")} +) + + +@pytest.mark.parametrize( + "x_train,y_train,x_test,metrics", + [ + ( + [[0, 0], [1, 1]], + [0, 1], + [[2.0, 2.0], [0, 0.5]], + [ + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/0/Mean", _test_prediction_stats_tags, 1), + ( + "MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/0/Percentile25", + _test_prediction_stats_tags, + 1, + ), + ( + "MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/0/Percentile50", + _test_prediction_stats_tags, + 1, + ), + ( + "MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/0/Percentile75", + _test_prediction_stats_tags, + 1, + ), + ( + "MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/0/StandardDeviation", + _test_prediction_stats_tags, + 1, + ), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/0/Min", _test_prediction_stats_tags, 1), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/0/Max", _test_prediction_stats_tags, 1), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/0/Count", _test_prediction_stats_tags, 1), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/1/Mean", _test_prediction_stats_tags, 1), + ( + "MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/1/Percentile25", + _test_prediction_stats_tags, + 1, + ), + ( + "MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/1/Percentile50", + _test_prediction_stats_tags, + 1, + ), + ( + "MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/1/Percentile75", + _test_prediction_stats_tags, + 1, + ), + ( + "MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/1/StandardDeviation", + _test_prediction_stats_tags, + 1, + ), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/1/Min", _test_prediction_stats_tags, 1), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/1/Max", _test_prediction_stats_tags, 1), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/1/Count", _test_prediction_stats_tags, 1), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Label/0/Mean", _test_prediction_stats_tags, 1), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Label/0/Percentile25", _test_prediction_stats_tags, 1), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Label/0/Percentile50", _test_prediction_stats_tags, 1), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Label/0/Percentile75", _test_prediction_stats_tags, 1), + ( + "MLModel/Sklearn/Named/DummyClassifier/Predict/Label/0/StandardDeviation", + _test_prediction_stats_tags, + 1, + ), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Label/0/Min", _test_prediction_stats_tags, 1), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Label/0/Max", _test_prediction_stats_tags, 1), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Label/0/Count", _test_prediction_stats_tags, 1), + ], + ), + ( + np.array([[0, 0], [1, 1]]), + [0, 1], + np.array([[2.0, 2.0], [0, 0.5]]), + [ + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/0/Mean", 
_test_prediction_stats_tags, 1), + ( + "MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/0/Percentile25", + _test_prediction_stats_tags, + 1, + ), + ( + "MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/0/Percentile50", + _test_prediction_stats_tags, + 1, + ), + ( + "MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/0/Percentile75", + _test_prediction_stats_tags, + 1, + ), + ( + "MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/0/StandardDeviation", + _test_prediction_stats_tags, + 1, + ), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/0/Min", _test_prediction_stats_tags, 1), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/0/Max", _test_prediction_stats_tags, 1), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/0/Count", _test_prediction_stats_tags, 1), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/1/Mean", _test_prediction_stats_tags, 1), + ( + "MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/1/Percentile25", + _test_prediction_stats_tags, + 1, + ), + ( + "MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/1/Percentile50", + _test_prediction_stats_tags, + 1, + ), + ( + "MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/1/Percentile75", + _test_prediction_stats_tags, + 1, + ), + ( + "MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/1/StandardDeviation", + _test_prediction_stats_tags, + 1, + ), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/1/Min", _test_prediction_stats_tags, 1), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/1/Max", _test_prediction_stats_tags, 1), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Feature/1/Count", _test_prediction_stats_tags, 1), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Label/0/Mean", _test_prediction_stats_tags, 1), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Label/0/Percentile25", _test_prediction_stats_tags, 1), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Label/0/Percentile50", _test_prediction_stats_tags, 1), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Label/0/Percentile75", _test_prediction_stats_tags, 1), + ( + "MLModel/Sklearn/Named/DummyClassifier/Predict/Label/0/StandardDeviation", + _test_prediction_stats_tags, + 1, + ), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Label/0/Min", _test_prediction_stats_tags, 1), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Label/0/Max", _test_prediction_stats_tags, 1), + ("MLModel/Sklearn/Named/DummyClassifier/Predict/Label/0/Count", _test_prediction_stats_tags, 1), + ], + ), + ( + np.array([["a", 0, 4], ["b", 1, 3]], dtype=" [… extraction gap: the "<U…" dtype literal swallowed the remainder of this parametrize case, the rest of tests/mlmodel_sklearn/test_prediction_stats.py, and the opening of tests/mlmodel_sklearn/test_semi_supervised_models.py …] + expected_transaction_name = ( + "test_semi_supervised_models:test_model_methods_wrapped_in_function_trace.<locals>._test" + if six.PY3 + else "test_semi_supervised_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[semi_supervised_model_name], + rollup_metrics=expected_scoped_metrics[semi_supervised_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_semi_supervised_model(semi_supervised_model_name) + + _test() + + +@pytest.mark.skipif(SKLEARN_VERSION < (1, 0, 0), reason="Requires sklearn >= 1.0") +@pytest.mark.parametrize( + "semi_supervised_model_name", + [ + "SelfTrainingClassifier", + ], +) +def test_above_v1_0_model_methods_wrapped_in_function_trace(semi_supervised_model_name, run_semi_supervised_model): + expected_scoped_metrics = { + "SelfTrainingClassifier": [ + ("Function/MLModel/Sklearn/Named/SelfTrainingClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/SelfTrainingClassifier.predict", 1), +
("Function/MLModel/Sklearn/Named/SelfTrainingClassifier.predict_log_proba", 1), + ("Function/MLModel/Sklearn/Named/SelfTrainingClassifier.score", 1), + ("Function/MLModel/Sklearn/Named/SelfTrainingClassifier.predict_proba", 1), + ], + } + expected_transaction_name = ( + "test_semi_supervised_models:test_above_v1_0_model_methods_wrapped_in_function_trace.._test" + if six.PY3 + else "test_semi_supervised_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[semi_supervised_model_name], + rollup_metrics=expected_scoped_metrics[semi_supervised_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_semi_supervised_model(semi_supervised_model_name) + + _test() + + +@pytest.fixture +def run_semi_supervised_model(): + def _run(semi_supervised_model_name): + import sklearn.semi_supervised + from sklearn.datasets import load_iris + from sklearn.model_selection import train_test_split + + X, y = load_iris(return_X_y=True) + x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) + + if semi_supervised_model_name == "SelfTrainingClassifier": + kwargs = {"base_estimator": AdaBoostClassifier()} + else: + kwargs = {} + clf = getattr(sklearn.semi_supervised, semi_supervised_model_name)(**kwargs) + + model = clf.fit(x_train, y_train) + if hasattr(model, "predict"): + model.predict(x_test) + if hasattr(model, "score"): + model.score(x_test, y_test) + if hasattr(model, "predict_log_proba"): + model.predict_log_proba(x_test) + if hasattr(model, "predict_proba"): + model.predict_proba(x_test) + + return model + + return _run diff --git a/tests/mlmodel_sklearn/test_svm_models.py b/tests/mlmodel_sklearn/test_svm_models.py new file mode 100644 index 000000000..fe95f2f46 --- /dev/null +++ b/tests/mlmodel_sklearn/test_svm_models.py @@ -0,0 +1,110 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.common.package_version_utils import get_package_version +from newrelic.packages import six + +SKLEARN_VERSION = tuple(map(int, get_package_version("sklearn").split("."))) + + +@pytest.mark.parametrize( + "svm_model_name", + [ + "LinearSVC", + "LinearSVR", + "SVC", + "NuSVC", + "SVR", + "NuSVR", + "OneClassSVM", + ], +) +def test_model_methods_wrapped_in_function_trace(svm_model_name, run_svm_model): + expected_scoped_metrics = { + "LinearSVC": [ + ("Function/MLModel/Sklearn/Named/LinearSVC.fit", 1), + ("Function/MLModel/Sklearn/Named/LinearSVC.predict", 1), + ], + "LinearSVR": [ + ("Function/MLModel/Sklearn/Named/LinearSVR.fit", 1), + ("Function/MLModel/Sklearn/Named/LinearSVR.predict", 1), + ], + "SVC": [ + ("Function/MLModel/Sklearn/Named/SVC.fit", 1), + ("Function/MLModel/Sklearn/Named/SVC.predict", 1), + ], + "NuSVC": [ + ("Function/MLModel/Sklearn/Named/NuSVC.fit", 1), + ("Function/MLModel/Sklearn/Named/NuSVC.predict", 1), + ], + "SVR": [ + ("Function/MLModel/Sklearn/Named/SVR.fit", 1), + ("Function/MLModel/Sklearn/Named/SVR.predict", 1), + ], + "NuSVR": [ + ("Function/MLModel/Sklearn/Named/NuSVR.fit", 1), + ("Function/MLModel/Sklearn/Named/NuSVR.predict", 1), + ], + "OneClassSVM": [ + ("Function/MLModel/Sklearn/Named/OneClassSVM.fit", 1), + ("Function/MLModel/Sklearn/Named/OneClassSVM.predict", 1), + ], + } + + expected_transaction_name = ( + "test_svm_models:test_model_methods_wrapped_in_function_trace.<locals>._test" + if six.PY3 + else "test_svm_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[svm_model_name], + rollup_metrics=expected_scoped_metrics[svm_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_svm_model(svm_model_name) + + _test() + + +@pytest.fixture +def run_svm_model(): + def _run(svm_model_name): + import sklearn.svm + from sklearn.datasets import load_iris + from sklearn.model_selection import train_test_split + + X, y = load_iris(return_X_y=True) + x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) + + kwargs = {"random_state": 0} + if svm_model_name in ["SVR", "NuSVR", "OneClassSVM"]: + kwargs = {} + clf = getattr(sklearn.svm, svm_model_name)(**kwargs) + + model = clf.fit(x_train, y_train) + model.predict(x_test) + + return model + + return _run diff --git a/tests/mlmodel_sklearn/test_tree_models.py b/tests/mlmodel_sklearn/test_tree_models.py new file mode 100644 index 000000000..b30b7e2ea --- /dev/null +++ b/tests/mlmodel_sklearn/test_tree_models.py @@ -0,0 +1,158 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
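The tree-model tests that follow expect predict and predict_proba to show call counts of 2 because sklearn's score() and predict_log_proba() delegate to them internally, so a function trace on each method observes both the direct and the delegated call. A standalone sketch of that doubling (illustrative only, not part of the changeset):

from sklearn.tree import DecisionTreeClassifier

clf = DecisionTreeClassifier(random_state=0).fit([[0, 0], [1, 1]], [0, 1])
calls = []

orig_predict = clf.predict
def counting_predict(X):
    calls.append("predict")
    return orig_predict(X)
clf.predict = counting_predict  # shadow the bound method on the instance

clf.predict([[2.0, 2.0]])      # one direct call
clf.score([[2.0, 1.0]], [1])   # score() calls predict() internally
assert len(calls) == 2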
+ +import pytest +from testing_support.validators.validate_transaction_metrics import ( + validate_transaction_metrics, +) + +from newrelic.api.background_task import background_task +from newrelic.packages import six + + +def test_model_methods_wrapped_in_function_trace(tree_model_name, run_tree_model): + # Note: in the following expected metrics, predict and predict_proba are called by + # score and predict_log_proba so they are expected to be called twice instead of + # once like the rest of the methods. + expected_scoped_metrics = { + "ExtraTreeRegressor": [ + ("Function/MLModel/Sklearn/Named/ExtraTreeRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/ExtraTreeRegressor.predict", 2), + ("Function/MLModel/Sklearn/Named/ExtraTreeRegressor.score", 1), + ], + "DecisionTreeClassifier": [ + ("Function/MLModel/Sklearn/Named/DecisionTreeClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/DecisionTreeClassifier.predict", 2), + ("Function/MLModel/Sklearn/Named/DecisionTreeClassifier.score", 1), + ("Function/MLModel/Sklearn/Named/DecisionTreeClassifier.predict_log_proba", 1), + ("Function/MLModel/Sklearn/Named/DecisionTreeClassifier.predict_proba", 2), + ], + "ExtraTreeClassifier": [ + ("Function/MLModel/Sklearn/Named/ExtraTreeClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/ExtraTreeClassifier.predict", 2), + ("Function/MLModel/Sklearn/Named/ExtraTreeClassifier.score", 1), + ("Function/MLModel/Sklearn/Named/ExtraTreeClassifier.predict_log_proba", 1), + ("Function/MLModel/Sklearn/Named/ExtraTreeClassifier.predict_proba", 2), + ], + "DecisionTreeRegressor": [ + ("Function/MLModel/Sklearn/Named/DecisionTreeRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/DecisionTreeRegressor.predict", 2), + ("Function/MLModel/Sklearn/Named/DecisionTreeRegressor.score", 1), + ], + } + expected_transaction_name = ( + "test_tree_models:test_model_methods_wrapped_in_function_trace.<locals>._test" + if six.PY3 + else "test_tree_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[tree_model_name], + rollup_metrics=expected_scoped_metrics[tree_model_name], + background_task=True, + ) + @background_task() + def _test(): + run_tree_model() + + _test() + + +def test_multiple_calls_to_model_methods(tree_model_name, run_tree_model): + # Note: in the following expected metrics, predict and predict_proba are called by + # score and predict_log_proba so they are expected to be called twice as often as + # the other methods.
+ expected_scoped_metrics = { + "ExtraTreeRegressor": [ + ("Function/MLModel/Sklearn/Named/ExtraTreeRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/ExtraTreeRegressor.predict", 4), + ("Function/MLModel/Sklearn/Named/ExtraTreeRegressor.score", 2), + ], + "DecisionTreeClassifier": [ + ("Function/MLModel/Sklearn/Named/DecisionTreeClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/DecisionTreeClassifier.predict", 4), + ("Function/MLModel/Sklearn/Named/DecisionTreeClassifier.score", 2), + ("Function/MLModel/Sklearn/Named/DecisionTreeClassifier.predict_log_proba", 2), + ("Function/MLModel/Sklearn/Named/DecisionTreeClassifier.predict_proba", 4), + ], + "ExtraTreeClassifier": [ + ("Function/MLModel/Sklearn/Named/ExtraTreeClassifier.fit", 1), + ("Function/MLModel/Sklearn/Named/ExtraTreeClassifier.predict", 4), + ("Function/MLModel/Sklearn/Named/ExtraTreeClassifier.score", 2), + ("Function/MLModel/Sklearn/Named/ExtraTreeClassifier.predict_log_proba", 2), + ("Function/MLModel/Sklearn/Named/ExtraTreeClassifier.predict_proba", 4), + ], + "DecisionTreeRegressor": [ + ("Function/MLModel/Sklearn/Named/DecisionTreeRegressor.fit", 1), + ("Function/MLModel/Sklearn/Named/DecisionTreeRegressor.predict", 4), + ("Function/MLModel/Sklearn/Named/DecisionTreeRegressor.score", 2), + ], + } + expected_transaction_name = ( + "test_tree_models:test_multiple_calls_to_model_methods.<locals>._test" if six.PY3 else "test_tree_models:_test" + ) + + @validate_transaction_metrics( + expected_transaction_name, + scoped_metrics=expected_scoped_metrics[tree_model_name], + rollup_metrics=expected_scoped_metrics[tree_model_name], + background_task=True, + ) + @background_task() + def _test(): + x_test = [[2.0, 2.0], [2.0, 1.0]] + y_test = [1, 1] + + model = run_tree_model() + + model.predict(x_test) + model.score(x_test, y_test) + # Some models don't have these methods. + if hasattr(model, "predict_log_proba"): + model.predict_log_proba(x_test) + if hasattr(model, "predict_proba"): + model.predict_proba(x_test) + + _test() + + +@pytest.fixture(params=["ExtraTreeRegressor", "DecisionTreeClassifier", "ExtraTreeClassifier", "DecisionTreeRegressor"]) +def tree_model_name(request): + return request.param + + +@pytest.fixture +def run_tree_model(tree_model_name): + def _run(): + import sklearn.tree + + x_train = [[0, 0], [1, 1]] + y_train = [0, 1] + x_test = [[2.0, 2.0], [2.0, 1.0]] + y_test = [1, 1] + + clf = getattr(sklearn.tree, tree_model_name)(random_state=0) + model = clf.fit(x_train, y_train) + + labels = model.predict(x_test) + model.score(x_test, y_test) + # Some models don't have these methods.
+ if hasattr(model, "predict_log_proba"): + model.predict_log_proba(x_test) + if hasattr(model, "predict_proba"): + model.predict_proba(x_test) + return model + + return _run diff --git a/tests/testing_support/fixtures.py b/tests/testing_support/fixtures.py index ce6166f0b..883c3ec59 100644 --- a/tests/testing_support/fixtures.py +++ b/tests/testing_support/fixtures.py @@ -166,7 +166,10 @@ def wrap_shutdown_agent(wrapped, instance, args, kwargs): def wrap_record_custom_metric(wrapped, instance, args, kwargs): def _bind_params(name, value, *args, **kwargs): - return name + if isinstance(name, tuple): + return name[0] + else: + return name metric_name = _bind_params(*args, **kwargs) if ( diff --git a/tests/testing_support/validators/validate_dimensional_metric_payload.py b/tests/testing_support/validators/validate_dimensional_metric_payload.py new file mode 100644 index 000000000..2f4f48c07 --- /dev/null +++ b/tests/testing_support/validators/validate_dimensional_metric_payload.py @@ -0,0 +1,187 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from newrelic.common.object_wrapper import function_wrapper, transient_function_wrapper +from newrelic.core.otlp_utils import otlp_content_setting + +if otlp_content_setting == "protobuf": + from google.protobuf.json_format import MessageToDict +else: + MessageToDict = None + + +def data_points_to_dict(data_points): + return { + frozenset( + {attr["key"]: attribute_to_value(attr["value"]) for attr in (data_point.get("attributes") or [])}.items() + ) + or None: data_point + for data_point in data_points + } + + +def attribute_to_value(attribute): + attribute_type, attribute_value = next(iter(attribute.items())) + if attribute_type == "int_value": + return int(attribute_value) + elif attribute_type == "double_value": + return float(attribute_value) + elif attribute_type == "bool_value": + return bool(attribute_value) + elif attribute_type == "string_value": + return str(attribute_value) + else: + raise TypeError("Invalid attribute type: %s" % attribute_type) + + +def payload_to_metrics(payload): + if type(payload) is not dict: + message = MessageToDict(payload, use_integers_for_enums=True, preserving_proto_field_name=True) + else: + message = payload + + resource_metrics = message.get("resource_metrics") + assert len(resource_metrics) == 1 + resource_metrics = resource_metrics[0] + + resource = resource_metrics.get("resource") + assert resource and resource.get("attributes")[0] == { + "key": "instrumentation.provider", + "value": {"string_value": "newrelic-opentelemetry-python-ml"}, + } + scope_metrics = resource_metrics.get("scope_metrics") + assert len(scope_metrics) == 1 + scope_metrics = scope_metrics[0] + + scope = scope_metrics.get("scope") + assert scope is None + metrics = scope_metrics.get("metrics") + + sent_summary_metrics = {} + sent_count_metrics = {} + for metric in metrics: + metric_name = metric["name"] + if metric.get("sum"): + sent_count_metrics[metric_name] = metric + elif metric.get("summary"): + 
sent_summary_metrics[metric_name] = metric + else: + raise TypeError("Unknown metrics type for metric: %s" % metric) + + return sent_summary_metrics, sent_count_metrics + + +def validate_dimensional_metric_payload(summary_metrics=None, count_metrics=None): + # Validates OTLP metrics as they are sent to the collector. + + summary_metrics = summary_metrics or [] + count_metrics = count_metrics or [] + + @function_wrapper + def _validate_wrapper(wrapped, instance, args, kwargs): + recorded_metrics = [] + + @transient_function_wrapper("newrelic.core.agent_protocol", "OtlpProtocol.send") + def send_request_wrapper(wrapped, instance, args, kwargs): + def _bind_params(method, payload=(), *args, **kwargs): + return method, payload + + method, payload = _bind_params(*args, **kwargs) + + if method == "dimensional_metric_data" and payload: + recorded_metrics.append(payload) + + return wrapped(*args, **kwargs) + + wrapped = send_request_wrapper(wrapped) + val = wrapped(*args, **kwargs) + assert recorded_metrics + + decoded_payloads = [payload_to_metrics(payload) for payload in recorded_metrics] + for sent_summary_metrics, sent_count_metrics in decoded_payloads: + for metric, tags, count in summary_metrics: + if isinstance(tags, dict): + tags = frozenset(tags.items()) + + if not count: + if metric in sent_summary_metrics: + data_points = data_points_to_dict(sent_summary_metrics[metric]["summary"]["data_points"]) + assert tags not in data_points, "(%s, %s) Unexpected but found." % (metric, tags and dict(tags)) + else: + assert metric in sent_summary_metrics, "%s Not Found. Got: %s" % ( + metric, + list(sent_summary_metrics.keys()), + ) + data_points = data_points_to_dict(sent_summary_metrics[metric]["summary"]["data_points"]) + assert tags in data_points, "(%s, %s) Not Found. Got: %s" % ( + metric, + tags and dict(tags), + list(data_points.keys()), + ) + + # Validate metric format + metric_container = data_points[tags] + for key in ("start_time_unix_nano", "time_unix_nano", "count", "sum", "quantile_values"): + assert key in metric_container, "Invalid metric format. Missing key: %s" % key + quantile_values = metric_container["quantile_values"] + assert len(quantile_values) == 2 # Min and Max + + # Validate metric count + if count != "present": + assert int(metric_container["count"]) == count, "(%s, %s): Expected: %s Got: %s" % ( + metric, + tags and dict(tags), + count, + metric_container["count"], + ) + + for metric, tags, count in count_metrics: + if isinstance(tags, dict): + tags = frozenset(tags.items()) + + if not count: + if metric in sent_count_metrics: + data_points = data_points_to_dict(sent_count_metrics[metric]["sum"]["data_points"]) + assert tags not in data_points, "(%s, %s) Unexpected but found." % (metric, tags and dict(tags)) + else: + assert metric in sent_count_metrics, "%s Not Found. Got: %s" % ( + metric, + list(sent_count_metrics.keys()), + ) + data_points = data_points_to_dict(sent_count_metrics[metric]["sum"]["data_points"]) + assert tags in data_points, "(%s, %s) Not Found. Got: %s" % ( + metric, + tags and dict(tags), + list(data_points.keys()), + ) + + # Validate metric format + assert sent_count_metrics[metric]["sum"].get("is_monotonic") + assert sent_count_metrics[metric]["sum"].get("aggregation_temporality") == 1 + metric_container = data_points[tags] + for key in ("start_time_unix_nano", "time_unix_nano", "as_int"): + assert key in metric_container, "Invalid metric format. 
Missing key: %s" % key + + # Validate metric count + if count != "present": + assert int(metric_container["as_int"]) == count, "(%s, %s): Expected: %s Got: %s" % ( + metric, + tags and dict(tags), + count, + metric_container["count"], + ) + + return val + + return _validate_wrapper diff --git a/tests/testing_support/validators/validate_dimensional_metrics_outside_transaction.py b/tests/testing_support/validators/validate_dimensional_metrics_outside_transaction.py new file mode 100644 index 000000000..2854a7478 --- /dev/null +++ b/tests/testing_support/validators/validate_dimensional_metrics_outside_transaction.py @@ -0,0 +1,99 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy + +from testing_support.fixtures import catch_background_exceptions +from newrelic.common.object_wrapper import transient_function_wrapper, function_wrapper + + +def validate_dimensional_metrics_outside_transaction(dimensional_metrics=None): + dimensional_metrics = dimensional_metrics or [] + + @function_wrapper + def _validate_wrapper(wrapped, instance, args, kwargs): + + record_dimensional_metric_called = [] + recorded_metrics = [None] + + @transient_function_wrapper("newrelic.core.stats_engine", "StatsEngine.record_dimensional_metric") + @catch_background_exceptions + def _validate_dimensional_metrics_outside_transaction(wrapped, instance, args, kwargs): + record_dimensional_metric_called.append(True) + try: + result = wrapped(*args, **kwargs) + except: + raise + else: + metrics = instance.dimensional_stats_table.metrics() + # Record a copy of the metric value so that the values aren't + # merged in the future + _metrics = {} + for k, v in metrics: + _metrics[k] = copy.copy(v) + recorded_metrics[0] = _metrics + + return result + + def _validate(metrics, name, tags, count): + key = (name, tags) + # Dimensional metric lookup + metric_container = metrics.get(name, {}) + metric = metric_container.get(tags) + + def _metrics_table(): + out = [""] + out.append("Expected: {0}: {1}".format(key, count)) + for metric_key, metric_container in metrics.items(): + if isinstance(metric_container, dict): + for metric_tags, metric_value in metric_container.items(): + out.append("{0}: {1}".format((metric_key, metric_tags), metric_value[0])) + else: + out.append("{0}: {1}".format(metric_key, metric_container[0])) + return "\n".join(out) + + def _metric_details(): + return "metric=%r, count=%r" % (key, metric.call_count) + + if count is not None: + assert metric is not None, _metrics_table() + if count == "present": + assert metric.call_count > 0, _metric_details() + else: + assert metric.call_count == count, _metric_details() + + assert metric.total_call_time >= 0, (key, metric) + assert metric.total_exclusive_call_time >= 0, (key, metric) + assert metric.min_call_time >= 0, (key, metric) + assert metric.sum_of_squares >= 0, (key, metric) + + else: + assert metric is None, _metrics_table() + + _new_wrapper = _validate_dimensional_metrics_outside_transaction(wrapped) + val = 
_new_wrapper(*args, **kwargs) + assert record_dimensional_metric_called + metrics = recorded_metrics[0] + + record_dimensional_metric_called[:] = [] + recorded_metrics[:] = [] + + for dimensional_metric, dimensional_tags, count in dimensional_metrics: + if isinstance(dimensional_tags, dict): + dimensional_tags = frozenset(dimensional_tags.items()) + _validate(metrics, dimensional_metric, dimensional_tags, count) + + return val + + return _validate_wrapper diff --git a/tests/testing_support/validators/validate_log_events_outside_transaction.py b/tests/testing_support/validators/validate_log_events_outside_transaction.py index f46b6e843..4bc941965 100644 --- a/tests/testing_support/validators/validate_log_events_outside_transaction.py +++ b/tests/testing_support/validators/validate_log_events_outside_transaction.py @@ -14,11 +14,11 @@ import copy +from testing_support.fixtures import catch_background_exceptions + +from newrelic.common.object_wrapper import function_wrapper, transient_function_wrapper from newrelic.packages import six -from newrelic.common.object_wrapper import (transient_function_wrapper, - function_wrapper) -from testing_support.fixtures import catch_background_exceptions def validate_log_events_outside_transaction(events): @function_wrapper @@ -35,18 +35,16 @@ def _validate_log_events_outside_transaction(wrapped, instance, args, kwargs): result = wrapped(*args, **kwargs) except: raise - else: - recorded_logs[:] = [] - recorded_logs.extend(list(instance._log_events)) + recorded_logs[:] = [] + recorded_logs.extend(list(instance._log_events)) return result - _new_wrapper = _validate_log_events_outside_transaction(wrapped) val = _new_wrapper(*args, **kwargs) assert record_called logs = copy.copy(recorded_logs) - + record_called[:] = [] recorded_logs[:] = [] @@ -60,7 +58,6 @@ def _validate_log_events_outside_transaction(wrapped, instance, args, kwargs): return val - def _check_log_attributes(expected, captured, mismatches): for key, value in six.iteritems(expected): if hasattr(captured, key): diff --git a/tests/testing_support/validators/validate_ml_event_count.py b/tests/testing_support/validators/validate_ml_event_count.py new file mode 100644 index 000000000..ec5de8dcf --- /dev/null +++ b/tests/testing_support/validators/validate_ml_event_count.py @@ -0,0 +1,54 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
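The ML event count validators defined in this file and the next are applied as decorators stacked above background_task, the same way the metric validators are used in the sklearn tests above. A hypothetical usage sketch (the test name and event payload are invented for illustration):

from testing_support.validators.validate_ml_event_count import validate_ml_event_count

from newrelic.api.background_task import background_task
from newrelic.api.transaction import record_ml_event


@validate_ml_event_count(count=1)
@background_task()
def test_records_a_single_ml_event():
    record_ml_event("MyMLEvent", {"label": "value"})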
+ +import copy + +from testing_support.fixtures import catch_background_exceptions + +from newrelic.common.object_wrapper import function_wrapper, transient_function_wrapper + + +def validate_ml_event_count(count=1): + @function_wrapper + def _validate_wrapper(wrapped, instance, args, kwargs): + + record_called = [] + recorded_events = [] + + @transient_function_wrapper("newrelic.core.stats_engine", "StatsEngine.record_transaction") + @catch_background_exceptions + def _validate_ml_event_count(wrapped, instance, args, kwargs): + record_called.append(True) + try: + result = wrapped(*args, **kwargs) + except: + raise + recorded_events.extend(list(instance._ml_events)) + + return result + + _new_wrapper = _validate_ml_event_count(wrapped) + val = _new_wrapper(*args, **kwargs) + if count: + assert record_called + events = copy.copy(recorded_events) + + record_called[:] = [] + recorded_events[:] = [] + + assert count == len(events), len(events) + + return val + + return _validate_wrapper diff --git a/tests/testing_support/validators/validate_ml_event_count_outside_transaction.py b/tests/testing_support/validators/validate_ml_event_count_outside_transaction.py new file mode 100644 index 000000000..6ac764d1a --- /dev/null +++ b/tests/testing_support/validators/validate_ml_event_count_outside_transaction.py @@ -0,0 +1,55 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy + +from testing_support.fixtures import catch_background_exceptions + +from newrelic.common.object_wrapper import function_wrapper, transient_function_wrapper + + +def validate_ml_event_count_outside_transaction(count=1): + @function_wrapper + def _validate_wrapper(wrapped, instance, args, kwargs): + + record_called = [] + recorded_events = [] + + @transient_function_wrapper("newrelic.core.stats_engine", "StatsEngine.record_ml_event") + @catch_background_exceptions + def _validate_ml_event_count_outside_transaction(wrapped, instance, args, kwargs): + record_called.append(True) + try: + result = wrapped(*args, **kwargs) + except: + raise + recorded_events[:] = [] + recorded_events.extend(list(instance._ml_events)) + + return result + + _new_wrapper = _validate_ml_event_count_outside_transaction(wrapped) + val = _new_wrapper(*args, **kwargs) + if count: + assert record_called + events = copy.copy(recorded_events) + + record_called[:] = [] + recorded_events[:] = [] + + assert count == len(events), len(events) + + return val + + return _validate_wrapper diff --git a/tests/testing_support/validators/validate_ml_event_payload.py b/tests/testing_support/validators/validate_ml_event_payload.py new file mode 100644 index 000000000..4d43cbb22 --- /dev/null +++ b/tests/testing_support/validators/validate_ml_event_payload.py @@ -0,0 +1,104 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from newrelic.common.object_wrapper import function_wrapper, transient_function_wrapper +from newrelic.core.otlp_utils import otlp_content_setting + +if otlp_content_setting == "protobuf": + from google.protobuf.json_format import MessageToDict +else: + MessageToDict = None + + +def attribute_to_value(attribute): + attribute_type, attribute_value = next(iter(attribute.items())) + if attribute_type == "int_value": + return int(attribute_value) + elif attribute_type == "double_value": + return float(attribute_value) + elif attribute_type == "bool_value": + return bool(attribute_value) + elif attribute_type == "string_value": + return str(attribute_value) + else: + raise TypeError("Invalid attribute type: %s" % attribute_type) + + +def payload_to_ml_events(payload): + if type(payload) is not dict: + message = MessageToDict(payload, use_integers_for_enums=True, preserving_proto_field_name=True) + else: + message = payload + + resource_logs = message.get("resource_logs") + assert len(resource_logs) == 1 + resource_logs = resource_logs[0] + resource = resource_logs.get("resource") + assert resource and resource.get("attributes")[0] == { + "key": "instrumentation.provider", + "value": {"string_value": "newrelic-opentelemetry-python-ml"}, + } + scope_logs = resource_logs.get("scope_logs") + assert len(scope_logs) == 1 + scope_logs = scope_logs[0] + + scope = scope_logs.get("scope") + assert scope is None + logs = scope_logs.get("log_records") + + return logs + + +def validate_ml_event_payload(ml_events=None): + # Validates OTLP events as they are sent to the collector. + + ml_events = ml_events or [] + + @function_wrapper + def _validate_wrapper(wrapped, instance, args, kwargs): + recorded_ml_events = [] + + @transient_function_wrapper("newrelic.core.agent_protocol", "OtlpProtocol.send") + def send_request_wrapper(wrapped, instance, args, kwargs): + def _bind_params(method, payload=(), *args, **kwargs): + return method, payload + + method, payload = _bind_params(*args, **kwargs) + + if method == "ml_event_data" and payload: + recorded_ml_events.append(payload) + + return wrapped(*args, **kwargs) + + wrapped = send_request_wrapper(wrapped) + val = wrapped(*args, **kwargs) + assert recorded_ml_events + + decoded_payloads = [payload_to_ml_events(payload) for payload in recorded_ml_events] + all_logs = [] + for sent_logs in decoded_payloads: + for data_point in sent_logs: + for key in ("time_unix_nano",): + assert key in data_point, "Invalid log format. Missing key: %s" % key + + all_logs.append( + {attr["key"]: attribute_to_value(attr["value"]) for attr in (data_point.get("attributes") or [])} + ) + + for expected_event in ml_events: + assert expected_event in all_logs, "%s Not Found. Got: %s" % (expected_event, all_logs) + + return val + + return _validate_wrapper diff --git a/tests/testing_support/validators/validate_ml_events.py b/tests/testing_support/validators/validate_ml_events.py new file mode 100644 index 000000000..251e8dbe7 --- /dev/null +++ b/tests/testing_support/validators/validate_ml_events.py @@ -0,0 +1,110 @@ +# Copyright 2010 New Relic, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import time + +from testing_support.fixtures import catch_background_exceptions + +from newrelic.common.object_wrapper import function_wrapper, transient_function_wrapper +from newrelic.packages import six + + +def validate_ml_events(events): + @function_wrapper + def _validate_wrapper(wrapped, instance, args, kwargs): + + record_called = [] + recorded_events = [] + + @transient_function_wrapper("newrelic.core.stats_engine", "StatsEngine.record_transaction") + @catch_background_exceptions + def _validate_ml_events(wrapped, instance, args, kwargs): + record_called.append(True) + try: + result = wrapped(*args, **kwargs) + except: + raise + recorded_events[:] = [] + recorded_events.extend(list(instance._ml_events)) + + return result + + _new_wrapper = _validate_ml_events(wrapped) + val = _new_wrapper(*args, **kwargs) + assert record_called + found_events = copy.copy(recorded_events) + + record_called[:] = [] + recorded_events[:] = [] + + for expected in events: + matching_ml_events = 0 + mismatches = [] + for captured in found_events: + if _check_event_attributes(expected, captured, mismatches): + matching_ml_events += 1 + assert matching_ml_events == 1, _event_details(matching_ml_events, events, mismatches) + + return val + + return _validate_wrapper + + +def _check_event_attributes(expected, captured, mismatches): + assert len(captured) == 2 # [intrinsic, user attributes] + + intrinsics = captured[0] + + if intrinsics["type"] != expected[0]["type"]: + mismatches.append("key: type, value:<%s><%s>" % (expected[0]["type"], captured[0].get("type", None))) + return False + + now = time.time() + + if not (isinstance(intrinsics["timestamp"], int) and intrinsics["timestamp"] <= 1000.0 * now): + mismatches.append("key: timestamp, value:<%s>" % intrinsics["timestamp"]) + return False + + captured_keys = set(six.iterkeys(captured[1])) + expected_keys = set(six.iterkeys(expected[1])) + extra_keys = captured_keys - expected_keys + + if extra_keys: + mismatches.append("extra_keys: %s" % str(tuple(extra_keys))) + return False + + for key, value in six.iteritems(expected[1]): + if key in captured[1]: + captured_value = captured[1].get(key, None) + else: + mismatches.append("key: %s, value:<%s><%s>" % (key, value, captured[1].get(key, None))) + return False + + if value is not None: + if value != captured_value: + mismatches.append("key: %s, value:<%s><%s>" % (key, value, captured_value)) + return False + + return True + + +def _event_details(matching_ml_events, captured, mismatches): + details = [ + "matching_ml_events=%d" % matching_ml_events, + "mismatches=%s" % mismatches, + "captured_events=%s" % captured, + ] + + return "\n".join(details) diff --git a/tests/testing_support/validators/validate_ml_events_outside_transaction.py b/tests/testing_support/validators/validate_ml_events_outside_transaction.py new file mode 100644 index 000000000..107771442 --- /dev/null +++ b/tests/testing_support/validators/validate_ml_events_outside_transaction.py 
@@ -0,0 +1,64 @@ +# Copyright 2010 New Relic, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy + +from testing_support.fixtures import catch_background_exceptions +from testing_support.validators.validate_ml_events import ( + _check_event_attributes, + _event_details, +) + +from newrelic.common.object_wrapper import function_wrapper, transient_function_wrapper + + +def validate_ml_events_outside_transaction(events): + @function_wrapper + def _validate_wrapper(wrapped, instance, args, kwargs): + + record_called = [] + recorded_events = [] + + @transient_function_wrapper("newrelic.core.stats_engine", "StatsEngine.record_ml_event") + @catch_background_exceptions + def _validate_ml_events_outside_transaction(wrapped, instance, args, kwargs): + record_called.append(True) + try: + result = wrapped(*args, **kwargs) + except: + raise + recorded_events[:] = [] + recorded_events.extend(list(instance._ml_events)) + + return result + + _new_wrapper = _validate_ml_events_outside_transaction(wrapped) + val = _new_wrapper(*args, **kwargs) + assert record_called + found_events = copy.copy(recorded_events) + + record_called[:] = [] + recorded_events[:] = [] + + for expected in events: + matching_ml_events = 0 + mismatches = [] + for captured in found_events: + if _check_event_attributes(expected, captured, mismatches): + matching_ml_events += 1 + assert matching_ml_events == 1, _event_details(matching_ml_events, events, mismatches) + + return val + + return _validate_wrapper diff --git a/tests/testing_support/validators/validate_transaction_metrics.py b/tests/testing_support/validators/validate_transaction_metrics.py index 7122b009a..0cb569d29 100644 --- a/tests/testing_support/validators/validate_transaction_metrics.py +++ b/tests/testing_support/validators/validate_transaction_metrics.py @@ -27,11 +27,13 @@ def validate_transaction_metrics( scoped_metrics=None, rollup_metrics=None, custom_metrics=None, + dimensional_metrics=None, index=-1, ): scoped_metrics = scoped_metrics or [] rollup_metrics = rollup_metrics or [] custom_metrics = custom_metrics or [] + dimensional_metrics = dimensional_metrics or [] if background_task: unscoped_metrics = [ @@ -56,6 +58,7 @@ def _validate_wrapper(wrapped, instance, args, kwargs): record_transaction_called = [] recorded_metrics = [] + recorded_dimensional_metrics = [] @transient_function_wrapper("newrelic.core.stats_engine", "StatsEngine.record_transaction") @catch_background_exceptions @@ -74,17 +77,36 @@ def _validate_transaction_metrics(wrapped, instance, args, kwargs): _metrics[k] = copy.copy(v) recorded_metrics.append(_metrics) + metrics = instance.dimensional_stats_table.metrics() + # Record a copy of the metric value so that the values aren't + # merged in the future + _metrics = {} + for k, v in metrics: + _metrics[k] = copy.copy(v) + recorded_dimensional_metrics.append(_metrics) + return result def _validate(metrics, name, scope, count): key = (name, scope) - metric = metrics.get(key) + + if isinstance(scope, str): + # Normal metric lookup + metric = 
diff --git a/tests/testing_support/validators/validate_transaction_metrics.py b/tests/testing_support/validators/validate_transaction_metrics.py
index 7122b009a..0cb569d29 100644
--- a/tests/testing_support/validators/validate_transaction_metrics.py
+++ b/tests/testing_support/validators/validate_transaction_metrics.py
@@ -27,11 +27,13 @@ def validate_transaction_metrics(
     scoped_metrics=None,
     rollup_metrics=None,
     custom_metrics=None,
+    dimensional_metrics=None,
     index=-1,
 ):
     scoped_metrics = scoped_metrics or []
     rollup_metrics = rollup_metrics or []
     custom_metrics = custom_metrics or []
+    dimensional_metrics = dimensional_metrics or []
 
     if background_task:
         unscoped_metrics = [
@@ -56,6 +58,7 @@ def _validate_wrapper(wrapped, instance, args, kwargs):
 
         record_transaction_called = []
         recorded_metrics = []
+        recorded_dimensional_metrics = []
 
         @transient_function_wrapper("newrelic.core.stats_engine", "StatsEngine.record_transaction")
         @catch_background_exceptions
@@ -74,17 +77,36 @@ def _validate_transaction_metrics(wrapped, instance, args, kwargs):
                 _metrics[k] = copy.copy(v)
             recorded_metrics.append(_metrics)
 
+            metrics = instance.dimensional_stats_table.metrics()
+            # Record a copy of the metric value so that the values aren't
+            # merged in the future
+            _metrics = {}
+            for k, v in metrics:
+                _metrics[k] = copy.copy(v)
+            recorded_dimensional_metrics.append(_metrics)
+
             return result
 
         def _validate(metrics, name, scope, count):
             key = (name, scope)
-            metric = metrics.get(key)
+
+            if isinstance(scope, str):
+                # Normal metric lookup
+                metric = metrics.get(key)
+            else:
+                # Dimensional metric lookup
+                metric_container = metrics.get(name, {})
+                metric = metric_container.get(scope)
 
             def _metrics_table():
                 out = [""]
                 out.append("Expected: {0}: {1}".format(key, count))
-                for metric_key, metric_value in metrics.items():
-                    out.append("{0}: {1}".format(metric_key, metric_value[0]))
+                for metric_key, metric_container in metrics.items():
+                    if isinstance(metric_container, dict):
+                        for metric_tags, metric_value in metric_container.items():
+                            out.append("{0}: {1}".format((metric_key, metric_tags), metric_value[0]))
+                    else:
+                        out.append("{0}: {1}".format(metric_key, metric_container[0]))
                 return "\n".join(out)
 
             def _metric_details():
@@ -109,9 +131,11 @@ def _metric_details():
         val = _new_wrapper(*args, **kwargs)
         assert record_transaction_called
         metrics = recorded_metrics[index]
+        captured_dimensional_metrics = recorded_dimensional_metrics[index]
 
         record_transaction_called[:] = []
         recorded_metrics[:] = []
+        recorded_dimensional_metrics[:] = []
 
         for unscoped_metric in unscoped_metrics:
             _validate(metrics, unscoped_metric, "", 1)
@@ -125,6 +149,11 @@ def _metric_details():
         for custom_name, custom_count in custom_metrics:
             _validate(metrics, custom_name, "", custom_count)
 
+        for dimensional_name, dimensional_tags, dimensional_count in dimensional_metrics:
+            if isinstance(dimensional_tags, dict):
+                dimensional_tags = frozenset(dimensional_tags.items())
+            _validate(captured_dimensional_metrics, dimensional_name, dimensional_tags, dimensional_count)
+
         custom_metric_names = {name for name, _ in custom_metrics}
         for name, _ in metrics:
             if name not in custom_metric_names:
@@ -132,4 +161,4 @@ def _metric_details():
 
         return val
 
-    return _validate_wrapper
\ No newline at end of file
+    return _validate_wrapper
diff --git a/tox.ini b/tox.ini
index 76eba20c5..b142cd840 100644
--- a/tox.ini
+++ b/tox.ini
@@ -63,6 +63,8 @@ envlist =
     python-agent_unittests-{pypy27,pypy38}-without_extensions,
    python-application_celery-{py27,py37,py38,py39,py310,py311,pypy27,pypy38},
     gearman-application_gearman-{py27,pypy27},
+    python-mlmodel_sklearn-{py38,py39,py310,py311}-scikitlearnlatest,
+    python-mlmodel_sklearn-{py37}-scikitlearn0101,
     python-component_djangorestframework-py27-djangorestframework0300,
     python-component_djangorestframework-{py37,py38,py39,py310,py311}-djangorestframeworklatest,
     python-component_flask_rest-{py37,py38,py39,pypy38}-flaskrestxlatest,
@@ -186,9 +188,17 @@ deps =
     adapter_waitress-waitress02: waitress<2.1
     adapter_waitress-waitresslatest: waitress
     agent_features: beautifulsoup4
+    agent_features-{py37,py38,py39,py310,py311,pypy38}: protobuf
+    agent_features-{py27,pypy27}: protobuf<3.18.0
     application_celery: celery<6.0
     application_celery-{py37,pypy38}: importlib-metadata<5.0
     application_gearman: gearman<3.0.0
+    mlmodel_sklearn: pandas
+    mlmodel_sklearn: protobuf
+    mlmodel_sklearn: numpy
+    mlmodel_sklearn: scipy<1.11.0
+    mlmodel_sklearn-scikitlearnlatest: scikit-learn
+    mlmodel_sklearn-scikitlearn0101: scikit-learn<1.1
     component_djangorestframework-djangorestframework0300: Django<1.9
     component_djangorestframework-djangorestframework0300: djangorestframework<3.1
     component_djangorestframework-djangorestframeworklatest: Django
@@ -399,6 +409,7 @@ changedir =
     agent_unittests: tests/agent_unittests
     application_celery: tests/application_celery
     application_gearman: tests/application_gearman
+    mlmodel_sklearn: tests/mlmodel_sklearn
     component_djangorestframework: tests/component_djangorestframework
     component_flask_rest: tests/component_flask_rest
     component_graphqlserver: tests/component_graphqlserver
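Last, a sketch of how the new dimensional_metrics argument to validate_transaction_metrics might be exercised. It assumes a transaction-level record_dimensional_metric API mirroring the application-level one; the metric name, value, and tags are illustrative. Tags may be given as a dict, which the validator normalizes to a frozenset before lookup in the dimensional stats table.

from testing_support.validators.validate_transaction_metrics import (
    validate_transaction_metrics,
)

from newrelic.api.background_task import background_task
from newrelic.api.transaction import record_dimensional_metric  # assumed API


@validate_transaction_metrics(
    "test_dimensional_metric",
    background_task=True,
    # Each entry is (metric name, tags, expected call count).
    dimensional_metrics=[("Model/Predict", {"model": "tree"}, 1)],
)
@background_task(name="test_dimensional_metric")
def test_dimensional_metric():
    record_dimensional_metric("Model/Predict", 1, tags={"model": "tree"})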