# HELP go_gc_duration_seconds A summary of the GC invocation durations.
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds{quantile="0"} 2.185e-05
go_gc_duration_seconds{quantile="0.25"} 3.0962e-05
go_gc_duration_seconds{quantile="0.5"} 3.5266e-05
go_gc_duration_seconds{quantile="0.75"} 4.5326e-05
go_gc_duration_seconds{quantile="1"} 0.002944205
go_gc_duration_seconds_sum 7.584115139
go_gc_duration_seconds_count 79497
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
go_goroutines 299
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
go_info{version="go1.12.4"} 1
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes 8.37809152e+08
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
# TYPE go_memstats_alloc_bytes_total counter
go_memstats_alloc_bytes_total 4.2560235503832e+13
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
# TYPE go_memstats_buck_hash_sys_bytes gauge
go_memstats_buck_hash_sys_bytes 1.5481708e+07
# HELP go_memstats_frees_total Total number of frees.
# TYPE go_memstats_frees_total counter
go_memstats_frees_total 2.94249829326e+11
# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started.
# TYPE go_memstats_gc_cpu_fraction gauge
go_memstats_gc_cpu_fraction 0.00637443749989841
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
# TYPE go_memstats_gc_sys_bytes gauge
go_memstats_gc_sys_bytes 2.08504832e+08
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
# TYPE go_memstats_heap_alloc_bytes gauge
go_memstats_heap_alloc_bytes 8.37809152e+08
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
# TYPE go_memstats_heap_idle_bytes gauge
go_memstats_heap_idle_bytes 4.221517824e+09
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
# TYPE go_memstats_heap_inuse_bytes gauge
go_memstats_heap_inuse_bytes 9.43407104e+08
# HELP go_memstats_heap_objects Number of allocated objects.
# TYPE go_memstats_heap_objects gauge
go_memstats_heap_objects 6.666536e+06
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
# TYPE go_memstats_heap_released_bytes gauge
go_memstats_heap_released_bytes 2.481790976e+09
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
# TYPE go_memstats_heap_sys_bytes gauge
go_memstats_heap_sys_bytes 5.164924928e+09
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
go_memstats_last_gc_time_seconds 1.764746217437199e+09
# HELP go_memstats_lookups_total Total number of pointer lookups.
# TYPE go_memstats_lookups_total counter
go_memstats_lookups_total 0
# HELP go_memstats_mallocs_total Total number of mallocs.
# TYPE go_memstats_mallocs_total counter
go_memstats_mallocs_total 2.94256495862e+11
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
# TYPE go_memstats_mcache_inuse_bytes gauge
go_memstats_mcache_inuse_bytes 6944
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
# TYPE go_memstats_mcache_sys_bytes gauge
go_memstats_mcache_sys_bytes 16384
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
# TYPE go_memstats_mspan_inuse_bytes gauge
go_memstats_mspan_inuse_bytes 1.660464e+07
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
# TYPE go_memstats_mspan_sys_bytes gauge
go_memstats_mspan_sys_bytes 7.0090752e+07
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
# TYPE go_memstats_next_gc_bytes gauge
go_memstats_next_gc_bytes 1.252393344e+09
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
# TYPE go_memstats_other_sys_bytes gauge
go_memstats_other_sys_bytes 1.3327596e+07
# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
# TYPE go_memstats_stack_inuse_bytes gauge
go_memstats_stack_inuse_bytes 2.4576e+06
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
# TYPE go_memstats_stack_sys_bytes gauge
go_memstats_stack_sys_bytes 2.4576e+06
# HELP go_memstats_sys_bytes Number of bytes obtained from system.
# TYPE go_memstats_sys_bytes gauge
go_memstats_sys_bytes 5.4748038e+09
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
go_threads 15
# HELP net_conntrack_dialer_conn_attempted_total Total number of connections attempted by the given dialer a given name.
# TYPE net_conntrack_dialer_conn_attempted_total counter
net_conntrack_dialer_conn_attempted_total{dialer_name="alertmanager"} 234
net_conntrack_dialer_conn_attempted_total{dialer_name="default"} 0
net_conntrack_dialer_conn_attempted_total{dialer_name="kubernetes-apiservers"} 1
net_conntrack_dialer_conn_attempted_total{dialer_name="kubernetes-cadvisor"} 1
net_conntrack_dialer_conn_attempted_total{dialer_name="kubernetes-ingress"} 2.197279e+06
net_conntrack_dialer_conn_attempted_total{dialer_name="kubernetes-nodes"} 1
net_conntrack_dialer_conn_attempted_total{dialer_name="kubernetes-pods"} 184429
# HELP net_conntrack_dialer_conn_closed_total Total number of connections closed which originated from the dialer of a given name.
# TYPE net_conntrack_dialer_conn_closed_total counter
net_conntrack_dialer_conn_closed_total{dialer_name="alertmanager"} 233
net_conntrack_dialer_conn_closed_total{dialer_name="default"} 0
net_conntrack_dialer_conn_closed_total{dialer_name="kubernetes-apiservers"} 0
net_conntrack_dialer_conn_closed_total{dialer_name="kubernetes-cadvisor"} 0
net_conntrack_dialer_conn_closed_total{dialer_name="kubernetes-ingress"} 2.197229e+06
net_conntrack_dialer_conn_closed_total{dialer_name="kubernetes-nodes"} 0
net_conntrack_dialer_conn_closed_total{dialer_name="kubernetes-pods"} 92193
# HELP net_conntrack_dialer_conn_established_total Total number of connections successfully established by the given dialer a given name.
# TYPE net_conntrack_dialer_conn_established_total counter
net_conntrack_dialer_conn_established_total{dialer_name="alertmanager"} 234
net_conntrack_dialer_conn_established_total{dialer_name="default"} 0
net_conntrack_dialer_conn_established_total{dialer_name="kubernetes-apiservers"} 1
net_conntrack_dialer_conn_established_total{dialer_name="kubernetes-cadvisor"} 1
net_conntrack_dialer_conn_established_total{dialer_name="kubernetes-ingress"} 2.197254e+06
net_conntrack_dialer_conn_established_total{dialer_name="kubernetes-nodes"} 1
net_conntrack_dialer_conn_established_total{dialer_name="kubernetes-pods"} 92221
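The go_* and process_* families in this dump come from the standard collectors that the Go client library registers in its default registry, while the net_conntrack_* series come from the instrumented dialers and listeners Prometheus wraps its HTTP connections in. As a point of reference, a minimal sketch (not taken from this dump; the import path and listen address are assumptions) of how a Go service exposes the same default families with prometheus/client_golang:

    // Sketch only: serving promhttp.Handler() exposes the default registry,
    // which already contains the Go runtime and process collectors.
    package main

    import (
        "log"
        "net/http"

        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        http.Handle("/metrics", promhttp.Handler())
        log.Fatal(http.ListenAndServe(":9090", nil)) // placeholder address
    }
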
# HELP net_conntrack_dialer_conn_failed_total Total number of connections failed to dial by the dialer a given name.
# TYPE net_conntrack_dialer_conn_failed_total counter
net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-apiservers",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-apiservers",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-apiservers",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-apiservers",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-cadvisor",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-cadvisor",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-cadvisor",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-cadvisor",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-ingress",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-ingress",reason="resolution"} 24
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-ingress",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-ingress",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-nodes",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-nodes",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-nodes",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-nodes",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-pods",reason="refused"} 92189
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-pods",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-pods",reason="timeout"} 18
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-pods",reason="unknown"} 92208
# HELP net_conntrack_listener_conn_accepted_total Total number of connections opened to the listener of a given name.
# TYPE net_conntrack_listener_conn_accepted_total counter
net_conntrack_listener_conn_accepted_total{listener_name="http"} 19012
# HELP net_conntrack_listener_conn_closed_total Total number of connections closed that were made to the listener of a given name.
# TYPE net_conntrack_listener_conn_closed_total counter
net_conntrack_listener_conn_closed_total{listener_name="http"} 19018
# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
# TYPE process_cpu_seconds_total counter
process_cpu_seconds_total 606002.32
# HELP process_max_fds Maximum number of open file descriptors.
# TYPE process_max_fds gauge
process_max_fds 1.048576e+06
# HELP process_open_fds Number of open file descriptors.
# TYPE process_open_fds gauge
process_open_fds 142
# HELP process_resident_memory_bytes Resident memory size in bytes.
# TYPE process_resident_memory_bytes gauge
process_resident_memory_bytes 3.396235264e+09
# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
# TYPE process_start_time_seconds gauge
process_start_time_seconds 1.76336376812e+09
# HELP process_virtual_memory_bytes Virtual memory size in bytes.
# TYPE process_virtual_memory_bytes gauge
process_virtual_memory_bytes 2.9833383936e+10
# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.
# TYPE process_virtual_memory_max_bytes gauge
process_virtual_memory_max_bytes -1
# HELP prometheus_api_remote_read_queries The current number of remote read queries being executed or waiting.
# TYPE prometheus_api_remote_read_queries gauge
prometheus_api_remote_read_queries 0
# HELP prometheus_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which prometheus was built.
# TYPE prometheus_build_info gauge
prometheus_build_info{branch="HEAD",goversion="go1.12.4",revision="d3245f15022551c6fc8281766ea62db4d71e2747",version="2.9.2"} 1
# HELP prometheus_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload.
# TYPE prometheus_config_last_reload_success_timestamp_seconds gauge
prometheus_config_last_reload_success_timestamp_seconds 1.7633637874097195e+09
# HELP prometheus_config_last_reload_successful Whether the last configuration reload attempt was successful.
# TYPE prometheus_config_last_reload_successful gauge
prometheus_config_last_reload_successful 1
# HELP prometheus_engine_queries The current number of queries being executed or waiting.
# TYPE prometheus_engine_queries gauge
prometheus_engine_queries 0
# HELP prometheus_engine_queries_concurrent_max The max number of concurrent queries.
# TYPE prometheus_engine_queries_concurrent_max gauge
prometheus_engine_queries_concurrent_max 20
# HELP prometheus_engine_query_duration_seconds Query timings
# TYPE prometheus_engine_query_duration_seconds summary
prometheus_engine_query_duration_seconds{slice="inner_eval",quantile="0.5"} 1.1971e-05
prometheus_engine_query_duration_seconds{slice="inner_eval",quantile="0.9"} 0.002152922
prometheus_engine_query_duration_seconds{slice="inner_eval",quantile="0.99"} 0.30734446
prometheus_engine_query_duration_seconds_sum{slice="inner_eval"} 105353.68790801686
prometheus_engine_query_duration_seconds_count{slice="inner_eval"} 9.123675e+06
prometheus_engine_query_duration_seconds{slice="prepare_time",quantile="0.5"} 1.3691e-05
prometheus_engine_query_duration_seconds{slice="prepare_time",quantile="0.9"} 0.00028686
prometheus_engine_query_duration_seconds{slice="prepare_time",quantile="0.99"} 0.029151557
prometheus_engine_query_duration_seconds_sum{slice="prepare_time"} 9549.929821039892
prometheus_engine_query_duration_seconds_count{slice="prepare_time"} 9.20852e+06
prometheus_engine_query_duration_seconds{slice="queue_time",quantile="0.5"} 6.81e-07
prometheus_engine_query_duration_seconds{slice="queue_time",quantile="0.9"} 1.732e-06
prometheus_engine_query_duration_seconds{slice="queue_time",quantile="0.99"} 3.135e-06
prometheus_engine_query_duration_seconds_sum{slice="queue_time"} 8.698055280003521
prometheus_engine_query_duration_seconds_count{slice="queue_time"} 9.208521e+06
prometheus_engine_query_duration_seconds{slice="result_sort",quantile="0.5"} 1.973e-06
prometheus_engine_query_duration_seconds{slice="result_sort",quantile="0.9"} 1.7404e-05
prometheus_engine_query_duration_seconds{slice="result_sort",quantile="0.99"} 0.000239566
prometheus_engine_query_duration_seconds_sum{slice="result_sort"} 3.730269538000008
prometheus_engine_query_duration_seconds_count{slice="result_sort"} 199394
# HELP prometheus_http_request_duration_seconds Histogram of latencies for HTTP requests.
# TYPE prometheus_http_request_duration_seconds histogram
prometheus_http_request_duration_seconds_bucket{handler="/",le="0.1"} 68
prometheus_http_request_duration_seconds_bucket{handler="/",le="0.2"} 68
prometheus_http_request_duration_seconds_bucket{handler="/",le="0.4"} 68
prometheus_http_request_duration_seconds_bucket{handler="/",le="1"} 68
prometheus_http_request_duration_seconds_bucket{handler="/",le="3"} 68
prometheus_http_request_duration_seconds_bucket{handler="/",le="8"} 68
prometheus_http_request_duration_seconds_bucket{handler="/",le="20"} 68
prometheus_http_request_duration_seconds_bucket{handler="/",le="60"} 68
prometheus_http_request_duration_seconds_bucket{handler="/",le="120"} 68
prometheus_http_request_duration_seconds_bucket{handler="/",le="+Inf"} 68
prometheus_http_request_duration_seconds_sum{handler="/"} 0.0025271030000000006
prometheus_http_request_duration_seconds_count{handler="/"} 68
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="0.1"} 12
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="0.2"} 14
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="0.4"} 14
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="1"} 14
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="3"} 14
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="8"} 14
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="20"} 14
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="60"} 14
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="120"} 14
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="+Inf"} 14
prometheus_http_request_duration_seconds_sum{handler="/api/v1/label/:name/values"} 0.795238114
prometheus_http_request_duration_seconds_count{handler="/api/v1/label/:name/values"} 14
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="0.1"} 17
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="0.2"} 17
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="0.4"} 17
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="1"} 17
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="3"} 17
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="8"} 17
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="20"} 17
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="60"} 17
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="120"} 17
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/labels",le="+Inf"} 17
prometheus_http_request_duration_seconds_sum{handler="/api/v1/labels"} 0.029974243999999997
prometheus_http_request_duration_seconds_count{handler="/api/v1/labels"} 17
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="0.1"} 68862
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="0.2"} 68878
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="0.4"} 68881
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="1"} 68881
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="3"} 68881
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="8"} 68881
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="20"} 68881
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="60"} 68881
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="120"} 68881
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="+Inf"} 68881
prometheus_http_request_duration_seconds_sum{handler="/api/v1/query"} 254.45557375399923
prometheus_http_request_duration_seconds_count{handler="/api/v1/query"} 68881
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="0.1"} 132435
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="0.2"} 138544
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="0.4"} 161485
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="1"} 186227
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="3"} 197252
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="8"} 199624
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="20"} 199880
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="60"} 199902
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="120"} 199902
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="+Inf"} 199902
prometheus_http_request_duration_seconds_sum{handler="/api/v1/query_range"} 56467.56259379606
prometheus_http_request_duration_seconds_count{handler="/api/v1/query_range"} 199902
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="0.1"} 2190
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="0.2"} 2190
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="0.4"} 2190
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="1"} 2190
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="3"} 2190
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="8"} 2190
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="20"} 2190
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="60"} 2190
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="120"} 2190
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/rules",le="+Inf"} 2190
prometheus_http_request_duration_seconds_sum{handler="/api/v1/rules"} 3.4587763020000053
prometheus_http_request_duration_seconds_count{handler="/api/v1/rules"} 2190
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="0.1"} 109
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="0.2"} 113
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="0.4"} 114
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="1"} 114
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="3"} 114
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="8"} 114
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="20"} 114
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="60"} 114
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="120"} 114
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/series",le="+Inf"} 114
prometheus_http_request_duration_seconds_sum{handler="/api/v1/series"} 3.01663147
prometheus_http_request_duration_seconds_count{handler="/api/v1/series"} 114
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="0.1"} 4
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="0.2"} 4
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="0.4"} 4
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="1"} 4
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="3"} 4
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="8"} 4
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="20"} 4
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="60"} 4
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="120"} 4
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/status/config",le="+Inf"} 4
prometheus_http_request_duration_seconds_sum{handler="/api/v1/status/config"} 0.009489309999999999
prometheus_http_request_duration_seconds_count{handler="/api/v1/status/config"} 4
prometheus_http_request_duration_seconds_bucket{handler="/config",le="0.1"} 4
prometheus_http_request_duration_seconds_bucket{handler="/config",le="0.2"} 4
prometheus_http_request_duration_seconds_bucket{handler="/config",le="0.4"} 4
prometheus_http_request_duration_seconds_bucket{handler="/config",le="1"} 4
prometheus_http_request_duration_seconds_bucket{handler="/config",le="3"} 4
prometheus_http_request_duration_seconds_bucket{handler="/config",le="8"} 4
prometheus_http_request_duration_seconds_bucket{handler="/config",le="20"} 4
prometheus_http_request_duration_seconds_bucket{handler="/config",le="60"} 4
prometheus_http_request_duration_seconds_bucket{handler="/config",le="120"} 4
prometheus_http_request_duration_seconds_bucket{handler="/config",le="+Inf"} 4
prometheus_http_request_duration_seconds_sum{handler="/config"} 0.041485656999999995
prometheus_http_request_duration_seconds_count{handler="/config"} 4
prometheus_http_request_duration_seconds_bucket{handler="/graph",le="0.1"} 150
prometheus_http_request_duration_seconds_bucket{handler="/graph",le="0.2"} 150
prometheus_http_request_duration_seconds_bucket{handler="/graph",le="0.4"} 150
prometheus_http_request_duration_seconds_bucket{handler="/graph",le="1"} 150
prometheus_http_request_duration_seconds_bucket{handler="/graph",le="3"} 150
prometheus_http_request_duration_seconds_bucket{handler="/graph",le="8"} 150
prometheus_http_request_duration_seconds_bucket{handler="/graph",le="20"} 150
prometheus_http_request_duration_seconds_bucket{handler="/graph",le="60"} 150
prometheus_http_request_duration_seconds_bucket{handler="/graph",le="120"} 150
prometheus_http_request_duration_seconds_bucket{handler="/graph",le="+Inf"} 150
prometheus_http_request_duration_seconds_sum{handler="/graph"} 0.3615746680000002
prometheus_http_request_duration_seconds_count{handler="/graph"} 150
prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.1"} 91981
prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.2"} 92067
prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.4"} 92148
prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="1"} 92163
prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="3"} 92166
prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="8"} 92166
prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="20"} 92166
prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="60"} 92166
prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="120"} 92166
prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="+Inf"} 92166
prometheus_http_request_duration_seconds_sum{handler="/metrics"} 397.2293122069929
prometheus_http_request_duration_seconds_count{handler="/metrics"} 92166
prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="0.1"} 71
prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="0.2"} 71
prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="0.4"} 71
prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="1"} 71
prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="3"} 71
prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="8"} 71
prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="20"} 71
prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="60"} 71
prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="120"} 71
prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="+Inf"} 71
prometheus_http_request_duration_seconds_sum{handler="/static/*filepath"} 0.06358778500000001
prometheus_http_request_duration_seconds_count{handler="/static/*filepath"} 71
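The _bucket series above are cumulative (each le bucket includes every observation below it), so per-handler latency percentiles are normally derived at query time with histogram_quantile() over a rate() of the buckets. A minimal sketch, assuming a recent prometheus/client_golang API client and a Prometheus address of http://localhost:9090 (both assumptions, not taken from the dump):

    // Sketch only: estimate p95 HTTP request latency per handler over 5m.
    package main

    import (
        "context"
        "fmt"
        "log"
        "time"

        "github.com/prometheus/client_golang/api"
        v1 "github.com/prometheus/client_golang/api/prometheus/v1"
    )

    func main() {
        client, err := api.NewClient(api.Config{Address: "http://localhost:9090"})
        if err != nil {
            log.Fatal(err)
        }
        promAPI := v1.NewAPI(client)

        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
        defer cancel()

        query := `histogram_quantile(0.95, sum(rate(prometheus_http_request_duration_seconds_bucket[5m])) by (le, handler))`
        result, warnings, err := promAPI.Query(ctx, query, time.Now())
        if err != nil {
            log.Fatal(err)
        }
        if len(warnings) > 0 {
            fmt.Println("warnings:", warnings)
        }
        fmt.Println(result)
    }

The same query string can be pasted into the expression browser directly; the Go wrapper is just one way to run it.
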
# HELP prometheus_http_response_size_bytes Histogram of response size for HTTP requests.
# TYPE prometheus_http_response_size_bytes histogram
prometheus_http_response_size_bytes_bucket{handler="/",le="100"} 68
prometheus_http_response_size_bytes_bucket{handler="/",le="1000"} 68
prometheus_http_response_size_bytes_bucket{handler="/",le="10000"} 68
prometheus_http_response_size_bytes_bucket{handler="/",le="100000"} 68
prometheus_http_response_size_bytes_bucket{handler="/",le="1e+06"} 68
prometheus_http_response_size_bytes_bucket{handler="/",le="1e+07"} 68
prometheus_http_response_size_bytes_bucket{handler="/",le="1e+08"} 68
prometheus_http_response_size_bytes_bucket{handler="/",le="1e+09"} 68
prometheus_http_response_size_bytes_bucket{handler="/",le="+Inf"} 68
prometheus_http_response_size_bytes_sum{handler="/"} 1972
prometheus_http_response_size_bytes_count{handler="/"} 68
prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="100"} 0
prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1000"} 0
prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="10000"} 14
prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="100000"} 14
prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+06"} 14
prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+07"} 14
prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+08"} 14
prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+09"} 14
prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="+Inf"} 14
prometheus_http_response_size_bytes_sum{handler="/api/v1/label/:name/values"} 64498
prometheus_http_response_size_bytes_count{handler="/api/v1/label/:name/values"} 14
prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="100"} 0
prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1000"} 17
prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="10000"} 17
prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="100000"} 17
prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1e+06"} 17
prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1e+07"} 17
prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1e+08"} 17
prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="1e+09"} 17
prometheus_http_response_size_bytes_bucket{handler="/api/v1/labels",le="+Inf"} 17
prometheus_http_response_size_bytes_sum{handler="/api/v1/labels"} 13498
prometheus_http_response_size_bytes_count{handler="/api/v1/labels"} 17
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="100"} 0
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1000"} 68881
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="10000"} 68881
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="100000"} 68881
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+06"} 68881
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+07"} 68881
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+08"} 68881
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+09"} 68881
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="+Inf"} 68881
prometheus_http_response_size_bytes_sum{handler="/api/v1/query"} 2.5397824e+07
prometheus_http_response_size_bytes_count{handler="/api/v1/query"} 68881
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="100"} 1576
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1000"} 11347
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="10000"} 125751
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="100000"} 197933
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+06"} 199902
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+07"} 199902
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+08"} 199902
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+09"} 199902
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="+Inf"} 199902
prometheus_http_response_size_bytes_sum{handler="/api/v1/query_range"} 2.98621058e+09
prometheus_http_response_size_bytes_count{handler="/api/v1/query_range"} 199902
prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="100"} 0
prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1000"} 0
prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="10000"} 2190
prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="100000"} 2190
prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1e+06"} 2190
prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1e+07"} 2190
prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1e+08"} 2190
prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="1e+09"} 2190
prometheus_http_response_size_bytes_bucket{handler="/api/v1/rules",le="+Inf"} 2190
prometheus_http_response_size_bytes_sum{handler="/api/v1/rules"} 6.94303e+06
prometheus_http_response_size_bytes_count{handler="/api/v1/rules"} 2190
prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="100"} 0
prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1000"} 0
prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="10000"} 114
prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="100000"} 114
prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1e+06"} 114
prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1e+07"} 114
prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1e+08"} 114
prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="1e+09"} 114
prometheus_http_response_size_bytes_bucket{handler="/api/v1/series",le="+Inf"} 114
prometheus_http_response_size_bytes_sum{handler="/api/v1/series"} 630899
prometheus_http_response_size_bytes_count{handler="/api/v1/series"} 114
prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="100"} 0
prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="1000"} 4
prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="10000"} 4
prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="100000"} 4
prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="1e+06"} 4
prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="1e+07"} 4
prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="1e+08"} 4
prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="1e+09"} 4
prometheus_http_response_size_bytes_bucket{handler="/api/v1/status/config",le="+Inf"} 4
prometheus_http_response_size_bytes_sum{handler="/api/v1/status/config"} 3228
prometheus_http_response_size_bytes_count{handler="/api/v1/status/config"} 4
prometheus_http_response_size_bytes_bucket{handler="/config",le="100"} 0
prometheus_http_response_size_bytes_bucket{handler="/config",le="1000"} 0
prometheus_http_response_size_bytes_bucket{handler="/config",le="10000"} 4
prometheus_http_response_size_bytes_bucket{handler="/config",le="100000"} 4
prometheus_http_response_size_bytes_bucket{handler="/config",le="1e+06"} 4
prometheus_http_response_size_bytes_bucket{handler="/config",le="1e+07"} 4
prometheus_http_response_size_bytes_bucket{handler="/config",le="1e+08"} 4
prometheus_http_response_size_bytes_bucket{handler="/config",le="1e+09"} 4
prometheus_http_response_size_bytes_bucket{handler="/config",le="+Inf"} 4
prometheus_http_response_size_bytes_sum{handler="/config"} 33776
prometheus_http_response_size_bytes_count{handler="/config"} 4
prometheus_http_response_size_bytes_bucket{handler="/graph",le="100"} 0
prometheus_http_response_size_bytes_bucket{handler="/graph",le="1000"} 0
prometheus_http_response_size_bytes_bucket{handler="/graph",le="10000"} 150
prometheus_http_response_size_bytes_bucket{handler="/graph",le="100000"} 150
prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+06"} 150
prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+07"} 150
prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+08"} 150
prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+09"} 150
prometheus_http_response_size_bytes_bucket{handler="/graph",le="+Inf"} 150
prometheus_http_response_size_bytes_sum{handler="/graph"} 867750
prometheus_http_response_size_bytes_count{handler="/graph"} 150
prometheus_http_response_size_bytes_bucket{handler="/metrics",le="100"} 0
prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1000"} 0
prometheus_http_response_size_bytes_bucket{handler="/metrics",le="10000"} 7253
prometheus_http_response_size_bytes_bucket{handler="/metrics",le="100000"} 92166
prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+06"} 92166
prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+07"} 92166
prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+08"} 92166
prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+09"} 92166
prometheus_http_response_size_bytes_bucket{handler="/metrics",le="+Inf"} 92166
prometheus_http_response_size_bytes_sum{handler="/metrics"} 9.56217626e+08
prometheus_http_response_size_bytes_count{handler="/metrics"} 92166
prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="100"} 0
prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="1000"} 12
prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="10000"} 32
prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="100000"} 61
prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="1e+06"} 71
prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="1e+07"} 71
prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="1e+08"} 71
prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="1e+09"} 71
prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="+Inf"} 71
prometheus_http_response_size_bytes_sum{handler="/static/*filepath"} 2.396816e+06
prometheus_http_response_size_bytes_count{handler="/static/*filepath"} 71
# HELP prometheus_notifications_alertmanagers_discovered The number of alertmanagers discovered and active.
# TYPE prometheus_notifications_alertmanagers_discovered gauge
prometheus_notifications_alertmanagers_discovered 1
# HELP prometheus_notifications_dropped_total Total number of alerts dropped due to errors when sending to Alertmanager.
# TYPE prometheus_notifications_dropped_total counter
prometheus_notifications_dropped_total 0
# HELP prometheus_notifications_errors_total Total number of errors sending alert notifications.
# TYPE prometheus_notifications_errors_total counter
prometheus_notifications_errors_total{alertmanager="http://alertmanager:80/api/v1/alerts"} 0
# HELP prometheus_notifications_latency_seconds Latency quantiles for sending alert notifications.
# TYPE prometheus_notifications_latency_seconds summary
prometheus_notifications_latency_seconds{alertmanager="http://alertmanager:80/api/v1/alerts",quantile="0.5"} 0.001409795
prometheus_notifications_latency_seconds{alertmanager="http://alertmanager:80/api/v1/alerts",quantile="0.9"} 0.005228601
prometheus_notifications_latency_seconds{alertmanager="http://alertmanager:80/api/v1/alerts",quantile="0.99"} 0.015177828
prometheus_notifications_latency_seconds_sum{alertmanager="http://alertmanager:80/api/v1/alerts"} 35.23376318799986
prometheus_notifications_latency_seconds_count{alertmanager="http://alertmanager:80/api/v1/alerts"} 17752
# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue.
# TYPE prometheus_notifications_queue_capacity gauge
prometheus_notifications_queue_capacity 10000
# HELP prometheus_notifications_queue_length The number of alert notifications in the queue.
# TYPE prometheus_notifications_queue_length gauge
prometheus_notifications_queue_length 0
# HELP prometheus_notifications_sent_total Total number of alerts sent.
# TYPE prometheus_notifications_sent_total counter
prometheus_notifications_sent_total{alertmanager="http://alertmanager:80/api/v1/alerts"} 20575
# HELP prometheus_remote_storage_highest_timestamp_in_seconds Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch.
# TYPE prometheus_remote_storage_highest_timestamp_in_seconds gauge
prometheus_remote_storage_highest_timestamp_in_seconds 1.764746224e+09
# HELP prometheus_remote_storage_samples_in_total Samples in to remote storage, compare to samples out for queue managers.
# TYPE prometheus_remote_storage_samples_in_total counter
prometheus_remote_storage_samples_in_total 1.1320091423e+10
# HELP prometheus_remote_storage_string_interner_zero_reference_releases_total The number of times release has been called for strings that are not interned.
# TYPE prometheus_remote_storage_string_interner_zero_reference_releases_total counter
prometheus_remote_storage_string_interner_zero_reference_releases_total 0
# HELP prometheus_rule_evaluation_duration_seconds The duration for a rule to execute.
# TYPE prometheus_rule_evaluation_duration_seconds summary
prometheus_rule_evaluation_duration_seconds{quantile="0.5"} 9.9759e-05
prometheus_rule_evaluation_duration_seconds{quantile="0.9"} 0.000983462
prometheus_rule_evaluation_duration_seconds{quantile="0.99"} 4.6803491489999995
prometheus_rule_evaluation_duration_seconds_sum 431315.54099302774
prometheus_rule_evaluation_duration_seconds_count 8.939738e+06
# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures.
# TYPE prometheus_rule_evaluation_failures_total counter
prometheus_rule_evaluation_failures_total 84338
# HELP prometheus_rule_evaluations_total The total number of rule evaluations.
# TYPE prometheus_rule_evaluations_total counter
prometheus_rule_evaluations_total 8.939738e+06
# HELP prometheus_rule_group_duration_seconds The duration of rule group evaluations.
# TYPE prometheus_rule_group_duration_seconds summary
prometheus_rule_group_duration_seconds{quantile="0.01"} 0.000147567
prometheus_rule_group_duration_seconds{quantile="0.05"} 0.000176749
prometheus_rule_group_duration_seconds{quantile="0.5"} 0.000542086
prometheus_rule_group_duration_seconds{quantile="0.9"} 0.018962715
prometheus_rule_group_duration_seconds{quantile="0.99"} 5.107468569
prometheus_rule_group_duration_seconds_sum 431333.8094332244
prometheus_rule_group_duration_seconds_count 1.198112e+06
# HELP prometheus_rule_group_interval_seconds The interval of a rule group.
# TYPE prometheus_rule_group_interval_seconds gauge
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/alerting.yml;application alerts"} 15
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/alerting.yml;source alerts"} 15
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/coordinator-rule.yml;calculation"} 15
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/coordinator-rule.yml;data_collect"} 15
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/kubernetes-rule.yml;big_screen"} 15
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/kubernetes-rule.yml;cluster_namespace_containers"} 15
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/kubernetes-rule.yml;cluster_namespace_deployments"} 15
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/kubernetes-rule.yml;cluster_namespace_pods"} 15
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/kubernetes-rule.yml;cluster_node"} 15
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/kubernetes-rule.yml;cluster_pods"} 15
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/kubernetes-rule.yml;cluster_resources"} 15
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/nodes-rule.yml;node"} 15
prometheus_rule_group_interval_seconds{rule_group="/etc/prometheus/pods-rule.yml;pods"} 15
# HELP prometheus_rule_group_iterations_missed_total The total number of rule group evaluations missed due to slow rule group evaluation.
# TYPE prometheus_rule_group_iterations_missed_total counter
prometheus_rule_group_iterations_missed_total 1
# HELP prometheus_rule_group_iterations_total The total number of scheduled rule group evaluations, whether executed or missed.
# TYPE prometheus_rule_group_iterations_total counter
prometheus_rule_group_iterations_total 1.198112e+06
# HELP prometheus_rule_group_last_duration_seconds The duration of the last rule group evaluation.
# TYPE prometheus_rule_group_last_duration_seconds gauge
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/alerting.yml;application alerts"} 0.021828983
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/alerting.yml;source alerts"} 0.0050434
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/coordinator-rule.yml;calculation"} 0.000302039
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/coordinator-rule.yml;data_collect"} 5.211803629
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/kubernetes-rule.yml;big_screen"} 0.000664663
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/kubernetes-rule.yml;cluster_namespace_containers"} 0.000479522
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/kubernetes-rule.yml;cluster_namespace_deployments"} 0.000309645
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/kubernetes-rule.yml;cluster_namespace_pods"} 0.000196916
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/kubernetes-rule.yml;cluster_node"} 0.000267281
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/kubernetes-rule.yml;cluster_pods"} 0.000163082
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/kubernetes-rule.yml;cluster_resources"} 0.001597619
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/nodes-rule.yml;node"} 0.010551283
prometheus_rule_group_last_duration_seconds{rule_group="/etc/prometheus/pods-rule.yml;pods"} 0.008988585
# HELP prometheus_rule_group_last_evaluation_timestamp_seconds The timestamp of the last rule group evaluation in seconds.
# TYPE prometheus_rule_group_last_evaluation_timestamp_seconds gauge
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/alerting.yml;application alerts"} 1.7647462176904953e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/alerting.yml;source alerts"} 1.7647462226504738e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/coordinator-rule.yml;calculation"} 1.7647462204282355e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/coordinator-rule.yml;data_collect"} 1.764746216456452e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/kubernetes-rule.yml;big_screen"} 1.7647462122464023e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/kubernetes-rule.yml;cluster_namespace_containers"} 1.7647462231253572e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/kubernetes-rule.yml;cluster_namespace_deployments"} 1.7647462110812628e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/kubernetes-rule.yml;cluster_namespace_pods"} 1.7647462141322892e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/kubernetes-rule.yml;cluster_node"} 1.7647462213006268e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/kubernetes-rule.yml;cluster_pods"} 1.7647462216930108e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/kubernetes-rule.yml;cluster_resources"} 1.7647462173600144e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/nodes-rule.yml;node"} 1.764746211180466e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/prometheus/pods-rule.yml;pods"} 1.764746214770234e+09
# HELP prometheus_rule_group_rules The number of rules.
# TYPE prometheus_rule_group_rules gauge
prometheus_rule_group_rules{rule_group="/etc/prometheus/alerting.yml;application alerts"} 5
prometheus_rule_group_rules{rule_group="/etc/prometheus/alerting.yml;source alerts"} 5
prometheus_rule_group_rules{rule_group="/etc/prometheus/coordinator-rule.yml;calculation"} 4
prometheus_rule_group_rules{rule_group="/etc/prometheus/coordinator-rule.yml;data_collect"} 3
prometheus_rule_group_rules{rule_group="/etc/prometheus/kubernetes-rule.yml;big_screen"} 12
prometheus_rule_group_rules{rule_group="/etc/prometheus/kubernetes-rule.yml;cluster_namespace_containers"} 6
prometheus_rule_group_rules{rule_group="/etc/prometheus/kubernetes-rule.yml;cluster_namespace_deployments"} 4
prometheus_rule_group_rules{rule_group="/etc/prometheus/kubernetes-rule.yml;cluster_namespace_pods"} 1
prometheus_rule_group_rules{rule_group="/etc/prometheus/kubernetes-rule.yml;cluster_node"} 3
prometheus_rule_group_rules{rule_group="/etc/prometheus/kubernetes-rule.yml;cluster_pods"} 1
prometheus_rule_group_rules{rule_group="/etc/prometheus/kubernetes-rule.yml;cluster_resources"} 8
prometheus_rule_group_rules{rule_group="/etc/prometheus/nodes-rule.yml;node"} 34
prometheus_rule_group_rules{rule_group="/etc/prometheus/pods-rule.yml;pods"} 11
# HELP prometheus_sd_consul_rpc_duration_seconds The duration of a Consul RPC call in seconds.
# TYPE prometheus_sd_consul_rpc_duration_seconds summary
prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN
prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN
prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN
prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0
prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0
prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN
prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN
prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN
prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0
prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0
# HELP prometheus_sd_consul_rpc_failures_total The number of Consul RPC call failures.
# TYPE prometheus_sd_consul_rpc_failures_total counter
prometheus_sd_consul_rpc_failures_total 0
# HELP prometheus_sd_discovered_targets Current number of discovered targets.
# TYPE prometheus_sd_discovered_targets gauge
prometheus_sd_discovered_targets{config="5e32f7d98f85ff2d9f2677f4a40d8aa9",name="notify"} 1
prometheus_sd_discovered_targets{config="kubernetes-apiservers",name="scrape"} 110
prometheus_sd_discovered_targets{config="kubernetes-cadvisor",name="scrape"} 4
prometheus_sd_discovered_targets{config="kubernetes-ingress",name="scrape"} 63
prometheus_sd_discovered_targets{config="kubernetes-nodes",name="scrape"} 4
prometheus_sd_discovered_targets{config="kubernetes-pods",name="scrape"} 132
# HELP prometheus_sd_dns_lookup_failures_total The number of DNS-SD lookup failures.
# TYPE prometheus_sd_dns_lookup_failures_total counter
prometheus_sd_dns_lookup_failures_total 0
# HELP prometheus_sd_dns_lookups_total The number of DNS-SD lookups.
# TYPE prometheus_sd_dns_lookups_total counter
prometheus_sd_dns_lookups_total 0
# HELP prometheus_sd_file_read_errors_total The number of File-SD read errors.
# TYPE prometheus_sd_file_read_errors_total counter
prometheus_sd_file_read_errors_total 0
# HELP prometheus_sd_file_scan_duration_seconds The duration of the File-SD scan in seconds.
# TYPE prometheus_sd_file_scan_duration_seconds summary
prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN
prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN
prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN
prometheus_sd_file_scan_duration_seconds_sum 0
prometheus_sd_file_scan_duration_seconds_count 0
# HELP prometheus_sd_kubernetes_cache_last_resource_version Last resource version from the Kubernetes API.
# TYPE prometheus_sd_kubernetes_cache_last_resource_version gauge
prometheus_sd_kubernetes_cache_last_resource_version 3.40702399e+08
# HELP prometheus_sd_kubernetes_cache_list_duration_seconds Duration of a Kubernetes API call in seconds.
# TYPE prometheus_sd_kubernetes_cache_list_duration_seconds summary
prometheus_sd_kubernetes_cache_list_duration_seconds_sum 0.208261065
prometheus_sd_kubernetes_cache_list_duration_seconds_count 6
# HELP prometheus_sd_kubernetes_cache_list_items Count of items in a list from the Kubernetes API.
# TYPE prometheus_sd_kubernetes_cache_list_items summary
prometheus_sd_kubernetes_cache_list_items_sum 255
prometheus_sd_kubernetes_cache_list_items_count 6
# HELP prometheus_sd_kubernetes_cache_list_total Total number of list operations.
# TYPE prometheus_sd_kubernetes_cache_list_total counter
prometheus_sd_kubernetes_cache_list_total 6
# HELP prometheus_sd_kubernetes_cache_short_watches_total Total number of short watch operations.
# TYPE prometheus_sd_kubernetes_cache_short_watches_total counter
prometheus_sd_kubernetes_cache_short_watches_total 0
# HELP prometheus_sd_kubernetes_cache_watch_duration_seconds Duration of watches on the Kubernetes API.
# TYPE prometheus_sd_kubernetes_cache_watch_duration_seconds summary
prometheus_sd_kubernetes_cache_watch_duration_seconds_sum 8.292948617164733e+06
prometheus_sd_kubernetes_cache_watch_duration_seconds_count 18401
# HELP prometheus_sd_kubernetes_cache_watch_events Number of items in watches on the Kubernetes API.
# TYPE prometheus_sd_kubernetes_cache_watch_events summary
prometheus_sd_kubernetes_cache_watch_events_sum 2.770063e+06
prometheus_sd_kubernetes_cache_watch_events_count 18401
# HELP prometheus_sd_kubernetes_cache_watches_total Total number of watch operations.
# TYPE prometheus_sd_kubernetes_cache_watches_total counter
prometheus_sd_kubernetes_cache_watches_total 18407
# HELP prometheus_sd_kubernetes_events_total The number of Kubernetes events handled.
# TYPE prometheus_sd_kubernetes_events_total counter
prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 47
prometheus_sd_kubernetes_events_total{event="add",role="ingress"} 36
prometheus_sd_kubernetes_events_total{event="add",role="node"} 4
prometheus_sd_kubernetes_events_total{event="add",role="pod"} 116
prometheus_sd_kubernetes_events_total{event="add",role="service"} 43
prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 4
prometheus_sd_kubernetes_events_total{event="delete",role="ingress"} 4
prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0
prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 45
prometheus_sd_kubernetes_events_total{event="delete",role="service"} 4
prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 2.849767e+06
prometheus_sd_kubernetes_events_total{event="update",role="ingress"} 73148
prometheus_sd_kubernetes_events_total{event="update",role="node"} 27650
prometheus_sd_kubernetes_events_total{event="update",role="pod"} 163338
prometheus_sd_kubernetes_events_total{event="update",role="service"} 89273
# HELP prometheus_sd_kubernetes_http_request_duration_seconds Summary of latencies for HTTP requests to the Kubernetes API by endpoint.
# TYPE prometheus_sd_kubernetes_http_request_duration_seconds summary
prometheus_sd_kubernetes_http_request_duration_seconds_sum{endpoint="/%7Bprefix%7D"} 0.16554870800000002
prometheus_sd_kubernetes_http_request_duration_seconds_count{endpoint="/%7Bprefix%7D"} 6
# HELP prometheus_sd_kubernetes_http_request_total Total number of HTTP requests to the Kubernetes API by status code.
# TYPE prometheus_sd_kubernetes_http_request_total counter
prometheus_sd_kubernetes_http_request_total{status_code="200"} 18413
# HELP prometheus_sd_kubernetes_workqueue_depth Current depth of the work queue.
# TYPE prometheus_sd_kubernetes_workqueue_depth gauge
prometheus_sd_kubernetes_workqueue_depth{queue_name="endpoints"} 0
prometheus_sd_kubernetes_workqueue_depth{queue_name="ingress"} 0
prometheus_sd_kubernetes_workqueue_depth{queue_name="node"} 0
prometheus_sd_kubernetes_workqueue_depth{queue_name="pod"} 0
# HELP prometheus_sd_kubernetes_workqueue_items_total Total number of items added to the work queue.
# TYPE prometheus_sd_kubernetes_workqueue_items_total counter
prometheus_sd_kubernetes_workqueue_items_total{queue_name="endpoints"} 2.932389e+06
prometheus_sd_kubernetes_workqueue_items_total{queue_name="ingress"} 73188
prometheus_sd_kubernetes_workqueue_items_total{queue_name="node"} 27654
prometheus_sd_kubernetes_workqueue_items_total{queue_name="pod"} 163497
# HELP prometheus_sd_kubernetes_workqueue_latency_seconds How long an item stays in the work queue.
# TYPE prometheus_sd_kubernetes_workqueue_latency_seconds summary
prometheus_sd_kubernetes_workqueue_latency_seconds_sum{queue_name="endpoints"} 302.93509199252367
prometheus_sd_kubernetes_workqueue_latency_seconds_count{queue_name="endpoints"} 2.932389e+06
prometheus_sd_kubernetes_workqueue_latency_seconds_sum{queue_name="ingress"} 44.865398000000624
prometheus_sd_kubernetes_workqueue_latency_seconds_count{queue_name="ingress"} 73188
prometheus_sd_kubernetes_workqueue_latency_seconds_sum{queue_name="node"} 1.629110999999854
prometheus_sd_kubernetes_workqueue_latency_seconds_count{queue_name="node"} 27654
prometheus_sd_kubernetes_workqueue_latency_seconds_sum{queue_name="pod"} 102.3401360000022
prometheus_sd_kubernetes_workqueue_latency_seconds_count{queue_name="pod"} 163497
# HELP prometheus_sd_kubernetes_workqueue_longest_running_processor_seconds Duration of the longest running processor in the work queue.
# TYPE prometheus_sd_kubernetes_workqueue_longest_running_processor_seconds gauge
prometheus_sd_kubernetes_workqueue_longest_running_processor_seconds{queue_name="endpoints"} 0
prometheus_sd_kubernetes_workqueue_longest_running_processor_seconds{queue_name="ingress"} 0
prometheus_sd_kubernetes_workqueue_longest_running_processor_seconds{queue_name="node"} 0
prometheus_sd_kubernetes_workqueue_longest_running_processor_seconds{queue_name="pod"} 0
# HELP prometheus_sd_kubernetes_workqueue_unfinished_work_seconds How long an item has remained unfinished in the work queue.
# TYPE prometheus_sd_kubernetes_workqueue_unfinished_work_seconds gauge
prometheus_sd_kubernetes_workqueue_unfinished_work_seconds{queue_name="endpoints"} 0
prometheus_sd_kubernetes_workqueue_unfinished_work_seconds{queue_name="ingress"} 0
prometheus_sd_kubernetes_workqueue_unfinished_work_seconds{queue_name="node"} 0
prometheus_sd_kubernetes_workqueue_unfinished_work_seconds{queue_name="pod"} 0
# HELP prometheus_sd_kubernetes_workqueue_work_duration_seconds How long processing an item from the work queue takes.
# TYPE prometheus_sd_kubernetes_workqueue_work_duration_seconds summary
prometheus_sd_kubernetes_workqueue_work_duration_seconds_sum{queue_name="endpoints"} 56.58850600035565
prometheus_sd_kubernetes_workqueue_work_duration_seconds_count{queue_name="endpoints"} 2.932389e+06
prometheus_sd_kubernetes_workqueue_work_duration_seconds_sum{queue_name="ingress"} 2.937293000000097
prometheus_sd_kubernetes_workqueue_work_duration_seconds_count{queue_name="ingress"} 73188
prometheus_sd_kubernetes_workqueue_work_duration_seconds_sum{queue_name="node"} 2.4412049999999765
prometheus_sd_kubernetes_workqueue_work_duration_seconds_count{queue_name="node"} 27654
prometheus_sd_kubernetes_workqueue_work_duration_seconds_sum{queue_name="pod"} 3.290884000000746
prometheus_sd_kubernetes_workqueue_work_duration_seconds_count{queue_name="pod"} 163497
# HELP prometheus_sd_received_updates_total Total number of update events received from the SD providers.
# TYPE prometheus_sd_received_updates_total counter
prometheus_sd_received_updates_total{name="notify"} 2
prometheus_sd_received_updates_total{name="scrape"} 3.196728e+06
# HELP prometheus_sd_updates_total Total number of update events sent to the SD consumers.
# TYPE prometheus_sd_updates_total counter
prometheus_sd_updates_total{name="notify"} 1
prometheus_sd_updates_total{name="scrape"} 276488
# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes.
# TYPE prometheus_target_interval_length_seconds summary
prometheus_target_interval_length_seconds{interval="15s",quantile="0.01"} 14.975593418
prometheus_target_interval_length_seconds{interval="15s",quantile="0.05"} 14.998995841
prometheus_target_interval_length_seconds{interval="15s",quantile="0.5"} 15.000014577
prometheus_target_interval_length_seconds{interval="15s",quantile="0.9"} 15.000042284
prometheus_target_interval_length_seconds{interval="15s",quantile="0.99"} 15.030414507
prometheus_target_interval_length_seconds_sum{interval="15s"} 8.63919340759314e+07
prometheus_target_interval_length_seconds_count{interval="15s"} 5.759456e+06
# HELP prometheus_target_scrape_pool_reloads_failed_total Total number of failed scrape loop reloads.
# TYPE prometheus_target_scrape_pool_reloads_failed_total counter
prometheus_target_scrape_pool_reloads_failed_total 0
# HELP prometheus_target_scrape_pool_reloads_total Total number of scrape loop reloads.
# TYPE prometheus_target_scrape_pool_reloads_total counter
prometheus_target_scrape_pool_reloads_total 0
# HELP prometheus_target_scrape_pool_sync_total Total number of syncs that were executed on a scrape pool.
# TYPE prometheus_target_scrape_pool_sync_total counter
prometheus_target_scrape_pool_sync_total{scrape_job="kubernetes-apiservers"} 276485
prometheus_target_scrape_pool_sync_total{scrape_job="kubernetes-cadvisor"} 276485
prometheus_target_scrape_pool_sync_total{scrape_job="kubernetes-ingress"} 276485
prometheus_target_scrape_pool_sync_total{scrape_job="kubernetes-nodes"} 276485
prometheus_target_scrape_pool_sync_total{scrape_job="kubernetes-pods"} 276485
# HELP prometheus_target_scrape_pools_failed_total Total number of scrape pool creations that failed.
# TYPE prometheus_target_scrape_pools_failed_total counter
prometheus_target_scrape_pools_failed_total 0
# HELP prometheus_target_scrape_pools_total Total number of scrape pool creation attempts.
# TYPE prometheus_target_scrape_pools_total counter
prometheus_target_scrape_pools_total 5
# HELP prometheus_target_scrapes_cache_flush_forced_total How many times a scrape cache was flushed because it grew too large while scrapes were failing.
# TYPE prometheus_target_scrapes_cache_flush_forced_total counter
prometheus_target_scrapes_cache_flush_forced_total 0
# HELP prometheus_target_scrapes_exceeded_sample_limit_total Total number of scrapes that hit the sample limit and were rejected.
# TYPE prometheus_target_scrapes_exceeded_sample_limit_total counter
prometheus_target_scrapes_exceeded_sample_limit_total 0
# HELP prometheus_target_scrapes_sample_duplicate_timestamp_total Total number of samples rejected due to duplicate timestamps but different values.
# TYPE prometheus_target_scrapes_sample_duplicate_timestamp_total counter
prometheus_target_scrapes_sample_duplicate_timestamp_total 0
# HELP prometheus_target_scrapes_sample_out_of_bounds_total Total number of samples rejected due to timestamps falling outside of the time bounds.
# TYPE prometheus_target_scrapes_sample_out_of_bounds_total counter
prometheus_target_scrapes_sample_out_of_bounds_total 0
# HELP prometheus_target_scrapes_sample_out_of_order_total Total number of samples rejected due to being out of the expected order.
# TYPE prometheus_target_scrapes_sample_out_of_order_total counter
prometheus_target_scrapes_sample_out_of_order_total 0
# HELP prometheus_target_sync_length_seconds Actual interval to sync the scrape pool.
# TYPE prometheus_target_sync_length_seconds summary
prometheus_target_sync_length_seconds{scrape_job="kubernetes-apiservers",quantile="0.01"} 0.001498287
prometheus_target_sync_length_seconds{scrape_job="kubernetes-apiservers",quantile="0.05"} 0.001708464
prometheus_target_sync_length_seconds{scrape_job="kubernetes-apiservers",quantile="0.5"} 0.002141453
prometheus_target_sync_length_seconds{scrape_job="kubernetes-apiservers",quantile="0.9"} 0.002849261
prometheus_target_sync_length_seconds{scrape_job="kubernetes-apiservers",quantile="0.99"} 0.041036137
prometheus_target_sync_length_seconds_sum{scrape_job="kubernetes-apiservers"} 681.9698275580005
prometheus_target_sync_length_seconds_count{scrape_job="kubernetes-apiservers"} 276485
prometheus_target_sync_length_seconds{scrape_job="kubernetes-cadvisor",quantile="0.01"} 0.000224371
prometheus_target_sync_length_seconds{scrape_job="kubernetes-cadvisor",quantile="0.05"} 0.000292776
prometheus_target_sync_length_seconds{scrape_job="kubernetes-cadvisor",quantile="0.5"} 0.000357172
prometheus_target_sync_length_seconds{scrape_job="kubernetes-cadvisor",quantile="0.9"} 0.000444324
prometheus_target_sync_length_seconds{scrape_job="kubernetes-cadvisor",quantile="0.99"} 0.001110127
prometheus_target_sync_length_seconds_sum{scrape_job="kubernetes-cadvisor"} 116.75683640700026
prometheus_target_sync_length_seconds_count{scrape_job="kubernetes-cadvisor"} 276485
prometheus_target_sync_length_seconds{scrape_job="kubernetes-ingress",quantile="0.01"} 0.000745745
prometheus_target_sync_length_seconds{scrape_job="kubernetes-ingress",quantile="0.05"} 0.000936612
prometheus_target_sync_length_seconds{scrape_job="kubernetes-ingress",quantile="0.5"} 0.001140299
prometheus_target_sync_length_seconds{scrape_job="kubernetes-ingress",quantile="0.9"} 0.00183842
prometheus_target_sync_length_seconds{scrape_job="kubernetes-ingress",quantile="0.99"} 0.006700502
prometheus_target_sync_length_seconds_sum{scrape_job="kubernetes-ingress"} 369.14462979500513
prometheus_target_sync_length_seconds_count{scrape_job="kubernetes-ingress"} 276485
prometheus_target_sync_length_seconds{scrape_job="kubernetes-nodes",quantile="0.01"} 0.000246123
prometheus_target_sync_length_seconds{scrape_job="kubernetes-nodes",quantile="0.05"} 0.000344892
prometheus_target_sync_length_seconds{scrape_job="kubernetes-nodes",quantile="0.5"} 0.000404851
prometheus_target_sync_length_seconds{scrape_job="kubernetes-nodes",quantile="0.9"} 0.000571649
prometheus_target_sync_length_seconds{scrape_job="kubernetes-nodes",quantile="0.99"} 0.001915182
prometheus_target_sync_length_seconds_sum{scrape_job="kubernetes-nodes"} 130.35728222000228
prometheus_target_sync_length_seconds_count{scrape_job="kubernetes-nodes"} 276485
prometheus_target_sync_length_seconds{scrape_job="kubernetes-pods",quantile="0.01"} 0.003315237
prometheus_target_sync_length_seconds{scrape_job="kubernetes-pods",quantile="0.05"} 0.003507134
prometheus_target_sync_length_seconds{scrape_job="kubernetes-pods",quantile="0.5"} 0.004060335
prometheus_target_sync_length_seconds{scrape_job="kubernetes-pods",quantile="0.9"} 0.0063015
prometheus_target_sync_length_seconds{scrape_job="kubernetes-pods",quantile="0.99"} 0.051229133
prometheus_target_sync_length_seconds_sum{scrape_job="kubernetes-pods"} 1439.7734034830082
prometheus_target_sync_length_seconds_count{scrape_job="kubernetes-pods"} 276485
# HELP prometheus_template_text_expansion_failures_total The total number of template text expansion failures.
# TYPE prometheus_template_text_expansion_failures_total counter
prometheus_template_text_expansion_failures_total 0
# HELP prometheus_template_text_expansions_total The total number of template text expansions.
# TYPE prometheus_template_text_expansions_total counter
prometheus_template_text_expansions_total 221269
# HELP prometheus_treecache_watcher_goroutines The current number of watcher goroutines.
# TYPE prometheus_treecache_watcher_goroutines gauge
prometheus_treecache_watcher_goroutines 0
# HELP prometheus_treecache_zookeeper_failures_total The total number of ZooKeeper failures.
# TYPE prometheus_treecache_zookeeper_failures_total counter
prometheus_treecache_zookeeper_failures_total 0
# HELP prometheus_tsdb_blocks_loaded Number of currently loaded data blocks
# TYPE prometheus_tsdb_blocks_loaded gauge
prometheus_tsdb_blocks_loaded 19
# HELP prometheus_tsdb_checkpoint_creations_failed_total Total number of checkpoint creations that failed.
# TYPE prometheus_tsdb_checkpoint_creations_failed_total counter
prometheus_tsdb_checkpoint_creations_failed_total 0
# HELP prometheus_tsdb_checkpoint_creations_total Total number of checkpoint creations attempted.
# TYPE prometheus_tsdb_checkpoint_creations_total counter
prometheus_tsdb_checkpoint_creations_total 192
# HELP prometheus_tsdb_checkpoint_deletions_failed_total Total number of checkpoint deletions that failed.
# TYPE prometheus_tsdb_checkpoint_deletions_failed_total counter
prometheus_tsdb_checkpoint_deletions_failed_total 0
# HELP prometheus_tsdb_checkpoint_deletions_total Total number of checkpoint deletions attempted.
# TYPE prometheus_tsdb_checkpoint_deletions_total counter
prometheus_tsdb_checkpoint_deletions_total 192
# HELP prometheus_tsdb_compaction_chunk_range_seconds Final time range of chunks on their first compaction
# TYPE prometheus_tsdb_compaction_chunk_range_seconds histogram
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="100"} 2236
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="400"} 2236
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="1600"} 2236
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="6400"} 2236
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="25600"} 5296
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="102400"} 84161
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="409600"} 350892
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="1.6384e+06"} 1.418893e+06
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="6.5536e+06"} 9.2665145e+07
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="2.62144e+07"} 9.266531e+07
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="+Inf"} 9.266531e+07
prometheus_tsdb_compaction_chunk_range_seconds_sum 1.68548930981539e+14
prometheus_tsdb_compaction_chunk_range_seconds_count 9.266531e+07
# HELP prometheus_tsdb_compaction_chunk_samples Final number of samples on their first compaction
# TYPE prometheus_tsdb_compaction_chunk_samples histogram
prometheus_tsdb_compaction_chunk_samples_bucket{le="4"} 74977
prometheus_tsdb_compaction_chunk_samples_bucket{le="6"} 81093
prometheus_tsdb_compaction_chunk_samples_bucket{le="9"} 90518
prometheus_tsdb_compaction_chunk_samples_bucket{le="13.5"} 121959
prometheus_tsdb_compaction_chunk_samples_bucket{le="20.25"} 154206
prometheus_tsdb_compaction_chunk_samples_bucket{le="30.375"} 362071
prometheus_tsdb_compaction_chunk_samples_bucket{le="45.5625"} 655408
prometheus_tsdb_compaction_chunk_samples_bucket{le="68.34375"} 1.022042e+06
prometheus_tsdb_compaction_chunk_samples_bucket{le="102.515625"} 1.380031e+06 prometheus_tsdb_compaction_chunk_samples_bucket{le="153.7734375"} 9.1534867e+07 prometheus_tsdb_compaction_chunk_samples_bucket{le="230.66015625"} 9.2653452e+07 prometheus_tsdb_compaction_chunk_samples_bucket{le="345.990234375"} 9.2665306e+07 prometheus_tsdb_compaction_chunk_samples_bucket{le="+Inf"} 9.266531e+07 prometheus_tsdb_compaction_chunk_samples_sum 1.1240281554e+10 prometheus_tsdb_compaction_chunk_samples_count 9.266531e+07 # HELP prometheus_tsdb_compaction_chunk_size_bytes Final size of chunks on their first compaction # TYPE prometheus_tsdb_compaction_chunk_size_bytes histogram prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="32"} 187159 prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="48"} 2.8931435e+07 prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="72"} 6.6134506e+07 prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="108"} 7.4351254e+07 prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="162"} 7.8248962e+07 prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="243"} 8.190013e+07 prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="364.5"} 9.0381965e+07 prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="546.75"} 9.1614527e+07 prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="820.125"} 9.2156052e+07 prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="1230.1875"} 9.2655554e+07 prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="1845.28125"} 9.2665248e+07 prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="2767.921875"} 9.266531e+07 prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="+Inf"} 9.266531e+07 prometheus_tsdb_compaction_chunk_size_bytes_sum 9.62069295e+09 prometheus_tsdb_compaction_chunk_size_bytes_count 9.266531e+07 # HELP prometheus_tsdb_compaction_duration_seconds Duration of compaction runs # TYPE prometheus_tsdb_compaction_duration_seconds histogram prometheus_tsdb_compaction_duration_seconds_bucket{le="1"} 0 prometheus_tsdb_compaction_duration_seconds_bucket{le="2"} 26 prometheus_tsdb_compaction_duration_seconds_bucket{le="4"} 191 prometheus_tsdb_compaction_duration_seconds_bucket{le="8"} 254 prometheus_tsdb_compaction_duration_seconds_bucket{le="16"} 277 prometheus_tsdb_compaction_duration_seconds_bucket{le="32"} 281 prometheus_tsdb_compaction_duration_seconds_bucket{le="64"} 284 prometheus_tsdb_compaction_duration_seconds_bucket{le="128"} 284 prometheus_tsdb_compaction_duration_seconds_bucket{le="256"} 284 prometheus_tsdb_compaction_duration_seconds_bucket{le="512"} 284 prometheus_tsdb_compaction_duration_seconds_bucket{le="+Inf"} 284 prometheus_tsdb_compaction_duration_seconds_sum 1288.9636564680004 prometheus_tsdb_compaction_duration_seconds_count 284 # HELP prometheus_tsdb_compaction_populating_block Set to 1 when a block is currently being written to the disk. # TYPE prometheus_tsdb_compaction_populating_block gauge prometheus_tsdb_compaction_populating_block 0 # HELP prometheus_tsdb_compactions_failed_total Total number of compactions that failed for the partition. # TYPE prometheus_tsdb_compactions_failed_total counter prometheus_tsdb_compactions_failed_total 0 # HELP prometheus_tsdb_compactions_total Total number of compactions that were executed for the partition. # TYPE prometheus_tsdb_compactions_total counter prometheus_tsdb_compactions_total 284 # HELP prometheus_tsdb_compactions_triggered_total Total number of triggered compactions for the partition. 
# TYPE prometheus_tsdb_compactions_triggered_total counter
prometheus_tsdb_compactions_triggered_total 23225
# HELP prometheus_tsdb_head_active_appenders Number of currently active appender transactions
# TYPE prometheus_tsdb_head_active_appenders gauge
prometheus_tsdb_head_active_appenders 0
# HELP prometheus_tsdb_head_chunks Total number of chunks in the head block.
# TYPE prometheus_tsdb_head_chunks gauge
prometheus_tsdb_head_chunks 417259
# HELP prometheus_tsdb_head_chunks_created_total Total number of chunks created in the head
# TYPE prometheus_tsdb_head_chunks_created_total counter
prometheus_tsdb_head_chunks_created_total 9.3082569e+07
# HELP prometheus_tsdb_head_chunks_removed_total Total number of chunks removed in the head
# TYPE prometheus_tsdb_head_chunks_removed_total counter
prometheus_tsdb_head_chunks_removed_total 9.266531e+07
# HELP prometheus_tsdb_head_gc_duration_seconds Runtime of garbage collection in the head block.
# TYPE prometheus_tsdb_head_gc_duration_seconds summary
prometheus_tsdb_head_gc_duration_seconds{quantile="0.5"} NaN
prometheus_tsdb_head_gc_duration_seconds{quantile="0.9"} NaN
prometheus_tsdb_head_gc_duration_seconds{quantile="0.99"} NaN
prometheus_tsdb_head_gc_duration_seconds_sum 24.44211323499999
prometheus_tsdb_head_gc_duration_seconds_count 192
# HELP prometheus_tsdb_head_max_time Maximum timestamp of the head block. The unit is decided by the library consumer.
# TYPE prometheus_tsdb_head_max_time gauge
prometheus_tsdb_head_max_time 1.764746224409e+12
# HELP prometheus_tsdb_head_max_time_seconds Maximum timestamp of the head block.
# TYPE prometheus_tsdb_head_max_time_seconds gauge
prometheus_tsdb_head_max_time_seconds 1.764746224409e+09
# HELP prometheus_tsdb_head_min_time Minimum time bound of the head block. The unit is decided by the library consumer.
# TYPE prometheus_tsdb_head_min_time gauge
prometheus_tsdb_head_min_time 1.7647416e+12
# HELP prometheus_tsdb_head_min_time_seconds Minimum time bound of the head block.
# TYPE prometheus_tsdb_head_min_time_seconds gauge
prometheus_tsdb_head_min_time_seconds 1.7647416e+09
# HELP prometheus_tsdb_head_samples_appended_total Total number of appended samples.
# TYPE prometheus_tsdb_head_samples_appended_total counter
prometheus_tsdb_head_samples_appended_total 1.1231962977e+10
# HELP prometheus_tsdb_head_series Total number of series in the head block.
# TYPE prometheus_tsdb_head_series gauge
prometheus_tsdb_head_series 143088
# HELP prometheus_tsdb_head_series_created_total Total number of series created in the head
# TYPE prometheus_tsdb_head_series_created_total counter
prometheus_tsdb_head_series_created_total 1.348971e+06
# HELP prometheus_tsdb_head_series_not_found_total Total number of requests for series that were not found.
# TYPE prometheus_tsdb_head_series_not_found_total counter
prometheus_tsdb_head_series_not_found_total 0
# HELP prometheus_tsdb_head_series_removed_total Total number of series removed in the head
# TYPE prometheus_tsdb_head_series_removed_total counter
prometheus_tsdb_head_series_removed_total 1.205883e+06
# HELP prometheus_tsdb_head_truncations_failed_total Total number of head truncations that failed.
# TYPE prometheus_tsdb_head_truncations_failed_total counter
prometheus_tsdb_head_truncations_failed_total 0
# HELP prometheus_tsdb_head_truncations_total Total number of head truncations attempted.
# TYPE prometheus_tsdb_head_truncations_total counter
prometheus_tsdb_head_truncations_total 192
# HELP prometheus_tsdb_lowest_timestamp Lowest timestamp value stored in the database. The unit is decided by the library consumer.
# TYPE prometheus_tsdb_lowest_timestamp gauge
prometheus_tsdb_lowest_timestamp 1.7620416e+12
# HELP prometheus_tsdb_lowest_timestamp_seconds Lowest timestamp value stored in the database.
# TYPE prometheus_tsdb_lowest_timestamp_seconds gauge
prometheus_tsdb_lowest_timestamp_seconds 1.7620416e+09
# HELP prometheus_tsdb_reloads_failures_total Number of times the database failed to reload block data from disk.
# TYPE prometheus_tsdb_reloads_failures_total counter
prometheus_tsdb_reloads_failures_total 0
# HELP prometheus_tsdb_reloads_total Number of times the database reloaded block data from disk.
# TYPE prometheus_tsdb_reloads_total counter
prometheus_tsdb_reloads_total 285
# HELP prometheus_tsdb_size_retentions_total The number of times that blocks were deleted because the maximum number of bytes was exceeded.
# TYPE prometheus_tsdb_size_retentions_total counter
prometheus_tsdb_size_retentions_total 0
# HELP prometheus_tsdb_storage_blocks_bytes The number of bytes that are currently used for local storage by all blocks.
# TYPE prometheus_tsdb_storage_blocks_bytes gauge
prometheus_tsdb_storage_blocks_bytes 2.4269005331e+10
# HELP prometheus_tsdb_symbol_table_size_bytes Size of symbol table on disk (in bytes)
# TYPE prometheus_tsdb_symbol_table_size_bytes gauge
prometheus_tsdb_symbol_table_size_bytes 1.183088e+07
# HELP prometheus_tsdb_time_retentions_total The number of times that blocks were deleted because the maximum time limit was exceeded.
# TYPE prometheus_tsdb_time_retentions_total counter
prometheus_tsdb_time_retentions_total 7
# HELP prometheus_tsdb_tombstone_cleanup_seconds The time taken to recompact blocks to remove tombstones.
# TYPE prometheus_tsdb_tombstone_cleanup_seconds histogram
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="0.005"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="0.01"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="0.025"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="0.05"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="0.1"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="0.25"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="0.5"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="1"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="2.5"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="5"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="10"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="+Inf"} 0
prometheus_tsdb_tombstone_cleanup_seconds_sum 0
prometheus_tsdb_tombstone_cleanup_seconds_count 0
# HELP prometheus_tsdb_vertical_compactions_total Total number of compactions done on overlapping blocks.
# TYPE prometheus_tsdb_vertical_compactions_total counter
prometheus_tsdb_vertical_compactions_total 0
# HELP prometheus_tsdb_wal_completed_pages_total Total number of completed pages.
# TYPE prometheus_tsdb_wal_completed_pages_total counter
prometheus_tsdb_wal_completed_pages_total 4.168232e+06
# HELP prometheus_tsdb_wal_corruptions_total Total number of WAL corruptions.
# TYPE prometheus_tsdb_wal_corruptions_total counter
prometheus_tsdb_wal_corruptions_total 0
# HELP prometheus_tsdb_wal_fsync_duration_seconds Duration of WAL fsync.
# TYPE prometheus_tsdb_wal_fsync_duration_seconds summary
prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.5"} 0.048913317
prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.9"} 0.048913317
prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.99"} 0.048913317
prometheus_tsdb_wal_fsync_duration_seconds_sum 38.80993542200002
prometheus_tsdb_wal_fsync_duration_seconds_count 1021
# HELP prometheus_tsdb_wal_page_flushes_total Total number of page flushes.
# TYPE prometheus_tsdb_wal_page_flushes_total counter
prometheus_tsdb_wal_page_flushes_total 1.6125155e+07
# HELP prometheus_tsdb_wal_truncate_duration_seconds Duration of WAL truncation.
# TYPE prometheus_tsdb_wal_truncate_duration_seconds summary
prometheus_tsdb_wal_truncate_duration_seconds{quantile="0.5"} NaN
prometheus_tsdb_wal_truncate_duration_seconds{quantile="0.9"} NaN
prometheus_tsdb_wal_truncate_duration_seconds{quantile="0.99"} NaN
prometheus_tsdb_wal_truncate_duration_seconds_sum 1998.849198295999
prometheus_tsdb_wal_truncate_duration_seconds_count 192
# HELP prometheus_tsdb_wal_truncations_failed_total Total number of WAL truncations that failed.
# TYPE prometheus_tsdb_wal_truncations_failed_total counter
prometheus_tsdb_wal_truncations_failed_total 0
# HELP prometheus_tsdb_wal_truncations_total Total number of WAL truncations attempted.
# TYPE prometheus_tsdb_wal_truncations_total counter
prometheus_tsdb_wal_truncations_total 192
# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.
# TYPE promhttp_metric_handler_requests_in_flight gauge
promhttp_metric_handler_requests_in_flight 1
# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.
# TYPE promhttp_metric_handler_requests_total counter
promhttp_metric_handler_requests_total{code="200"} 92166
promhttp_metric_handler_requests_total{code="500"} 0
promhttp_metric_handler_requests_total{code="503"} 0