{{ ansible_managed | comment }}

global:
  # scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  # evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  external_labels:
    # monitor: 'example'

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets: ['localhost:9093']

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  - "alerts/*.yml"

# Scrape configurations: Prometheus itself, plus node exporter targets
# discovered from a file_sd JSON file.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
      - targets: ['{{ lan_address }}:9090']

  - job_name: node
    file_sd_configs:
      - files:
          - '/etc/prometheus/node-targets.json'
    relabel_configs:
      # Do not put :9100 in the instance name in the targets file; it is appended here.
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - source_labels: [__param_target]
        target_label: __address__
        replacement: '$1:9100'
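      # The relabel chain above copies each bare hostname from node-targets.json
      # into __param_target, reuses it as the 'instance' label, and only then
      # appends :9100 to the scrape address, so the instance label stays free
      # of the port.
      #
      # Illustrative sketch only, not content of this repository: Prometheus
      # file_sd expects a JSON array of target groups, and with this relabeling
      # the entries should be bare hostnames without a port, e.g.
      #
      #   [
      #     { "targets": ["node1.lan", "node2.lan"], "labels": { "env": "home" } }
      #   ]
      #
      # The hostnames and the 'env' label above are placeholders, not values
      # taken from this playbook.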