commit 71179324c8
Date: 2025-06-17 17:08:14 +08:00
1205 changed files with 798946 additions and 0 deletions


@@ -0,0 +1,43 @@
services:
# This image is for testing only; install your own database for production
# SID: XE user: system password: oracle
oracle:
image: tekintian/oracle12c:latest
container_name: oracle
environment:
# Shanghai timezone
TZ: Asia/Shanghai
DBCA_TOTAL_MEMORY: 16192
ports:
- "18080:8080"
- "1521:1521"
volumes:
# Data mount
- "/docker/oracle/data:/u01/app/oracle"
network_mode: "host"
postgres:
image: postgres:14.2
container_name: postgres
environment:
POSTGRES_USER: root
POSTGRES_PASSWORD: root
POSTGRES_DB: postgres
ports:
- "5432:5432"
volumes:
- /docker/postgres/data:/var/lib/postgresql/data
network_mode: "host"
postgres13:
image: postgres:13.6
container_name: postgres13
environment:
POSTGRES_USER: root
POSTGRES_PASSWORD: root
POSTGRES_DB: postgres
ports:
- "5433:5432"
volumes:
- /docker/postgres13/data:/var/lib/postgresql/data
network_mode: "host"
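# A minimal bring-up sketch for this file (assumes it is saved as docker-compose.yml on the host;
# service names as defined above):
#   docker compose up -d postgres
#   docker compose logs -f postgres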


@@ -0,0 +1,524 @@
services:
mysql:
image: mysql:8.0.42
container_name: mysql
environment:
# Shanghai timezone
TZ: Asia/Shanghai
# root password
MYSQL_ROOT_PASSWORD: ruoyi123
# Database created on initialization
MYSQL_DATABASE: ry-cloud
ports:
- "3306:3306"
volumes:
# Data mount
- /docker/mysql/data/:/var/lib/mysql/
# Config mount
- /docker/mysql/conf/:/etc/mysql/conf.d/
command:
# Switch the MySQL 8.0 default password policy back to the legacy one (8.0 changed its default auth plugin, which breaks password matching for older clients)
--default-authentication-plugin=mysql_native_password
--character-set-server=utf8mb4
--collation-server=utf8mb4_general_ci
--explicit_defaults_for_timestamp=true
--lower_case_table_names=1
privileged: true
network_mode: "host"
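# A quick check that the legacy auth plugin took effect (sketch; assumes the mysql
# container name and root password configured above):
#   docker exec mysql mysql -uroot -pruoyi123 -e "SELECT user, plugin FROM mysql.user;"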
nacos:
image: ruoyi/ruoyi-nacos:2.4.0
container_name: nacos
ports:
- "8848:8848"
- "9848:9848"
- "9849:9849"
environment:
TZ: Asia/Shanghai
JAVA_OPTS: "-Xms256m -Xmx512m"
volumes:
# Log directory; note that in cluster mode each node must use a distinct directory, e.g. nacos1, nacos2
- /docker/nacos/logs/:/root/nacos/logs
# Cluster config file; every Nacos node in the cluster must use this same file
- /docker/nacos/conf/cluster.conf:/root/nacos/conf/cluster.conf
network_mode: "host"
redis:
image: redis:7.2.8
container_name: redis
ports:
- "6379:6379"
environment:
# Shanghai timezone
TZ: Asia/Shanghai
volumes:
# Config file
- /docker/redis/conf:/redis/config
# Data files
- /docker/redis/data/:/redis/data/
command: "redis-server /redis/config/redis.conf"
privileged: true
network_mode: "host"
minio:
# Last full-featured MinIO release; do not upgrade, later versions have features stripped out
image: minio/minio:RELEASE.2025-04-22T22-12-26Z
container_name: minio
ports:
# API port
- "9000:9000"
# Console port
- "9001:9001"
environment:
# Shanghai timezone
TZ: Asia/Shanghai
# Admin console username
MINIO_ROOT_USER: ruoyi
# Admin console password (minimum 8 characters)
MINIO_ROOT_PASSWORD: ruoyi123
# HTTPS requires a domain name
#MINIO_SERVER_URL: "https://xxx.com:9000"
#MINIO_BROWSER_REDIRECT_URL: "https://xxx.com:9001"
# Compression: on to enable, off to disable
MINIO_COMPRESS: "off"
# Extensions, e.g. .pdf,.doc; empty compresses all types
MINIO_COMPRESS_EXTENSIONS: ""
# MIME types, e.g. application/pdf; empty compresses all types
MINIO_COMPRESS_MIME_TYPES: ""
volumes:
# Map the host data directory to /data inside the container
- /docker/minio/data:/data
# Map the config directory
- /docker/minio/config:/root/.minio/
command: server --address ':9000' --console-address ':9001' /data # serve the /data directory inside the container
privileged: true
network_mode: "host"
seata-server:
image: ruoyi/ruoyi-seata-server:2.4.0
container_name: seata-server
ports:
- "7091:7091"
- "8091:8091"
environment:
TZ: Asia/Shanghai
# Note: if accessed from outside the host network, change this IP to the public IP
# SEATA_IP: 127.0.0.1
SEATA_PORT: 8091
volumes:
- /docker/ruoyi-seata-server/logs/:/ruoyi/seata-server/logs
# SkyWalking agent
- /docker/skywalking/agent/:/ruoyi/skywalking/agent
privileged: true
network_mode: "host"
nginx-web:
image: nginx:1.22.1
container_name: nginx-web
environment:
# Shanghai timezone
TZ: Asia/Shanghai
ports:
- "80:80"
- "443:443"
volumes:
# Certificate mount
- /docker/nginx/cert:/etc/nginx/cert
# Config file mount
- /docker/nginx/conf/nginx.conf:/etc/nginx/nginx.conf
# Web root
- /docker/nginx/html:/usr/share/nginx/html
# Log directory
- /docker/nginx/log:/var/log/nginx
privileged: true
network_mode: "host"
sentinel:
image: ruoyi/ruoyi-sentinel-dashboard:2.4.0
container_name: sentinel
environment:
TZ: Asia/Shanghai
ports:
- "8718:8718"
volumes:
# Log directory
- /docker/ruoyi-sentinel-dashboard/logs/:/ruoyi/sentinel-dashboard/logs
# SkyWalking agent
- /docker/skywalking/agent/:/ruoyi/skywalking/agent
restart: always
network_mode: "host"
ruoyi-monitor:
image: ruoyi/ruoyi-monitor:2.4.0
container_name: ruoyi-monitor
environment:
# Shanghai timezone
TZ: Asia/Shanghai
ports:
- "9100:9100"
volumes:
# Log directory
- /docker/ruoyi-monitor/logs/:/ruoyi/monitor/logs
# SkyWalking agent
- /docker/skywalking/agent/:/ruoyi/skywalking/agent
privileged: true
network_mode: "host"
ruoyi-snailjob-server:
image: ruoyi/ruoyi-snailjob-server:2.4.0
container_name: ruoyi-snailjob-server
environment:
# Shanghai timezone
TZ: Asia/Shanghai
ports:
- "8800:8800"
- "17888:17888"
volumes:
- /docker/snailjob/logs/:/ruoyi/snailjob/logs
privileged: true
network_mode: "host"
ruoyi-gateway:
image: ruoyi/ruoyi-gateway:2.4.0
container_name: ruoyi-gateway
environment:
# Shanghai timezone
TZ: Asia/Shanghai
ports:
- "8080:8080"
volumes:
# Log directory
- /docker/ruoyi-gateway/logs/:/ruoyi/gateway/logs
# SkyWalking agent
- /docker/skywalking/agent/:/ruoyi/skywalking/agent
privileged: true
network_mode: "host"
ruoyi-auth:
image: ruoyi/ruoyi-auth:2.4.0
container_name: ruoyi-auth
environment:
# Shanghai timezone
TZ: Asia/Shanghai
ports:
- "9210:9210"
volumes:
# Log directory
- /docker/ruoyi-auth/logs/:/ruoyi/auth/logs
# SkyWalking agent
- /docker/skywalking/agent/:/ruoyi/skywalking/agent
privileged: true
network_mode: "host"
ruoyi-system:
image: ruoyi/ruoyi-system:2.4.0
container_name: ruoyi-system
environment:
# Shanghai timezone
TZ: Asia/Shanghai
ports:
- "9201:9201"
volumes:
# Log directory
- /docker/ruoyi-system/logs/:/ruoyi/system/logs
# SkyWalking agent
- /docker/skywalking/agent/:/ruoyi/skywalking/agent
privileged: true
network_mode: "host"
ruoyi-gen:
image: ruoyi/ruoyi-gen:2.4.0
container_name: ruoyi-gen
environment:
# Shanghai timezone
TZ: Asia/Shanghai
ports:
- "9202:9202"
volumes:
# Log directory
- /docker/ruoyi-gen/logs/:/ruoyi/gen/logs
# SkyWalking agent
- /docker/skywalking/agent/:/ruoyi/skywalking/agent
privileged: true
network_mode: "host"
ruoyi-job:
image: ruoyi/ruoyi-job:2.4.0
container_name: ruoyi-job
environment:
# Shanghai timezone
TZ: Asia/Shanghai
SERVER_PORT: 9203
SNAIL_PORT: 19203
ports:
- "9203:9203"
volumes:
# Log directory
- /docker/ruoyi-job/logs/:/ruoyi/job/logs
# SkyWalking agent
- /docker/skywalking/agent/:/ruoyi/skywalking/agent
privileged: true
network_mode: "host"
ruoyi-resource:
image: ruoyi/ruoyi-resource:2.4.0
container_name: ruoyi-resource
environment:
# Shanghai timezone
TZ: Asia/Shanghai
ports:
- "9204:9204"
volumes:
# Log directory
- /docker/ruoyi-resource/logs/:/ruoyi/resource/logs
# SkyWalking agent
- /docker/skywalking/agent/:/ruoyi/skywalking/agent
privileged: true
network_mode: "host"
ruoyi-workflow:
image: ruoyi/ruoyi-workflow:2.4.0
container_name: ruoyi-workflow
environment:
# Shanghai timezone
TZ: Asia/Shanghai
ports:
- "9205:9205"
volumes:
# Log directory
- /docker/ruoyi-workflow/logs/:/ruoyi/workflow/logs
# SkyWalking agent
- /docker/skywalking/agent/:/ruoyi/skywalking/agent
privileged: true
network_mode: "host"
#################################################################################################
########################## The services below are optional; deploy as needed ###################
#################################################################################################
elasticsearch:
image: elasticsearch:7.17.6
container_name: elasticsearch
ports:
- "9200:9200"
- "9300:9300"
environment:
# Cluster name
cluster.name: elasticsearch
# Start in single-node mode
discovery.type: single-node
ES_JAVA_OPTS: "-Xms512m -Xmx512m"
volumes:
- /docker/elk/elasticsearch/plugins:/usr/share/elasticsearch/plugins
- /docker/elk/elasticsearch/data:/usr/share/elasticsearch/data
- /docker/elk/elasticsearch/logs:/usr/share/elasticsearch/logs
network_mode: "host"
kibana:
image: kibana:7.17.6
container_name: kibana
ports:
- "5601:5601"
depends_on:
# Start Kibana only after Elasticsearch has started
- elasticsearch
environment:
# Set the UI language to Chinese
I18N_LOCALE: zh-CN
# Public access URL
# SERVER_PUBLICBASEURL: https://kibana.cloud.com
volumes:
- /docker/elk/kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml
network_mode: "host"
logstash:
image: logstash:7.17.6
container_name: logstash
ports:
- "4560:4560"
volumes:
- /docker/elk/logstash/pipeline/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
- /docker/elk/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml
depends_on:
- elasticsearch
network_mode: "host"
rmqnamesrv:
image: apache/rocketmq:5.2.0
container_name: rmqnamesrv
ports:
- "9876:9876"
environment:
JAVA_OPT: -server -Xms512m -Xmx512m
command: sh mqnamesrv
volumes:
- /docker/rocketmq/namesrv/logs:/home/rocketmq/logs/rocketmqlogs
network_mode: "host"
rmqbroker1:
image: apache/rocketmq:5.2.0
container_name: rmqbroker1
ports:
- "10911:10911"
- "10909:10909"
- "10912:10912"
environment:
JAVA_OPT: -server -Xms512M -Xmx512M
NAMESRV_ADDR: 127.0.0.1:9876
# --enable-proxy runs the broker and proxy in the same process; for production, deploy the proxy separately
command: sh mqbroker --enable-proxy -c /home/rocketmq/rocketmq-5.2.0/conf/broker.conf
depends_on:
- rmqnamesrv
volumes:
- /docker/rocketmq/broker1/conf/broker.conf:/home/rocketmq/rocketmq-5.2.0/conf/broker.conf
- /docker/rocketmq/broker1/logs:/home/rocketmq/logs/rocketmqlogs
- /docker/rocketmq/broker1/store:/home/rocketmq/store
privileged: true
network_mode: "host"
rmqconsole:
image: apacherocketmq/rocketmq-dashboard:latest
container_name: rmqconsole
ports:
- "19876:19876"
environment:
JAVA_OPTS: -Dserver.port=19876 -Drocketmq.namesrv.addr=127.0.0.1:9876 -Dcom.rocketmq.sendMessageWithVIPChannel=false
depends_on:
- rmqnamesrv
network_mode: "host"
rabbitmq:
container_name: rabbitmq
build:
context: ./rabbitmq
environment:
RABBITMQ_DEFAULT_USER: ruoyi
RABBITMQ_DEFAULT_PASS: ruoyi123
ports:
- "15672:15672" # 管理界面端口
- "5672:5672" # api 端口
volumes:
- /docker/rabbitmq/log:/var/log/rabbitmq
- /docker/rabbitmq/data:/var/lib/rabbitmq
network_mode: "host"
zookeeper:
image: 'bitnami/zookeeper:3.8.0'
container_name: zookeeper
ports:
- "2181:2181"
environment:
TZ: Asia/Shanghai
ALLOW_ANONYMOUS_LOGIN: "yes"
ZOO_SERVER_ID: 1
ZOO_PORT_NUMBER: 2181
# Built-in admin console; rarely needed, enable it yourself if required
ZOO_ENABLE_ADMIN_SERVER: "no"
# Built-in admin console port
ZOO_ADMIN_SERVER_PORT_NUMBER: 8080
network_mode: "host"
kafka:
image: 'bitnami/kafka:3.6.2'
container_name: kafka
ports:
- "9092:9092"
environment:
TZ: Asia/Shanghai
# More variables: see the docs at https://github.com/bitnami/bitnami-docker-kafka/blob/master/README.md
KAFKA_BROKER_ID: 1
# Listener port
KAFKA_CFG_LISTENERS: PLAINTEXT://:9092
# Advertised IP: use 127.x locally, 192.x on a LAN, or the public IP for external access
KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://192.168.31.165:9092
KAFKA_CFG_ZOOKEEPER_CONNECT: 127.0.0.1:2181
ALLOW_PLAINTEXT_LISTENER: "yes"
volumes:
- /docker/kafka/data:/bitnami/kafka/data
depends_on:
- zookeeper
network_mode: "host"
kafka-manager:
image: sheepkiller/kafka-manager:latest
container_name: kafka-manager
ports:
- "19092:19092"
environment:
ZK_HOSTS: 127.0.0.1:2181
APPLICATION_SECRET: letmein
KAFKA_MANAGER_USERNAME: ruoyi
KAFKA_MANAGER_PASSWORD: ruoyi123
KM_ARGS: -Dhttp.port=19092
depends_on:
- kafka
network_mode: "host"
sky-oap:
image: apache/skywalking-oap-server:9.7.0
container_name: sky-oap
ports:
- "11800:11800"
- "12800:12800"
environment:
JAVA_OPTS: -Xms1G -Xmx2G
# Retention of record data, in days
SW_CORE_RECORD_DATA_TTL: 7
# Retention of analyzed metrics data, in days
SW_CORE_METRICS_DATA_TTL: 7
SW_STORAGE: elasticsearch
SW_STORAGE_ES_CLUSTER_NODES: 127.0.0.1:9200
TZ: Asia/Shanghai
network_mode: "host"
sky-ui:
image: apache/skywalking-ui:9.7.0
container_name: sky-ui
ports:
- "18080:18080"
environment:
SW_SERVER_PORT: 18080
SW_OAP_ADDRESS: http://127.0.0.1:12800
TZ: Asia/Shanghai
depends_on:
- sky-oap
network_mode: "host"
prometheus:
image: prom/prometheus:v2.40.1
container_name: prometheus
ports:
- "9090:9090"
volumes:
- /docker/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml
network_mode: "host"
grafana:
image: grafana/grafana:9.2.4
container_name: grafana
environment:
TZ: Asia/Shanghai
# Server root URL; set this to the public IP or domain name
GF_SERVER_ROOT_URL: ""
# admin user password
GF_SECURITY_ADMIN_PASSWORD: 123456
ports:
- "3000:3000"
volumes:
- /docker/grafana/grafana.ini:/etc/grafana/grafana.ini
- /docker/grafana:/var/lib/grafana
network_mode: "host"
shardingproxy:
image: apache/shardingsphere-proxy:5.4.0
container_name: shardingsphere-proxy
command: server /data
ports:
- "3307:3307"
volumes:
- /docker/shardingproxy/conf:/opt/shardingsphere-proxy/conf
- /docker/shardingproxy/ext-lib:/opt/shardingsphere-proxy/ext-lib
environment:
- JVM_OPTS="-Djava.awt.headless=true"
network_mode: "host"
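# A possible bring-up order for this file (sketch; infrastructure first, then business
# services, using the service names defined above):
#   docker compose up -d mysql redis nacos minio seata-server
#   docker compose up -d ruoyi-gateway ruoyi-auth ruoyi-system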


@@ -0,0 +1 @@
Data directory. Run `chmod 777 /docker/elk/elasticsearch/data` to grant read/write permissions, otherwise data cannot be written.


@@ -0,0 +1 @@
Log directory. Run `chmod 777 /docker/elk/elasticsearch/logs` to grant read/write permissions, otherwise data cannot be written.


@@ -0,0 +1,13 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">
<properties>
<comment>IK Analyzer extension configuration</comment>
<!-- Configure your own extension dictionary here -->
<entry key="ext_dict"></entry>
<!-- Configure your own extension stopword dictionary here -->
<entry key="ext_stopwords"></entry>
<!-- Configure a remote extension dictionary here -->
<!-- <entry key="remote_ext_dict">words_location</entry> -->
<!-- Configure a remote extension stopword dictionary here -->
<!-- <entry key="remote_ext_stopwords">words_location</entry> -->
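<!-- Example (sketch; custom/mydict.dic is a hypothetical dictionary file under the
     plugin's config directory): <entry key="ext_dict">custom/mydict.dic</entry> -->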
</properties>


@@ -0,0 +1,31 @@
也
使


@@ -0,0 +1,25 @@
使


@@ -0,0 +1,316 @@
世纪
位数
像素
克拉
公亩
公克
公分
公升
公尺
公担
公斤
公里
公顷
分钟
分米
加仑
千克
千米
厘米
周年
小时
平方
平方公尺
平方公里
平方分米
平方厘米
平方码
平方米
平方英寸
平方英尺
平方英里
平米
年代
年级
月份
毫升
毫米
毫克
海里
点钟
盎司
秒钟
立方公尺
立方分米
立方厘米
立方码
立方米
立方英寸
立方英尺
英亩
英寸
英尺
英里
阶段


@@ -0,0 +1,33 @@
a
an
and
are
as
at
be
but
by
for
if
in
into
is
it
no
not
of
on
or
such
that
the
their
then
there
these
they
this
to
was
will
with


@@ -0,0 +1,37 @@
斯基
维奇
诺夫


@@ -0,0 +1,131 @@
丁
万俟
上官
东方
令狐
仲孙
公冶
公孙
公羊
单于
司徒
司空
司马
夏侯
太叔
宇文
宗政
尉迟
慕容
欧阳
淳于
澹台
濮阳
申屠
皇甫
诸葛
赫连
轩辕
钟离
长孙
闻人
闾丘
鲜于


@@ -0,0 +1,56 @@
# Elasticsearch plugin descriptor file
# This file must exist as 'plugin-descriptor.properties' at
# the root directory of all plugins.
#
# A plugin can be 'site', 'jvm', or both.
#
### example site plugin for "foo":
#
# foo.zip <-- zip file for the plugin, with this structure:
# _site/ <-- the contents that will be served
# plugin-descriptor.properties <-- example contents below:
#
# site=true
# description=My cool plugin
# version=1.0
#
### example jvm plugin for "foo"
#
# foo.zip <-- zip file for the plugin, with this structure:
# <arbitrary name1>.jar <-- classes, resources, dependencies
# <arbitrary nameN>.jar <-- any number of jars
# plugin-descriptor.properties <-- example contents below:
#
# jvm=true
# classname=foo.bar.BazPlugin
# description=My cool plugin
# version=2.0.0-rc1
# elasticsearch.version=2.0
# java.version=1.7
#
### mandatory elements for all plugins:
#
# 'description': simple summary of the plugin
description=IK Analyzer for Elasticsearch
#
# 'version': plugin's version
version=7.17.6
#
# 'name': the plugin name
name=analysis-ik
#
# 'classname': the name of the class to load, fully-qualified.
classname=org.elasticsearch.plugin.analysis.ik.AnalysisIkPlugin
#
# 'java.version' version of java the code is built against
# use the system property java.specification.version
# version string must be a sequence of nonnegative decimal integers
# separated by "."'s and may have leading zeros
java.version=1.8
#
# 'elasticsearch.version' version of elasticsearch compiled against
# You will have to release a new version of the plugin for each new
# elasticsearch release. This version is checked when the plugin
# is loaded so Elasticsearch will refuse to start in the presence of
# plugins with the incorrect elasticsearch.version.
elasticsearch.version=7.17.6


@@ -0,0 +1,4 @@
grant {
// needed because of the hot reload functionality
permission java.net.SocketPermission "*", "connect,resolve";
};


@@ -0,0 +1,4 @@
server.host: "0.0.0.0"
server.shutdownTimeout: "5s"
elasticsearch.hosts: [ "http://127.0.0.1:9200" ]
monitoring.ui.container.elasticsearch.enabled: true


@@ -0,0 +1,2 @@
http.host: "0.0.0.0"
xpack.monitoring.elasticsearch.hosts: [ "http://127.0.0.1:9200" ]


@@ -0,0 +1,14 @@
input {
tcp {
mode => "server"
host => "0.0.0.0"
port => 4560
codec => json_lines
}
}
output {
elasticsearch {
hosts => "127.0.0.1:9200"
index => "%{[spring.application.name]}-%{+YYYY.MM.dd}"
}
}
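# A quick end-to-end test of this pipeline (sketch; assumes nc is installed on the host):
#   echo '{"spring.application.name":"demo","message":"hello"}' | nc 127.0.0.1 4560
# An index named demo-<current date> should then appear in Elasticsearch.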


@@ -0,0 +1 @@
Data directory. Run `chmod 777 /docker/grafana` to grant read/write permissions, otherwise data cannot be written.


@@ -0,0 +1,849 @@
##################### Grafana Configuration Example #####################
#
# Everything has defaults so you only need to uncomment things you want to
# change
# possible values : production, development
;app_mode = production
# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
;instance_name = ${HOSTNAME}
#################################### Paths ####################################
[paths]
# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
;data = /var/lib/grafana
# Temporary files in `data` directory older than given duration will be removed
;temp_data_lifetime = 24h
# Directory where grafana can store logs
;logs = /var/log/grafana
# Directory where grafana will automatically scan and look for plugins
;plugins = /var/lib/grafana/plugins
# folder that contains provisioning config files that grafana will apply on startup and while running.
;provisioning = conf/provisioning
#################################### Server ####################################
[server]
# Protocol (http, https, h2, socket)
;protocol = http
# The ip address to bind to, empty will bind to all interfaces
;http_addr =
# The http port to use
;http_port = 3000
# The public facing domain name used to access grafana from a browser
;domain = localhost
# Redirect to correct domain if host header does not match domain
# Prevents DNS rebinding attacks
;enforce_domain = false
# The full public facing url you use in browser, used for redirects and emails
# If you use reverse proxy and sub path specify full url (with sub path)
;root_url = %(protocol)s://%(domain)s:%(http_port)s/
# Serve Grafana from subpath specified in `root_url` setting. By default it is set to `false` for compatibility reasons.
;serve_from_sub_path = false
# Log web requests
;router_logging = false
# the path relative working path
;static_root_path = public
# enable gzip
;enable_gzip = false
# https certs & key file
;cert_file =
;cert_key =
# Unix socket path
;socket =
#################################### Database ####################################
[database]
# You can configure the database connection by specifying type, host, name, user and password
# as separate properties or as one string using the url property.
# Either "mysql", "postgres" or "sqlite3", it's your choice
;type = sqlite3
;host = 127.0.0.1:3306
;name = grafana
;user = root
# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
;password =
# Use either URL or the previous fields to configure the database
# Example: mysql://user:secret@host:port/database
;url =
# For "postgres" only, either "disable", "require" or "verify-full"
;ssl_mode = disable
;ca_cert_path =
;client_key_path =
;client_cert_path =
;server_cert_name =
# For "sqlite3" only, path relative to data_path setting
;path = grafana.db
# Max idle conn setting default is 2
;max_idle_conn = 2
# Max conn setting default is 0 (mean not set)
;max_open_conn =
# Connection Max Lifetime default is 14400 (means 14400 seconds or 4 hours)
;conn_max_lifetime = 14400
# Set to true to log the sql calls and execution times.
;log_queries =
# For "sqlite3" only. cache mode setting used for connecting to the database. (private, shared)
;cache_mode = private
#################################### Cache server #############################
[remote_cache]
# Either "redis", "memcached" or "database" default is "database"
;type = database
# cache connectionstring options
# database: will use Grafana primary database.
# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=0,ssl=false`. Only addr is required. ssl may be 'true', 'false', or 'insecure'.
# memcache: 127.0.0.1:11211
;connstr =
#################################### Data proxy ###########################
[dataproxy]
# This enables data proxy logging, default is false
;logging = false
# How long the data proxy waits before timing out, default is 30 seconds.
# This setting also applies to core backend HTTP data sources where query requests use an HTTP client with timeout set.
;timeout = 30
# How many seconds the data proxy waits before sending a keepalive probe request.
;keep_alive_seconds = 30
# How many seconds the data proxy waits for a successful TLS Handshake before timing out.
;tls_handshake_timeout_seconds = 10
# How many seconds the data proxy will wait for a server's first response headers after
# fully writing the request headers if the request has an "Expect: 100-continue"
# header. A value of 0 will result in the body being sent immediately, without
# waiting for the server to approve.
;expect_continue_timeout_seconds = 1
# The maximum number of idle connections that Grafana will keep alive.
;max_idle_connections = 100
# How many seconds the data proxy keeps an idle connection open before timing out.
;idle_conn_timeout_seconds = 90
# If enabled and user is not anonymous, data proxy will add X-Grafana-User header with username into the request, default is false.
;send_user_header = false
#################################### Analytics ####################################
[analytics]
# Server reporting, sends usage counters to stats.grafana.org every 24 hours.
# No ip addresses are being tracked, only simple counters to track
# running instances, dashboard and error counts. It is very helpful to us.
# Change this option to false to disable reporting.
;reporting_enabled = true
# Set to false to disable all checks to https://grafana.net
# for new versions (grafana itself and plugins), check is used
# in some UI views to notify that grafana or plugin update exists
# This option does not cause any auto updates, nor send any information
# only a GET request to http://grafana.com to get latest versions
;check_for_updates = true
# Google Analytics universal tracking code, only enabled if you specify an id here
;google_analytics_ua_id =
# Google Tag Manager ID, only enabled if you specify an id here
;google_tag_manager_id =
#################################### Security ####################################
[security]
# disable creation of admin user on first start of grafana
;disable_initial_admin_creation = false
# default admin user, created on startup
;admin_user = admin
# default admin password, can be changed before first start of grafana, or in profile settings
;admin_password = admin
# used for signing
;secret_key = SW2YcwTIb9zpOOhoPsMm
# disable gravatar profile images
;disable_gravatar = false
# data source proxy whitelist (ip_or_domain:port separated by spaces)
;data_source_proxy_whitelist =
# disable protection against brute force login attempts
;disable_brute_force_login_protection = false
# set to true if you host Grafana behind HTTPS. default is false.
;cookie_secure = false
# set cookie SameSite attribute. defaults to `lax`. can be set to "lax", "strict", "none" and "disabled"
;cookie_samesite = lax
# set to true if you want to allow browsers to render Grafana in a <frame>, <iframe>, <embed> or <object>. default is false.
;allow_embedding = false
# Set to true if you want to enable http strict transport security (HSTS) response header.
# This is only sent when HTTPS is enabled in this configuration.
# HSTS tells browsers that the site should only be accessed using HTTPS.
;strict_transport_security = false
# Sets how long a browser should cache HSTS. Only applied if strict_transport_security is enabled.
;strict_transport_security_max_age_seconds = 86400
# Set to true if to enable HSTS preloading option. Only applied if strict_transport_security is enabled.
;strict_transport_security_preload = false
# Set to true if to enable the HSTS includeSubDomains option. Only applied if strict_transport_security is enabled.
;strict_transport_security_subdomains = false
# Set to true to enable the X-Content-Type-Options response header.
# The X-Content-Type-Options response HTTP header is a marker used by the server to indicate that the MIME types advertised
# in the Content-Type headers should not be changed and be followed.
;x_content_type_options = true
# Set to true to enable the X-XSS-Protection header, which tells browsers to stop pages from loading
# when they detect reflected cross-site scripting (XSS) attacks.
;x_xss_protection = true
#################################### Snapshots ###########################
[snapshots]
# snapshot sharing options
;external_enabled = true
;external_snapshot_url = https://snapshots-origin.raintank.io
;external_snapshot_name = Publish to snapshot.raintank.io
# Set to true to enable this Grafana instance act as an external snapshot server and allow unauthenticated requests for
# creating and deleting snapshots.
;public_mode = false
# remove expired snapshot
;snapshot_remove_expired = true
#################################### Dashboards History ##################
[dashboards]
# Number dashboard versions to keep (per dashboard). Default: 20, Minimum: 1
;versions_to_keep = 20
# Minimum dashboard refresh interval. When set, this will restrict users to set the refresh interval of a dashboard lower than given interval. Per default this is 5 seconds.
# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m.
;min_refresh_interval = 5s
# Path to the default home dashboard. If this value is empty, then Grafana uses StaticRootPath + "dashboards/home.json"
;default_home_dashboard_path =
#################################### Users ###############################
[users]
# disable user signup / registration
;allow_sign_up = true
# Allow non admin users to create organizations
;allow_org_create = true
# Set to true to automatically assign new users to the default organization (id 1)
;auto_assign_org = true
# Set this value to automatically add new users to the provided organization (if auto_assign_org above is set to true)
;auto_assign_org_id = 1
# Default role new users will be automatically assigned (if disabled above is set to true)
;auto_assign_org_role = Viewer
# Require email validation before sign up completes
;verify_email_enabled = false
# Background text for the user field on the login page
;login_hint = email or username
;password_hint = password
# Default UI theme ("dark" or "light")
;default_theme = dark
# External user management, these options affect the organization users view
;external_manage_link_url =
;external_manage_link_name =
;external_manage_info =
# Viewers can edit/inspect dashboard settings in the browser. But not save the dashboard.
;viewers_can_edit = false
# Editors can administrate dashboard, folders and teams they create
;editors_can_admin = false
# The duration in time a user invitation remains valid before expiring. This setting should be expressed as a duration. Examples: 6h (hours), 2d (days), 1w (week). Default is 24h (24 hours). The minimum supported duration is 15m (15 minutes).
;user_invite_max_lifetime_duration = 24h
[auth]
# Login cookie name
;login_cookie_name = grafana_session
# The maximum lifetime (duration) an authenticated user can be inactive before being required to login at next visit. Default is 7 days (7d). This setting should be expressed as a duration, e.g. 5m (minutes), 6h (hours), 10d (days), 2w (weeks), 1M (month). The lifetime resets at each successful token rotation.
;login_maximum_inactive_lifetime_duration =
# The maximum lifetime (duration) an authenticated user can be logged in since login time before being required to login. Default is 30 days (30d). This setting should be expressed as a duration, e.g. 5m (minutes), 6h (hours), 10d (days), 2w (weeks), 1M (month).
;login_maximum_lifetime_duration =
# How often should auth tokens be rotated for authenticated users when being active. The default is each 10 minutes.
;token_rotation_interval_minutes = 10
# Set to true to disable (hide) the login form, useful if you use OAuth, defaults to false
;disable_login_form = false
# Set to true to disable the signout link in the side menu. useful if you use auth.proxy, defaults to false
;disable_signout_menu = false
# URL to redirect the user to after sign out
;signout_redirect_url =
# Set to true to attempt login with OAuth automatically, skipping the login screen.
# This setting is ignored if multiple OAuth providers are configured.
;oauth_auto_login = false
# OAuth state max age cookie duration in seconds. Defaults to 600 seconds.
;oauth_state_cookie_max_age = 600
# limit of api_key seconds to live before expiration
;api_key_max_seconds_to_live = -1
# Set to true to enable SigV4 authentication option for HTTP-based datasources.
;sigv4_auth_enabled = false
#################################### Anonymous Auth ######################
[auth.anonymous]
# enable anonymous access
;enabled = false
# specify organization name that should be used for unauthenticated users
;org_name = Main Org.
# specify role for unauthenticated users
;org_role = Viewer
# mask the Grafana version number for unauthenticated users
;hide_version = false
#################################### GitHub Auth ##########################
[auth.github]
;enabled = false
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = user:email,read:org
;auth_url = https://github.com/login/oauth/authorize
;token_url = https://github.com/login/oauth/access_token
;api_url = https://api.github.com/user
;allowed_domains =
;team_ids =
;allowed_organizations =
#################################### GitLab Auth #########################
[auth.gitlab]
;enabled = false
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = api
;auth_url = https://gitlab.com/oauth/authorize
;token_url = https://gitlab.com/oauth/token
;api_url = https://gitlab.com/api/v4
;allowed_domains =
;allowed_groups =
#################################### Google Auth ##########################
[auth.google]
;enabled = false
;allow_sign_up = true
;client_id = some_client_id
;client_secret = some_client_secret
;scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email
;auth_url = https://accounts.google.com/o/oauth2/auth
;token_url = https://accounts.google.com/o/oauth2/token
;api_url = https://www.googleapis.com/oauth2/v1/userinfo
;allowed_domains =
;hosted_domain =
#################################### Grafana.com Auth ####################
[auth.grafana_com]
;enabled = false
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = user:email
;allowed_organizations =
#################################### Azure AD OAuth #######################
[auth.azuread]
;name = Azure AD
;enabled = false
;allow_sign_up = true
;client_id = some_client_id
;client_secret = some_client_secret
;scopes = openid email profile
;auth_url = https://login.microsoftonline.com/<tenant-id>/oauth2/v2.0/authorize
;token_url = https://login.microsoftonline.com/<tenant-id>/oauth2/v2.0/token
;allowed_domains =
;allowed_groups =
#################################### Okta OAuth #######################
[auth.okta]
;name = Okta
;enabled = false
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = openid profile email groups
;auth_url = https://<tenant-id>.okta.com/oauth2/v1/authorize
;token_url = https://<tenant-id>.okta.com/oauth2/v1/token
;api_url = https://<tenant-id>.okta.com/oauth2/v1/userinfo
;allowed_domains =
;allowed_groups =
;role_attribute_path =
#################################### Generic OAuth ##########################
[auth.generic_oauth]
;enabled = false
;name = OAuth
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = user:email,read:org
;email_attribute_name = email:primary
;email_attribute_path =
;login_attribute_path =
;id_token_attribute_name =
;auth_url = https://foo.bar/login/oauth/authorize
;token_url = https://foo.bar/login/oauth/access_token
;api_url = https://foo.bar/user
;allowed_domains =
;team_ids =
;allowed_organizations =
;role_attribute_path =
;tls_skip_verify_insecure = false
;tls_client_cert =
;tls_client_key =
;tls_client_ca =
#################################### Basic Auth ##########################
[auth.basic]
;enabled = true
#################################### Auth Proxy ##########################
[auth.proxy]
;enabled = false
;header_name = X-WEBAUTH-USER
;header_property = username
;auto_sign_up = true
;sync_ttl = 60
;whitelist = 192.168.1.1, 192.168.2.1
;headers = Email:X-User-Email, Name:X-User-Name
# Read the auth proxy docs for details on what the setting below enables
;enable_login_token = false
#################################### Auth LDAP ##########################
[auth.ldap]
;enabled = false
;config_file = /etc/grafana/ldap.toml
;allow_sign_up = true
# LDAP background sync (Enterprise only)
# At 1 am every day
;sync_cron = "0 0 1 * * *"
;active_sync_enabled = true
#################################### SMTP / Emailing ##########################
[smtp]
;enabled = false
;host = localhost:25
;user =
# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
;password =
;cert_file =
;key_file =
;skip_verify = false
;from_address = admin@grafana.localhost
;from_name = Grafana
# EHLO identity in SMTP dialog (defaults to instance_name)
;ehlo_identity = dashboard.example.com
# SMTP startTLS policy (defaults to 'OpportunisticStartTLS')
;startTLS_policy = NoStartTLS
[emails]
;welcome_email_on_sign_up = false
;templates_pattern = emails/*.html
#################################### Logging ##########################
[log]
# Either "console", "file", "syslog". Default is console and file
# Use space to separate multiple modes, e.g. "console file"
;mode = console file
# Either "debug", "info", "warn", "error", "critical", default is "info"
;level = info
# optional settings to set different levels for specific loggers. Ex filters = sqlstore:debug
;filters =
# For "console" mode only
[log.console]
;level =
# log line format, valid options are text, console and json
;format = console
# For "file" mode only
[log.file]
;level =
# log line format, valid options are text, console and json
;format = text
# This enables automated log rotate(switch of following options), default is true
;log_rotate = true
# Max line number of single file, default is 1000000
;max_lines = 1000000
# Max size shift of single file, default is 28 means 1 << 28, 256MB
;max_size_shift = 28
# Segment log daily, default is true
;daily_rotate = true
# Expired days of log file(delete after max days), default is 7
;max_days = 7
[log.syslog]
;level =
# log line format, valid options are text, console and json
;format = text
# Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used.
;network =
;address =
# Syslog facility. user, daemon and local0 through local7 are valid.
;facility =
# Syslog tag. By default, the process' argv[0] is used.
;tag =
#################################### Usage Quotas ########################
[quota]
; enabled = false
#### set quotas to -1 to make unlimited. ####
# limit number of users per Org.
; org_user = 10
# limit number of dashboards per Org.
; org_dashboard = 100
# limit number of data_sources per Org.
; org_data_source = 10
# limit number of api_keys per Org.
; org_api_key = 10
# limit number of orgs a user can create.
; user_org = 10
# Global limit of users.
; global_user = -1
# global limit of orgs.
; global_org = -1
# global limit of dashboards
; global_dashboard = -1
# global limit of api_keys
; global_api_key = -1
# global limit on number of logged in users.
; global_session = -1
#################################### Alerting ############################
[alerting]
# Disable alerting engine & UI features
;enabled = true
# Makes it possible to turn off alert rule execution but alerting UI is visible
;execute_alerts = true
# Default setting for new alert rules. Defaults to categorize error and timeouts as alerting. (alerting, keep_state)
;error_or_timeout = alerting
# Default setting for how Grafana handles nodata or null values in alerting. (alerting, no_data, keep_state, ok)
;nodata_or_nullvalues = no_data
# Alert notifications can include images, but rendering many images at the same time can overload the server
# This limit will protect the server from render overloading and make sure notifications are sent out quickly
;concurrent_render_limit = 5
# Default setting for alert calculation timeout. Default value is 30
;evaluation_timeout_seconds = 30
# Default setting for alert notification timeout. Default value is 30
;notification_timeout_seconds = 30
# Default setting for max attempts to sending alert notifications. Default value is 3
;max_attempts = 3
# Makes it possible to enforce a minimal interval between evaluations, to reduce load on the backend
;min_interval_seconds = 1
# Configures for how long alert annotations are stored. Default is 0, which keeps them forever.
# This setting should be expressed as a duration. Examples: 6h (hours), 10d (days), 2w (weeks), 1M (month).
;max_annotation_age =
# Configures max number of alert annotations that Grafana stores. Default value is 0, which keeps all alert annotations.
;max_annotations_to_keep =
#################################### Annotations #########################
[annotations.dashboard]
# Dashboard annotations means that annotations are associated with the dashboard they are created on.
# Configures how long dashboard annotations are stored. Default is 0, which keeps them forever.
# This setting should be expressed as a duration. Examples: 6h (hours), 10d (days), 2w (weeks), 1M (month).
;max_age =
# Configures max number of dashboard annotations that Grafana stores. Default value is 0, which keeps all dashboard annotations.
;max_annotations_to_keep =
[annotations.api]
# API annotations means that the annotations have been created using the API without any
# association with a dashboard.
# Configures how long Grafana stores API annotations. Default is 0, which keeps them forever.
# This setting should be expressed as a duration. Examples: 6h (hours), 10d (days), 2w (weeks), 1M (month).
;max_age =
# Configures max number of API annotations that Grafana keeps. Default value is 0, which keeps all API annotations.
;max_annotations_to_keep =
#################################### Explore #############################
[explore]
# Enable the Explore section
;enabled = true
#################################### Internal Grafana Metrics ##########################
# Metrics available at HTTP API Url /metrics
[metrics]
# Disable / Enable internal metrics
;enabled = true
# Graphite Publish interval
;interval_seconds = 10
# Disable total stats (stat_totals_*) metrics to be generated
;disable_total_stats = false
#If both are set, basic auth will be required for the metrics endpoint.
; basic_auth_username =
; basic_auth_password =
# Metrics environment info adds dimensions to the `grafana_environment_info` metric, which
# can expose more information about the Grafana instance.
[metrics.environment_info]
#exampleLabel1 = exampleValue1
#exampleLabel2 = exampleValue2
# Send internal metrics to Graphite
[metrics.graphite]
# Enable by setting the address setting (ex localhost:2003)
;address =
;prefix = prod.grafana.%(instance_name)s.
#################################### Grafana.com integration ##########################
# Url used to import dashboards directly from Grafana.com
[grafana_com]
;url = https://grafana.com
#################################### Distributed tracing ############
[tracing.jaeger]
# Enable by setting the address sending traces to jaeger (ex localhost:6831)
;address = localhost:6831
# Tag that will always be included in when creating new spans. ex (tag1:value1,tag2:value2)
;always_included_tag = tag1:value1
# Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote
;sampler_type = const
# jaeger samplerconfig param
# for "const" sampler, 0 or 1 for always false/true respectively
# for "probabilistic" sampler, a probability between 0 and 1
# for "rateLimiting" sampler, the number of spans per second
# for "remote" sampler, param is the same as for "probabilistic"
# and indicates the initial sampling rate before the actual one
# is received from the mothership
;sampler_param = 1
# sampling_server_url is the URL of a sampling manager providing a sampling strategy.
;sampling_server_url =
# Whether or not to use Zipkin propagation (x-b3- HTTP headers).
;zipkin_propagation = false
# Setting this to true disables shared RPC spans.
# Not disabling is the most common setting when using Zipkin elsewhere in your infrastructure.
;disable_shared_zipkin_spans = false
#################################### External image storage ##########################
[external_image_storage]
# Used for uploading images to public servers so they can be included in slack/email messages.
# you can choose between (s3, webdav, gcs, azure_blob, local)
;provider =
[external_image_storage.s3]
;endpoint =
;path_style_access =
;bucket =
;region =
;path =
;access_key =
;secret_key =
[external_image_storage.webdav]
;url =
;public_url =
;username =
;password =
[external_image_storage.gcs]
;key_file =
;bucket =
;path =
[external_image_storage.azure_blob]
;account_name =
;account_key =
;container_name =
[external_image_storage.local]
# does not require any configuration
[rendering]
# Options to configure a remote HTTP image rendering service, e.g. using https://github.com/grafana/grafana-image-renderer.
# URL to a remote HTTP image renderer service, e.g. http://localhost:8081/render, will enable Grafana to render panels and dashboards to PNG-images using HTTP requests to an external service.
;server_url =
# If the remote HTTP image renderer service runs on a different server than the Grafana server you may have to configure this to a URL where Grafana is reachable, e.g. http://grafana.domain/.
;callback_url =
# Concurrent render request limit affects when the /render HTTP endpoint is used. Rendering many images at the same time can overload the server,
# which this setting can help protect against by only allowing a certain amount of concurrent requests.
;concurrent_render_request_limit = 30
[panels]
# If set to true Grafana will allow script tags in text panels. Not recommended as it enable XSS vulnerabilities.
;disable_sanitize_html = false
[plugins]
;enable_alpha = false
;app_tls_skip_verify_insecure = false
# Enter a comma-separated list of plugin identifiers to identify plugins that are allowed to be loaded even if they lack a valid signature.
;allow_loading_unsigned_plugins =
;marketplace_url = https://grafana.com/grafana/plugins/
#################################### Grafana Image Renderer Plugin ##########################
[plugin.grafana-image-renderer]
# Instruct headless browser instance to use a default timezone when not provided by Grafana, e.g. when rendering panel image of alert.
# See ICUs metaZones.txt (https://cs.chromium.org/chromium/src/third_party/icu/source/data/misc/metaZones.txt) for a list of supported
# timezone IDs. Fallbacks to TZ environment variable if not set.
;rendering_timezone =
# Instruct headless browser instance to use a default language when not provided by Grafana, e.g. when rendering panel image of alert.
# Please refer to the HTTP header Accept-Language to understand how to format this value, e.g. 'fr-CH, fr;q=0.9, en;q=0.8, de;q=0.7, *;q=0.5'.
;rendering_language =
# Instruct headless browser instance to use a default device scale factor when not provided by Grafana, e.g. when rendering panel image of alert.
# Default is 1. Using a higher value will produce more detailed images (higher DPI), but will require more disk space to store an image.
;rendering_viewport_device_scale_factor =
# Instruct headless browser instance whether to ignore HTTPS errors during navigation. Per default HTTPS errors are not ignored. Due to
# the security risk it's not recommended to ignore HTTPS errors.
;rendering_ignore_https_errors =
# Instruct headless browser instance whether to capture and log verbose information when rendering an image. Default is false and will
# only capture and log error messages. When enabled, debug messages are captured and logged as well.
# For the verbose information to be included in the Grafana server log you have to adjust the rendering log level to debug, configure
# [log].filter = rendering:debug.
;rendering_verbose_logging =
# Instruct headless browser instance whether to output its debug and error messages into running process of remote rendering service.
# Default is false. This can be useful to enable (true) when troubleshooting.
;rendering_dumpio =
# Additional arguments to pass to the headless browser instance. Default is --no-sandbox. The list of Chromium flags can be found
# here (https://peter.sh/experiments/chromium-command-line-switches/). Multiple arguments is separated with comma-character.
;rendering_args =
# You can configure the plugin to use a different browser binary instead of the pre-packaged version of Chromium.
# Please note that this is not recommended, since you may encounter problems if the installed version of Chrome/Chromium is not
# compatible with the plugin.
;rendering_chrome_bin =
# Instruct how headless browser instances are created. Default is 'default' and will create a new browser instance on each request.
# Mode 'clustered' will make sure that only a maximum of browsers/incognito pages can execute concurrently.
# Mode 'reusable' will have one browser instance and will create a new incognito page on each request.
;rendering_mode =
# When rendering_mode = clustered you can instruct how many browsers or incognito pages can execute concurrently. Default is 'browser'
# and will cluster using browser instances.
# Mode 'context' will cluster using incognito pages.
;rendering_clustering_mode =
# When rendering_mode = clustered you can define the maximum number of browser instances/incognito pages that can execute concurrently.
;rendering_clustering_max_concurrency =
# Limit the maximum viewport width, height and device scale factor that can be requested.
;rendering_viewport_max_width =
;rendering_viewport_max_height =
;rendering_viewport_max_device_scale_factor =
# Change the listening host and port of the gRPC server. Default host is 127.0.0.1 and default port is 0 and will automatically assign
# a port not in use.
;grpc_host =
;grpc_port =
[enterprise]
# Path to a valid Grafana Enterprise license.jwt file
;license_path =
[feature_toggles]
# enable features, separated by spaces
;enable =
[date_formats]
# For information on what formatting patterns that are supported https://momentjs.com/docs/#/displaying/
# Default system date format used in time range picker and other places where full time is displayed
;full_date = YYYY-MM-DD HH:mm:ss
# Used by graph and other places where we only show small intervals
;interval_second = HH:mm:ss
;interval_minute = HH:mm
;interval_hour = MM/DD HH:mm
;interval_day = MM/DD
;interval_month = YYYY-MM
;interval_year = YYYY
# Experimental feature
;use_browser_locale = false
# Default timezone for user preferences. Options are 'browser' for the browser local timezone or a timezone name from IANA Time Zone database, e.g. 'UTC' or 'Europe/Amsterdam' etc.
;default_timezone = browser


@@ -0,0 +1 @@
Data directory. Run `chmod 777 /docker/kafka/data` to grant read/write permissions, otherwise data cannot be written.


@@ -0,0 +1,4 @@
# Cluster config file: ip+port, one node per line
127.0.0.1:8848
127.0.0.1:8848
127.0.0.1:8848
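# Example three-node layout (sketch; these addresses are placeholders for real node IPs):
# 192.168.31.101:8848
# 192.168.31.102:8848
# 192.168.31.103:8848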


@@ -0,0 +1,91 @@
worker_processes 1;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
sendfile on;
keepalive_timeout 65;
# Limit request body size
client_max_body_size 100m;
# Serve precompressed static assets
gzip_static on;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
upstream server {
ip_hash;
# gateway address
server 127.0.0.1:8080;
# server 127.0.0.1:8081;
}
server {
listen 80;
server_name localhost;
# HTTPS reference config: start
#listen 443 ssl;
# Certificates go directly in the /docker/nginx/cert/ directory; change only the certificate file names, not the path
#ssl on;
#ssl_certificate /etc/nginx/cert/xxx.local.crt; # /etc/nginx/cert/ is the docker-mapped path; do not change
#ssl_certificate_key /etc/nginx/cert/xxx.local.key; # /etc/nginx/cert/ is the docker-mapped path; do not change
#ssl_session_timeout 5m;
#ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE:ECDH:AES:HIGH:!NULL:!aNULL:!MD5:!ADH:!RC4;
#ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
#ssl_prefer_server_ciphers on;
# HTTPS reference config: end
# Demo environment config: intercept all requests other than GET and POST
# if ($request_method !~* GET|POST) {
# rewrite ^/(.*)$ /403;
# }
# location = /403 {
# default_type application/json;
# return 200 '{"msg":"Demo mode, operation not allowed","code":500}';
# }
# Block external access to internal actuator paths
location ~ ^(/[^/]*)?/actuator.*(/.*)?$ {
return 403;
}
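# Quick check that the block above works (sketch; assumes nginx is reachable on port 80):
#   curl -i http://127.0.0.1/actuator/health        # expect 403
#   curl -i http://127.0.0.1/prod-api/actuator/env  # expect 403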
location / {
root /usr/share/nginx/html; # docker-mapped path; do not change
try_files $uri $uri/ /index.html;
index index.html index.htm;
}
location /prod-api/ {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header REMOTE-HOST $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_read_timeout 86400s;
# SSE and WebSocket parameters
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_buffering off;
proxy_cache off;
proxy_pass http://server/;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
}
}


@@ -0,0 +1,59 @@
# my global config
global:
scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
# - alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
- job_name: 'Prometheus'
static_configs:
- targets: ['127.0.0.1:9090']
- job_name: 'Grafana'
static_configs:
- targets: ['127.0.0.1:3000']
- job_name: 'Nacos'
metrics_path: '/nacos/actuator/prometheus'
static_configs:
- targets: ['127.0.0.1:8848']
# Basic auth credentials for the monitored service
basic_auth:
username: ruoyi
password: 123456
- job_name: RuoYi-Cloud-Plus
honor_timestamps: true
scrape_interval: 15s
scrape_timeout: 10s
metrics_path: /actuator/prometheus
scheme: http
# Basic auth credentials for the monitored service
basic_auth:
username: ruoyi
password: 123456
http_sd_configs:
- url: 'http://127.0.0.1:9100/actuator/prometheus/sd'
# Basic auth credentials for the monitored service
basic_auth:
username: ruoyi
password: 123456
relabel_configs:
# If the target has a context_path label, rewrite metrics_path to include that context path
- source_labels: [__meta_http_sd_context_path]
target_label: __metrics_path__
replacement: '${1}/actuator/prometheus'
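# To apply edits to this file without recreating the container (sketch; assumes
# Prometheus was started with --web.enable-lifecycle, otherwise restart it):
#   curl -X POST http://127.0.0.1:9090/-/reload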


@@ -0,0 +1,11 @@
FROM rabbitmq:3.13.3-management
LABEL maintainer="Lion Li"
ADD ./rabbitmq_delayed_message_exchange-3.13.0.ez /plugins
RUN rabbitmq-plugins enable rabbitmq_delayed_message_exchange
ENTRYPOINT ["rabbitmq-server"]
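# Build sketch (assumes the .ez plugin file sits next to this Dockerfile, matching
# the compose build context ./rabbitmq):
#   docker compose build rabbitmq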


@@ -0,0 +1 @@
Data directory. Run `chmod 777 /docker/rabbitmq/data` to grant read/write permissions, otherwise data cannot be written.


@@ -0,0 +1 @@
Log directory. Run `chmod 777 /docker/rabbitmq/log` to grant read/write permissions, otherwise data cannot be written.


@@ -0,0 +1,28 @@
# redis password
requirepass ruoyi123
# Keyspace notification config
# notify-keyspace-events Ex
# Directory for persistence files
dir /redis/data
# RDB config
# Snapshot if at least 1 key changed within 15 minutes
save 900 1
# Snapshot if at least 10 keys changed within 5 minutes
save 300 10
# Snapshot if at least 10000 keys changed within 1 minute
save 60 10000
# Enable RDB compression
rdbcompression yes
# RDB file name; the default is fine
dbfilename dump.rdb
# Enable AOF
appendonly yes
# AOF file name
appendfilename "appendonly.aof"
# fsync policy: no = do not sync, everysec = once per second, always = sync every write (slow)
# appendfsync always
appendfsync everysec
# appendfsync no
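# To confirm these settings once the container is up (sketch; uses the password set above):
#   docker exec redis redis-cli -a ruoyi123 config get save
#   docker exec redis redis-cli -a ruoyi123 info persistence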


@@ -0,0 +1 @@
Data directory. Run `chmod 777 /docker/redis/data` to grant read/write permissions, otherwise data cannot be written.


@@ -0,0 +1,26 @@
# Name of the cluster this broker belongs to; with many nodes you can configure multiple clusters
brokerClusterName = DefaultCluster
# Broker name; master and slave use the same name to mark their master/slave relationship
brokerName = broker1
# 0 means master; values greater than 0 identify different slaves
brokerId = 0
# Hour at which message deletion runs; default is 4 AM
deleteWhen = 04
# How long messages are kept on disk, in hours
fileReservedTime = 48
# One of SYNC_MASTER, ASYNC_MASTER, SLAVE; sync/async describes how the master and slave replicate data
brokerRole = ASYNC_MASTER
# Flush policy, ASYNC_FLUSH or SYNC_FLUSH; SYNC_FLUSH returns success only after the message is written to disk, ASYNC_FLUSH does not wait
flushDiskType = ASYNC_FLUSH
# IP of the server this broker runs on. **Very important: in master/slave mode the slave syncs data
# from the master's brokerIP2; without it, master/slave sync fails. Set brokerIP1 to an IP reachable
# from outside. On dual-NIC servers (e.g. Alibaba Cloud) the master must configure both ip1 and ip2;
# slaves only need ip1.
# The value depends on the environment: 127.x for local use, 192.x on a LAN, or the public IP for external access
brokerIP1 = 192.168.31.165
# Port the broker listens on for external services
listenPort = 10911
# Whether the broker may auto-create topics
autoCreateTopicEnable = true
# Whether the broker may auto-create subscription groups
autoCreateSubscriptionGroup = true
# Enable epoll on Linux
useEpollNativeSelector = true
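# To confirm the broker registered with the name server (sketch; assumes mqadmin is
# available on the PATH inside the container image):
#   docker exec rmqbroker1 sh mqadmin clusterList -n 127.0.0.1:9876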


@@ -0,0 +1 @@
Log directory. Run `chmod 777 /docker/rocketmq/broker1/logs` to grant read/write permissions, otherwise data cannot be written.


@@ -0,0 +1 @@
Data directory. Run `chmod 777 /docker/rocketmq/broker1/store` to grant read/write permissions, otherwise data cannot be written.


@@ -0,0 +1 @@
Log directory. Run `chmod 777 /docker/rocketmq/namesrv/logs` to grant read/write permissions, otherwise data cannot be written.


@@ -0,0 +1,119 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
######################################################################################################
#
# Here you can configure the rules for the proxy.
# This example is configuration of encrypt rule.
#
######################################################################################################
#
#databaseName: encrypt_db
#
#dataSources:
# ds_0:
# url: jdbc:postgresql://127.0.0.1:5432/demo_ds_0
# username: postgres
# password: postgres
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
# ds_1:
# url: jdbc:postgresql://127.0.0.1:5432/demo_ds_1
# username: postgres
# password: postgres
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
#
#rules:
#- !ENCRYPT
# encryptors:
# aes_encryptor:
# type: AES
# props:
# aes-key-value: 123456abc
# rc4_encryptor:
# type: RC4
# props:
# rc4-key-value: 123456abc
# tables:
# t_encrypt:
# columns:
# user_id:
# cipher:
# name: user_cipher
# encryptorName: aes_encryptor
# order_id:
# cipher:
# name: order_encrypt
# encryptorName: rc4_encryptor
######################################################################################################
#
# If you want to connect to MySQL, you should manually copy MySQL driver to lib directory.
#
######################################################################################################
#
#databaseName: encrypt_db
#
#dataSources:
# ds_0:
# url: jdbc:mysql://127.0.0.1:3306/demo_ds_0?serverTimezone=UTC&useSSL=false
# username: root
# password:
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
# ds_1:
# url: jdbc:mysql://127.0.0.1:3306/demo_ds_1?serverTimezone=UTC&useSSL=false
# username: root
# password:
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
#
#rules:
#- !ENCRYPT
# encryptors:
# aes_encryptor:
# type: AES
# props:
# aes-key-value: 123456abc
# rc4_encryptor:
# type: RC4
# props:
# rc4-key-value: 123456abc
# tables:
# t_encrypt:
# columns:
# user_id:
# cipher:
# name: user_cipher
# encryptorName: aes_encryptor
# order_id:
# cipher:
# name: order_encrypt
# encryptorName: rc4_encryptor


@@ -0,0 +1,127 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
######################################################################################################
#
# Here you can configure the rules for the proxy.
# This is an example configuration of the mask rule.
#
######################################################################################################
#
#databaseName: mask_db
#
#dataSources:
# ds_0:
# url: jdbc:postgresql://127.0.0.1:5432/demo_ds_0
# username: postgres
# password: postgres
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
# ds_1:
# url: jdbc:postgresql://127.0.0.1:5432/demo_ds_1
# username: postgres
# password: postgres
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
#
#rules:
#- !MASK
# tables:
# t_user:
# columns:
# password:
# maskAlgorithm: md5_mask
# email:
# maskAlgorithm: mask_before_special_chars_mask
# telephone:
# maskAlgorithm: keep_first_n_last_m_mask
#
# maskAlgorithms:
# md5_mask:
# type: MD5
# mask_before_special_chars_mask:
# type: MASK_BEFORE_SPECIAL_CHARS
# props:
# special-chars: '@'
# replace-char: '*'
# keep_first_n_last_m_mask:
# type: KEEP_FIRST_N_LAST_M
# props:
# first-n: 3
# last-m: 4
# replace-char: '*'
######################################################################################################
#
# If you want to connect to MySQL, you should manually copy the MySQL driver to the lib directory.
#
######################################################################################################
#
#databaseName: mask_db
#
#dataSources:
# ds_0:
# url: jdbc:mysql://127.0.0.1:3306/demo_ds_0?serverTimezone=UTC&useSSL=false
# username: root
# password:
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
# ds_1:
# url: jdbc:mysql://127.0.0.1:3306/demo_ds_1?serverTimezone=UTC&useSSL=false
# username: root
# password:
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
#
#rules:
#- !MASK
# tables:
# t_user:
# columns:
# password:
# maskAlgorithm: md5_mask
# email:
# maskAlgorithm: mask_before_special_chars_mask
# telephone:
# maskAlgorithm: keep_first_n_last_m_mask
#
# maskAlgorithms:
# md5_mask:
# type: MD5
# mask_before_special_chars_mask:
# type: MASK_BEFORE_SPECIAL_CHARS
# props:
# special-chars: '@'
# replace-char: '*'
# keep_first_n_last_m_mask:
# type: KEEP_FIRST_N_LAST_M
# props:
# first-n: 3
# last-m: 4
# replace-char: '*'
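######################################################################################################
#
# A worked sketch of the algorithms above (the sample values are hypothetical):
#   - md5_mask:                        'secret'        -> replaced by its MD5 digest
#   - mask_before_special_chars_mask:  'abc@test.com'  -> '***@test.com'  (chars before '@' masked)
#   - keep_first_n_last_m_mask:        '13812345678'   -> '138****5678'   (first 3 and last 4 kept)
#
######################################################################################################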

View File

@@ -0,0 +1,117 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
######################################################################################################
#
# Here you can configure the rules for the proxy.
# This is an example configuration of the readwrite-splitting rule.
#
######################################################################################################
#
#databaseName: readwrite_splitting_db
#
#dataSources:
# primary_ds:
# url: jdbc:postgresql://127.0.0.1:5432/demo_primary_ds
# username: postgres
# password: postgres
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
# replica_ds_0:
# url: jdbc:postgresql://127.0.0.1:5432/demo_replica_ds_0
# username: postgres
# password: postgres
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
# replica_ds_1:
# url: jdbc:postgresql://127.0.0.1:5432/demo_replica_ds_1
# username: postgres
# password: postgres
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
#
#rules:
#- !READWRITE_SPLITTING
# dataSources:
# readwrite_ds:
# writeDataSourceName: primary_ds
# readDataSourceNames:
# - replica_ds_0
# - replica_ds_1
# loadBalancerName: random
# loadBalancers:
# random:
# type: RANDOM
######################################################################################################
#
# If you want to connect to MySQL, you should manually copy the MySQL driver to the lib directory.
#
######################################################################################################
#databaseName: readwrite_splitting_db
#
#dataSources:
# write_ds:
# url: jdbc:mysql://127.0.0.1:3306/demo_write_ds?serverTimezone=UTC&useSSL=false
# username: root
# password:
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
# read_ds_0:
# url: jdbc:mysql://127.0.0.1:3306/demo_read_ds_0?serverTimezone=UTC&useSSL=false
# username: root
# password:
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
# read_ds_1:
# url: jdbc:mysql://127.0.0.1:3306/demo_read_ds_1?serverTimezone=UTC&useSSL=false
# username: root
# password:
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
#
#rules:
#- !READWRITE_SPLITTING
# dataSources:
# readwrite_ds:
# writeDataSourceName: write_ds
# readDataSourceNames:
# - read_ds_0
# - read_ds_1
# loadBalancerName: random
# loadBalancers:
# random:
# type: RANDOM
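######################################################################################################
#
# A sketch (assuming the data sources above) of swapping the RANDOM balancer for round-robin:
# writes always go to writeDataSourceName, while reads rotate across readDataSourceNames.
#
#      loadBalancerName: round_robin
#  loadBalancers:
#    round_robin:
#      type: ROUND_ROBIN
#
######################################################################################################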

View File

@@ -0,0 +1,175 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
######################################################################################################
#
# Here you can configure the rules for the proxy.
# This is an example configuration of the shadow rule.
#
######################################################################################################
#
#databaseName: shadow_db
#
#dataSources:
# ds:
# url: jdbc:postgresql://127.0.0.1:5432/demo_ds_0
# username: postgres
# password: postgres
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
# shadow_ds:
# url: jdbc:postgresql://127.0.0.1:5432/demo_ds_1
# username: postgres
# password: postgres
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
#
#rules:
#- !SHADOW
# dataSources:
# shadowDataSource:
# productionDataSourceName: ds
# shadowDataSourceName: shadow_ds
# tables:
# t_order:
# dataSourceNames:
# - shadowDataSource
# shadowAlgorithmNames:
# - user_id_insert_match_algorithm
# - user_id_select_match_algorithm
# t_order_item:
# dataSourceNames:
# - shadowDataSource
# shadowAlgorithmNames:
# - user_id_insert_match_algorithm
# - user_id_update_match_algorithm
# - user_id_select_match_algorithm
# t_address:
# dataSourceNames:
# - shadowDataSource
# shadowAlgorithmNames:
# - user_id_insert_match_algorithm
# - user_id_select_match_algorithm
# - sql_hint_algorithm
# shadowAlgorithms:
# user_id_insert_match_algorithm:
# type: REGEX_MATCH
# props:
# operation: insert
# column: user_id
# regex: "[1]"
# user_id_update_match_algorithm:
# type: REGEX_MATCH
# props:
# operation: update
# column: user_id
# regex: "[1]"
# user_id_select_match_algorithm:
# type: REGEX_MATCH
# props:
# operation: select
# column: user_id
# regex: "[1]"
# sql_hint_algorithm:
# type: SQL_HINT
# props:
# foo: bar
######################################################################################################
#
# If you want to connect to MySQL, you should manually copy the MySQL driver to the lib directory.
#
######################################################################################################
#
#databaseName: shadow_db
#
#dataSources:
# ds:
# url: jdbc:mysql://127.0.0.1:3306/demo_ds_0?serverTimezone=UTC&useSSL=false
# username: root
# password:
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
# shadow_ds:
# url: jdbc:mysql://127.0.0.1:3306/demo_ds_1?serverTimezone=UTC&useSSL=false
# username: root
# password:
# connectionTimeoutMilliseconds: 30000
# idleTimeoutMilliseconds: 60000
# maxLifetimeMilliseconds: 1800000
# maxPoolSize: 50
# minPoolSize: 1
#
#rules:
#- !SHADOW
# dataSources:
# shadowDataSource:
# productionDataSourceName: ds
# shadowDataSourceName: shadow_ds
# tables:
# t_order:
# dataSourceNames:
# - shadowDataSource
# shadowAlgorithmNames:
# - user_id_insert_match_algorithm
# - user_id_select_match_algorithm
# t_order_item:
# dataSourceNames:
# - shadowDataSource
# shadowAlgorithmNames:
# - user_id_insert_match_algorithm
# - user_id_update_match_algorithm
# - user_id_select_match_algorithm
# t_address:
# dataSourceNames:
# - shadowDataSource
# shadowAlgorithmNames:
# - user_id_insert_match_algorithm
# - user_id_select_match_algorithm
# - sql_hint_algorithm
# shadowAlgorithms:
# user_id_insert_match_algorithm:
# type: REGEX_MATCH
# props:
# operation: insert
# column: user_id
# regex: "[1]"
# user_id_update_match_algorithm:
# type: REGEX_MATCH
# props:
# operation: update
# column: user_id
# regex: "[1]"
# user_id_select_match_algorithm:
# type: REGEX_MATCH
# props:
# operation: select
# column: user_id
# regex: "[1]"
# sql_hint_algorithm:
# type: SQL_HINT
# props:
# foo: bar
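######################################################################################################
#
# How the rule above routes (a worked sketch): REGEX_MATCH compares the named column against the
# regex for the given operation, so an insert into t_order with user_id = 1 matches "[1]" and is
# executed on shadow_ds, while user_id = 2 stays on the production ds. An exact-value variant is
# also available (the algorithm name below is hypothetical):
#
#  shadowAlgorithms:
#    user_id_value_match_algorithm:
#      type: VALUE_MATCH
#      props:
#        operation: insert
#        column: user_id
#        value: 1
#
######################################################################################################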

View File

@@ -0,0 +1,124 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
######################################################################################################
#
# Here you can configure the rules for the proxy.
# This is an example configuration of the sharding rule.
#
######################################################################################################
databaseName: data-center_db
dataSources:
ds_0:
url: jdbc:mysql://localhost:3306/data-center_0?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull&useSSL=true&serverTimezone=GMT%2B8&autoReconnect=true&rewriteBatchedStatements=true
username: root
password: root
connectionTimeoutMilliseconds: 30000
idleTimeoutMilliseconds: 60000
maxLifetimeMilliseconds: 1800000
maxPoolSize: 50
minPoolSize: 1
ds_1:
url: jdbc:mysql://localhost:3306/data-center_1?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull&useSSL=true&serverTimezone=GMT%2B8&autoReconnect=true&rewriteBatchedStatements=true
username: root
password: root
connectionTimeoutMilliseconds: 30000
idleTimeoutMilliseconds: 60000
maxLifetimeMilliseconds: 1800000
maxPoolSize: 50
minPoolSize: 1
rules:
- !SHARDING
  tables: # sharding rule configuration per table
    t_order: # logical table name for orders
actualDataNodes: ds_${0..1}.t_order_${0..1}
      databaseStrategy: # database sharding strategy
standard:
shardingColumn: user_id
shardingAlgorithmName: database_user_inline
      tableStrategy: # table sharding strategy
standard:
shardingColumn: order_id
shardingAlgorithmName: t_order_inline
keyGenerateStrategy:
column: order_id
keyGeneratorName: snowflake
# auditStrategy:
# auditorNames:
# - sharding_key_required_auditor
# allowHintDisable: true
    t_order_item: # logical table name for order items
actualDataNodes: ds_${0..1}.t_order_item_${0..1}
      databaseStrategy: # database sharding strategy
standard:
shardingColumn: user_id
shardingAlgorithmName: database_user_inline
      tableStrategy: # table sharding strategy
standard:
shardingColumn: order_id
shardingAlgorithmName: t_order_item_inline
keyGenerateStrategy:
column: order_item_id
keyGeneratorName: snowflake
  bindingTables: # binding table rule list
- t_order,t_order_item
# defaultDatabaseStrategy:
# standard:
# shardingColumn: user_id
# shardingAlgorithmName: database_user_inline
# defaultTableStrategy:
# none:
# defaultAuditStrategy:
# auditorNames:
# - sharding_key_required_auditor
# allowHintDisable: true
  # sharding algorithm configuration
shardingAlgorithms:
database_user_inline:
type: INLINE
props:
algorithm-expression: ds_${user_id % 2}
    t_order_inline: # sharding algorithm name for the order table
type: INLINE
props:
algorithm-expression: t_order_${order_id % 2}
allow-range-query-with-inline-sharding: true
    t_order_item_inline: # sharding algorithm name for the order item table
type: INLINE
props:
algorithm-expression: t_order_item_${order_id % 2}
allow-range-query-with-inline-sharding: true
  # distributed key generator configuration
keyGenerators:
snowflake:
type: SNOWFLAKE
props:
worker-id: 1
# auditors:
# sharding_key_required_auditor:
# type: DML_SHARDING_CONDITIONS
# - !BROADCAST
# tables:
# - t_address
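######################################################################################################
#
# A worked routing sketch for the inline expressions above (the sample ids are hypothetical):
#   user_id = 7, order_id = 12  ->  ds_${7 % 2} = ds_1,  t_order_${12 % 2} = t_order_0
# so the row lands in ds_1.t_order_0; t_order_item rows with the same user_id/order_id land in
# ds_1.t_order_item_0, which is why t_order and t_order_item are declared as binding tables.
#
######################################################################################################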

View File

@@ -0,0 +1,92 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
######################################################################################################
#
# If you want to configure governance, authorization and proxy properties, please refer to this file.
#
######################################################################################################
# mode:
# type: Cluster
# repository:
# type: ZooKeeper
# props:
# namespace: governance_ds
# server-lists: localhost:2181
# retryIntervalMilliseconds: 500
# timeToLiveSeconds: 60
# maxRetries: 3
# operationTimeoutMilliseconds: 500
authority:
users:
- user: root@%
password: root
- user: sharding
password: sharding
privilege:
type: ALL_PERMITTED
transaction:
defaultType: XA
providerType: Atomikos
sqlParser:
sqlCommentParseEnabled: false
sqlStatementCache:
initialCapacity: 2000
maximumSize: 65535
parseTreeCache:
initialCapacity: 128
maximumSize: 1024
logging:
loggers:
- loggerName: ShardingSphere-SQL
additivity: true
level: INFO
props:
enable: false
sqlFederation:
sqlFederationEnabled: false
executionPlanCache:
initialCapacity: 2000
maximumSize: 65535
props:
system-log-level: INFO
max-connections-size-per-query: 1
kernel-executor-size: 16 # Infinite by default.
proxy-frontend-flush-threshold: 128 # The default value is 128.
# sql-show is the same as props in logger ShardingSphere-SQL, and its priority is lower than logging rule
sql-show: false
check-table-metadata-enabled: false
# Proxy backend query fetch size. A larger value may increase the memory usage of ShardingSphere Proxy.
# The default value is -1, which means set the minimum value for different JDBC drivers.
proxy-backend-query-fetch-size: -1
proxy-frontend-executor-size: 0 # Proxy frontend executor size. The default value is 0, which means let Netty decide.
proxy-frontend-max-connections: 0 # Less than or equal to 0 means no limitation.
proxy-default-port: 3307 # Proxy default port.
proxy-netty-backlog: 1024 # Proxy netty backlog.
cdc-server-port: 33071 # CDC server port
proxy-frontend-ssl-enabled: false
proxy-frontend-ssl-cipher: ''
proxy-frontend-ssl-version: TLSv1.2,TLSv1.3
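# A connection sketch (assumptions: a MySQL command-line client on the same host; SSL flags may be
# needed depending on the client version), using proxy-default-port and the authority users above:
#   mysql -h 127.0.0.1 -P 3307 -u root -proot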

View File

@@ -0,0 +1,14 @@
# Notes
The project uses the official `skywalking` `agent` probe, trimmed down and extended:
plugins that the project is very unlikely to use were removed from the official bundle,
the plugins the project may actually need were kept,
and a few plugins the official distribution does not ship were added.
Loading too many plugins causes serious performance problems, so keep the plugin set as small as possible.
# Extending
If you hit functionality during development that the bundled plugins do not cover, download the plugin from the official `skywalking` site
and drop it into the `plugins` directory, as sketched below.
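
A minimal sketch (the image, paths and service name below are assumptions; adapt them to your deployment) of attaching the agent via `-javaagent` in a compose service, so that whatever sits in the mounted `plugins` directory takes effect:

```yaml
services:
  demo-service:
    image: demo/app:latest   # hypothetical application image
    environment:
      # attach the probe; the service name is what appears in the SkyWalking UI
      JAVA_OPTS: "-javaagent:/skywalking/agent/skywalking-agent.jar -Dskywalking.agent.service_name=demo-service"
    volumes:
      # mount the trimmed agent (including its plugins/ directory) into the container
      - /path/to/skywalking/agent:/skywalking/agent
```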

View File

@@ -0,0 +1,233 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=======================================================================
Apache SkyWalking Subcomponents:
The Apache SkyWalking project contains subcomponents with separate copyright
notices and license terms. Your use of the source code for these
subcomponents is subject to the terms and conditions of the following
licenses.
========================================================================
Apache 2.0 licenses
========================================================================
The following components are provided under the Apache License. See project link for details.
The text of each license is the standard Apache 2.0 license.
raphw (byte-buddy) 1.14.9: http://bytebuddy.net/ , Apache 2.0
Google: grpc-java 1.53.0: https://github.com/grpc/grpc-java, Apache 2.0
Google: gson 2.8.9: https://github.com/google/gson , Apache 2.0
Google: proto-google-common-protos 2.0.1: https://github.com/googleapis/googleapis , Apache 2.0
Google: jsr305 3.0.2: http://central.maven.org/maven2/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.pom , Apache 2.0
Google: guava 32.0.1: https://github.com/google/guava , Apache 2.0
netty 4.1.100: https://github.com/netty/netty/blob/4.1/LICENSE.txt, Apache 2.0
========================================================================
BSD licenses
========================================================================
The following components are provided under a BSD license. See project link for details.
The text of each license is also included at licenses/LICENSE-[project].txt.
asm 9.2: https://gitlab.ow2.org , BSD-3-Clause

View File

@@ -0,0 +1,299 @@
Apache SkyWalking
Copyright 2017-2024 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).
========================================================================
grpc-java NOTICE
========================================================================
Copyright 2014, gRPC Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-----------------------------------------------------------------------
This product contains a modified portion of 'OkHttp', an open source
HTTP & SPDY client for Android and Java applications, which can be obtained
at:
* LICENSE:
* okhttp/third_party/okhttp/LICENSE (Apache License 2.0)
* HOMEPAGE:
* https://github.com/square/okhttp
* LOCATION_IN_GRPC:
* okhttp/third_party/okhttp
This product contains a modified portion of 'Netty', an open source
networking library, which can be obtained at:
* LICENSE:
* netty/third_party/netty/LICENSE.txt (Apache License 2.0)
* HOMEPAGE:
* https://netty.io
* LOCATION_IN_GRPC:
* netty/third_party/netty
========================================================================
grpc NOTICE
========================================================================
Copyright 2014 gRPC authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
========================================================================
netty NOTICE
========================================================================
The Netty Project
=================
Please visit the Netty web site for more information:
* http://netty.io/
Copyright 2014 The Netty Project
The Netty Project licenses this file to you under the Apache License,
version 2.0 (the "License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
Also, please refer to each LICENSE.<component>.txt file, which is located in
the 'license' directory of the distribution file, for the license terms of the
components that this product depends on.
-------------------------------------------------------------------------------
This product contains the extensions to Java Collections Framework which has
been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene:
* LICENSE:
* license/LICENSE.jsr166y.txt (Public Domain)
* HOMEPAGE:
* http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/
* http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/
This product contains a modified version of Robert Harder's Public Domain
Base64 Encoder and Decoder, which can be obtained at:
* LICENSE:
* license/LICENSE.base64.txt (Public Domain)
* HOMEPAGE:
* http://iharder.sourceforge.net/current/java/base64/
This product contains a modified portion of 'Webbit', an event based
WebSocket and HTTP server, which can be obtained at:
* LICENSE:
* license/LICENSE.webbit.txt (BSD License)
* HOMEPAGE:
* https://github.com/joewalnes/webbit
This product contains a modified portion of 'SLF4J', a simple logging
facade for Java, which can be obtained at:
* LICENSE:
* license/LICENSE.slf4j.txt (MIT License)
* HOMEPAGE:
* http://www.slf4j.org/
This product contains a modified portion of 'Apache Harmony', an open source
Java SE, which can be obtained at:
* NOTICE:
* license/NOTICE.harmony.txt
* LICENSE:
* license/LICENSE.harmony.txt (Apache License 2.0)
* HOMEPAGE:
* http://archive.apache.org/dist/harmony/
This product contains a modified portion of 'jbzip2', a Java bzip2 compression
and decompression library written by Matthew J. Francis. It can be obtained at:
* LICENSE:
* license/LICENSE.jbzip2.txt (MIT License)
* HOMEPAGE:
* https://code.google.com/p/jbzip2/
This product contains a modified portion of 'libdivsufsort', a C API library to construct
the suffix array and the Burrows-Wheeler transformed string for any input string of
a constant-size alphabet written by Yuta Mori. It can be obtained at:
* LICENSE:
* license/LICENSE.libdivsufsort.txt (MIT License)
* HOMEPAGE:
* https://github.com/y-256/libdivsufsort
This product contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM,
which can be obtained at:
* LICENSE:
* license/LICENSE.jctools.txt (ASL2 License)
* HOMEPAGE:
* https://github.com/JCTools/JCTools
This product optionally depends on 'JZlib', a re-implementation of zlib in
pure Java, which can be obtained at:
* LICENSE:
* license/LICENSE.jzlib.txt (BSD style License)
* HOMEPAGE:
* http://www.jcraft.com/jzlib/
This product optionally depends on 'Compress-LZF', a Java library for encoding and
decoding data in LZF format, written by Tatu Saloranta. It can be obtained at:
* LICENSE:
* license/LICENSE.compress-lzf.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/ning/compress
This product optionally depends on 'lz4', a LZ4 Java compression
and decompression library written by Adrien Grand. It can be obtained at:
* LICENSE:
* license/LICENSE.lz4.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/jpountz/lz4-java
This product optionally depends on 'lzma-java', a LZMA Java compression
and decompression library, which can be obtained at:
* LICENSE:
* license/LICENSE.lzma-java.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/jponge/lzma-java
This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression
and decompression library written by William Kinney. It can be obtained at:
* LICENSE:
* license/LICENSE.jfastlz.txt (MIT License)
* HOMEPAGE:
* https://code.google.com/p/jfastlz/
This product contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data
interchange format, which can be obtained at:
* LICENSE:
* license/LICENSE.protobuf.txt (New BSD License)
* HOMEPAGE:
* https://github.com/google/protobuf
This product optionally depends on 'Bouncy Castle Crypto APIs' to generate
a temporary self-signed X.509 certificate when the JVM does not provide the
equivalent functionality. It can be obtained at:
* LICENSE:
* license/LICENSE.bouncycastle.txt (MIT License)
* HOMEPAGE:
* http://www.bouncycastle.org/
This product optionally depends on 'Snappy', a compression library produced
by Google Inc, which can be obtained at:
* LICENSE:
* license/LICENSE.snappy.txt (New BSD License)
* HOMEPAGE:
* https://github.com/google/snappy
This product optionally depends on 'JBoss Marshalling', an alternative Java
serialization API, which can be obtained at:
* LICENSE:
* license/LICENSE.jboss-marshalling.txt (GNU LGPL 2.1)
* HOMEPAGE:
* http://www.jboss.org/jbossmarshalling
This product optionally depends on 'Caliper', Google's micro-
benchmarking framework, which can be obtained at:
* LICENSE:
* license/LICENSE.caliper.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/google/caliper
This product optionally depends on 'Apache Commons Logging', a logging
framework, which can be obtained at:
* LICENSE:
* license/LICENSE.commons-logging.txt (Apache License 2.0)
* HOMEPAGE:
* http://commons.apache.org/logging/
This product optionally depends on 'Apache Log4J', a logging framework, which
can be obtained at:
* LICENSE:
* license/LICENSE.log4j.txt (Apache License 2.0)
* HOMEPAGE:
* http://logging.apache.org/log4j/
This product optionally depends on 'Aalto XML', an ultra-high performance
non-blocking XML processor, which can be obtained at:
* LICENSE:
* license/LICENSE.aalto-xml.txt (Apache License 2.0)
* HOMEPAGE:
* http://wiki.fasterxml.com/AaltoHome
This product contains a modified version of 'HPACK', a Java implementation of
the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at:
* LICENSE:
* license/LICENSE.hpack.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/twitter/hpack
This product contains a modified portion of 'Apache Commons Lang', a Java library
provides utilities for the java.lang API, which can be obtained at:
* LICENSE:
* license/LICENSE.commons-lang.txt (Apache License 2.0)
* HOMEPAGE:
* https://commons.apache.org/proper/commons-lang/
This product contains the Maven wrapper scripts from 'Maven Wrapper', that provides an easy way to ensure a user has everything necessary to run the Maven build.
* LICENSE:
* license/LICENSE.mvn-wrapper.txt (Apache License 2.0)
* HOMEPAGE:
* https://github.com/takari/maven-wrapper

View File

@@ -0,0 +1,331 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The service name in UI
# ${service name} = [${group name}::]${logic name}
# The group name is optional.
agent.service_name=${SW_AGENT_NAME:Your_ApplicationName}
agent.service_name#length=${SW_AGENT_NAME_MAX_LENGTH:50}
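# e.g. (hypothetical names) SW_AGENT_NAME=ruoyi::ruoyi-gateway yields group "ruoyi"
# and logic name "ruoyi-gateway" in the UI.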
# The agent namespace
agent.namespace=${SW_AGENT_NAMESPACE:}
# The agent cluster
agent.cluster=${SW_AGENT_CLUSTER:}
# The number of sampled traces per 3 seconds
# Negative or zero means off, by default
agent.sample_n_per_3_secs=${SW_AGENT_SAMPLE:-1}
# Whether authentication is active is based on the backend setting; see application.yml for more details.
agent.authentication=${SW_AGENT_AUTHENTICATION:}
# The max number of TraceSegmentRef in a single span, to keep the memory cost predictable.
agent.trace_segment_ref_limit_per_span=${SW_TRACE_SEGMENT_LIMIT:500}
# The max number of spans in a single segment.
# Through this config item, SkyWalking keeps your application's memory cost predictable.
agent.span_limit_per_segment=${SW_AGENT_SPAN_LIMIT:300}
# If the operation name of the first span is included in this set, this segment should be ignored. Multiple values should be separated by `,`.
agent.ignore_suffix=${SW_AGENT_IGNORE_SUFFIX:.jpg,.jpeg,.js,.css,.png,.bmp,.gif,.ico,.mp3,.mp4,.html,.svg}
# If true, the SkyWalking agent will save all instrumented class files in the `/debugging` folder.
# The SkyWalking team may ask for these files in order to resolve compatibility problems.
agent.is_open_debugging_class=${SW_AGENT_OPEN_DEBUG:false}
# Instance name is the identity of an instance and should be unique within the service. If empty, the SkyWalking agent will
# generate a 32-bit uuid. By default, SkyWalking uses UUID@hostname as the instance name. Max length is 50 (UTF-8 chars).
agent.instance_name=${SW_AGENT_INSTANCE_NAME:}
agent.instance_name#length=${SW_AGENT_INSTANCE_NAME_MAX_LENGTH:50}
# service instance properties in json format. e.g. agent.instance_properties_json = {"org": "apache-skywalking"}
agent.instance_properties_json=${SW_INSTANCE_PROPERTIES_JSON:}
# How deep the agent goes when logging chained cause exceptions.
agent.cause_exception_depth=${SW_AGENT_CAUSE_EXCEPTION_DEPTH:5}
# Force reconnection period of grpc, based on grpc_channel_check_interval.
agent.force_reconnection_period=${SW_AGENT_FORCE_RECONNECTION_PERIOD:1}
# The operationName max length.
# Note: in current practice, we don't recommend lengths over 190.
agent.operation_name_threshold=${SW_AGENT_OPERATION_NAME_THRESHOLD:150}
# Keep tracing even when the SkyWalking backend is not available, if this value is true.
agent.keep_tracing=${SW_AGENT_KEEP_TRACING:true}
# The agent use gRPC plain text in default.
# If true, SkyWalking agent uses TLS even no CA file detected.
agent.force_tls=${SW_AGENT_FORCE_TLS:false}
# gRPC SSL trusted ca file.
agent.ssl_trusted_ca_path=${SW_AGENT_SSL_TRUSTED_CA_PATH:/ca/ca.crt}
# enable mTLS when ssl_key_path and ssl_cert_chain_path exist.
agent.ssl_key_path=${SW_AGENT_SSL_KEY_PATH:}
agent.ssl_cert_chain_path=${SW_AGENT_SSL_CERT_CHAIN_PATH:}
# Enable the agent kernel services and instrumentation.
agent.enable=${SW_AGENT_ENABLE:true}
# Limit the size of the ipv4 list.
osinfo.ipv4_list_size=${SW_AGENT_OSINFO_IPV4_LIST_SIZE:10}
# grpc channel status check interval.
collector.grpc_channel_check_interval=${SW_AGENT_COLLECTOR_GRPC_CHANNEL_CHECK_INTERVAL:30}
# Agent heartbeat report period. Unit, second.
collector.heartbeat_period=${SW_AGENT_COLLECTOR_HEARTBEAT_PERIOD:30}
# The agent sends the instance properties to the backend every
# collector.heartbeat_period * collector.properties_report_period_factor seconds
collector.properties_report_period_factor=${SW_AGENT_COLLECTOR_PROPERTIES_REPORT_PERIOD_FACTOR:10}
# Backend service addresses.
collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:127.0.0.1:11800}
# How long the grpc client waits before timing out when sending data upstream. Unit is second.
collector.grpc_upstream_timeout=${SW_AGENT_COLLECTOR_GRPC_UPSTREAM_TIMEOUT:30}
# Interval at which the sniffer fetches the profile task list.
collector.get_profile_task_interval=${SW_AGENT_COLLECTOR_GET_PROFILE_TASK_INTERVAL:20}
# Interval at which the sniffer fetches the agent dynamic config.
collector.get_agent_dynamic_config_interval=${SW_AGENT_COLLECTOR_GET_AGENT_DYNAMIC_CONFIG_INTERVAL:20}
# If true, the SkyWalking agent will periodically resolve DNS to update the receiver service addresses.
collector.is_resolve_dns_periodically=${SW_AGENT_COLLECTOR_IS_RESOLVE_DNS_PERIODICALLY:false}
# Logging level
logging.level=${SW_LOGGING_LEVEL:WARN}
# Logging file_name
logging.file_name=${SW_LOGGING_FILE_NAME:skywalking-api.log}
# Log output. Default is FILE. Use CONSOLE to output to stdout.
logging.output=${SW_LOGGING_OUTPUT:FILE}
# Log files directory. Default is blank string, meaning use "{theSkywalkingAgentJarDir}/logs" to output logs.
# {theSkywalkingAgentJarDir} is the directory where the skywalking agent jar file is located
logging.dir=${SW_LOGGING_DIR:}
# Logger resolver: PATTERN or JSON. The default is PATTERN, which uses logging.pattern to print traditional text logs.
# JSON resolver prints logs in JSON format.
logging.resolver=${SW_LOGGING_RESOLVER:PATTERN}
# Logging format. There are all conversion specifiers:
# * %level means log level.
# * %timestamp means now of time with format yyyy-MM-dd HH:mm:ss:SSS.
# * %thread means name of current thread.
# * %msg means some message which user logged.
# * %class means SimpleName of TargetClass.
# * %throwable means a throwable which user called.
# * %agent_name means agent.service_name. Only apply to the PatternLogger.
logging.pattern=${SW_LOGGING_PATTERN:%level %timestamp %thread %class : %msg %throwable}
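# An illustrative (made-up) line under the default pattern:
#   WARN 2024-01-01 12:00:00:000 main GRPCChannelManager : Connect to remote backend failed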
# Logging max_file_size, default: 300 * 1024 * 1024 = 314572800
logging.max_file_size=${SW_LOGGING_MAX_FILE_SIZE:314572800}
# The max history log files. When rollover happens, if the log files exceed this number,
# the oldest file will be deleted. Negative or zero means off, by default.
logging.max_history_files=${SW_LOGGING_MAX_HISTORY_FILES:-1}
# Listed exceptions would not be treated as an error. Because in some codes, the exception is being used as a way of controlling business flow.
# Besides, the annotation named IgnoredException in the trace toolkit is another way to configure ignored exceptions.
statuscheck.ignored_exceptions=${SW_STATUSCHECK_IGNORED_EXCEPTIONS:}
# The max recursive depth when checking the exception traced by the agent. Typically, we don't recommend setting this higher than 10, as that could cause a performance issue. Negative values and 0 are ignored, which means all exceptions will tag the span with error status.
statuscheck.max_recursive_depth=${SW_STATUSCHECK_MAX_RECURSIVE_DEPTH:1}
# Max element count in the correlation context
correlation.element_max_number=${SW_CORRELATION_ELEMENT_MAX_NUMBER:3}
# Max value length of each element.
correlation.value_max_length=${SW_CORRELATION_VALUE_MAX_LENGTH:128}
# Tag the span by the key/value in the correlation context, when the keys listed here exist.
correlation.auto_tag_keys=${SW_CORRELATION_AUTO_TAG_KEYS:}
# The buffer size of collected JVM info.
jvm.buffer_size=${SW_JVM_BUFFER_SIZE:600}
# The period of JVM metrics collection. Unit is second.
jvm.metrics_collect_period=${SW_JVM_METRICS_COLLECT_PERIOD:1}
# The buffer channel size.
buffer.channel_size=${SW_BUFFER_CHANNEL_SIZE:5}
# The buffer size.
buffer.buffer_size=${SW_BUFFER_BUFFER_SIZE:300}
# If true, the SkyWalking agent will enable profiling when a user creates a new profile task; otherwise profiling is disabled.
profile.active=${SW_AGENT_PROFILE_ACTIVE:true}
# Parallel monitor endpoint thread count
profile.max_parallel=${SW_AGENT_PROFILE_MAX_PARALLEL:5}
# Max number of monitoring sub-tasks for a single endpoint access.
profile.max_accept_sub_parallel=${SW_AGENT_PROFILE_MAX_ACCEPT_SUB_PARALLEL:5}
# Max segment monitoring time (minutes); if the current segment has been monitored longer than this limit, monitoring stops.
profile.duration=${SW_AGENT_PROFILE_DURATION:10}
# Max dump thread stack depth
profile.dump_max_stack_depth=${SW_AGENT_PROFILE_DUMP_MAX_STACK_DEPTH:500}
# Snapshot transport to backend buffer size
profile.snapshot_transport_buffer_size=${SW_AGENT_PROFILE_SNAPSHOT_TRANSPORT_BUFFER_SIZE:4500}
# If true, the agent collects and reports metrics to the backend.
meter.active=${SW_METER_ACTIVE:true}
# Report meters interval. The unit is second
meter.report_interval=${SW_METER_REPORT_INTERVAL:20}
# Max size of the meter pool
meter.max_meter_size=${SW_METER_MAX_METER_SIZE:500}
# The max size of a message to send to the server. Default is 10 MB.
log.max_message_size=${SW_GRPC_LOG_MAX_MESSAGE_SIZE:10485760}
# Mount the specific folders of the plugins. Plugins in mounted folders would work.
plugin.mount=${SW_MOUNT_FOLDERS:plugins,activations}
# Peer maximum description limit.
plugin.peer_max_length=${SW_PLUGIN_PEER_MAX_LENGTH:200}
# Exclude some plugins defined in the plugins dir. Plugin names are defined in the [Agent plugin list](Plugin-list.md).
plugin.exclude_plugins=${SW_EXCLUDE_PLUGINS:}
# If true, trace all the parameters in MongoDB access; default is false, which traces only the operation without parameters.
plugin.mongodb.trace_param=${SW_PLUGIN_MONGODB_TRACE_PARAM:false}
# If set to a positive number, the `WriteRequest.params` would be truncated to this length; otherwise it would be completely saved, which may cause performance problems.
plugin.mongodb.filter_length_limit=${SW_PLUGIN_MONGODB_FILTER_LENGTH_LIMIT:256}
# If true, trace all the DSL(Domain Specific Language) in ElasticSearch access, default is false.
plugin.elasticsearch.trace_dsl=${SW_PLUGIN_ELASTICSEARCH_TRACE_DSL:false}
# If true, the fully qualified method name will be used as the endpoint name instead of the request URL, default is false.
plugin.springmvc.use_qualified_name_as_endpoint_name=${SW_PLUGIN_SPRINGMVC_USE_QUALIFIED_NAME_AS_ENDPOINT_NAME:false}
# If true, the fully qualified method name will be used as the operation name instead of the given operation name, default is false.
plugin.toolkit.use_qualified_name_as_operation_name=${SW_PLUGIN_TOOLKIT_USE_QUALIFIED_NAME_AS_OPERATION_NAME:false}
# If set to true, the parameters of the sql (typically `java.sql.PreparedStatement`) would be collected.
plugin.jdbc.trace_sql_parameters=${SW_JDBC_TRACE_SQL_PARAMETERS:false}
# If set to a positive number, the `db.sql.parameters` would be truncated to this length; otherwise it would be completely saved, which may cause performance problems.
plugin.jdbc.sql_parameters_max_length=${SW_PLUGIN_JDBC_SQL_PARAMETERS_MAX_LENGTH:512}
# If set to a positive number, the `db.statement` would be truncated to this length; otherwise it would be completely saved, which may cause performance problems.
plugin.jdbc.sql_body_max_length=${SW_PLUGIN_JDBC_SQL_BODY_MAX_LENGTH:2048}
# If true, trace all the query parameters (including deleteByIds and deleteByQuery) in Solr query requests; default is false.
plugin.solrj.trace_statement=${SW_PLUGIN_SOLRJ_TRACE_STATEMENT:false}
# If true, trace all the operation parameters in Solr requests; default is false.
plugin.solrj.trace_ops_params=${SW_PLUGIN_SOLRJ_TRACE_OPS_PARAMS:false}
# If true, trace all middleware/business handlers that are part of the Light4J handler chain for a request.
plugin.light4j.trace_handler_chain=${SW_PLUGIN_LIGHT4J_TRACE_HANDLER_CHAIN:false}
# If true, the transaction definition name will be simplified.
plugin.springtransaction.simplify_transaction_definition_name=${SW_PLUGIN_SPRINGTRANSACTION_SIMPLIFY_TRANSACTION_DEFINITION_NAME:false}
# Threading classes (`java.lang.Runnable` and `java.util.concurrent.Callable`) and their subclasses, including anonymous inner classes whose names match any one of the `THREADING_CLASS_PREFIXES` (separated by `,`), will be instrumented. Make sure to specify only prefixes as narrow as what you expect to instrument (`java.` and `javax.` will be ignored for safety reasons).
plugin.jdkthreading.threading_class_prefixes=${SW_PLUGIN_JDKTHREADING_THREADING_CLASS_PREFIXES:}
# This config item controls whether the Tomcat plugin should collect the parameters of the request. It is also activated implicitly in the profiled trace.
plugin.tomcat.collect_http_params=${SW_PLUGIN_TOMCAT_COLLECT_HTTP_PARAMS:false}
# This config item controls whether the SpringMVC plugin should collect the parameters of the request; when your Spring application is based on Tomcat, consider setting only one of `plugin.tomcat.collect_http_params` and `plugin.springmvc.collect_http_params`. It is also activated implicitly in the profiled trace.
plugin.springmvc.collect_http_params=${SW_PLUGIN_SPRINGMVC_COLLECT_HTTP_PARAMS:false}
# This config item controls whether the HttpClient plugin should collect the parameters of the request.
plugin.httpclient.collect_http_params=${SW_PLUGIN_HTTPCLIENT_COLLECT_HTTP_PARAMS:false}
# When `COLLECT_HTTP_PARAMS` is enabled, how many characters to keep and send to the OAP backend; use negative values to keep and send the complete parameters. NB: this config item exists for the sake of performance.
plugin.http.http_params_length_threshold=${SW_PLUGIN_HTTP_HTTP_PARAMS_LENGTH_THRESHOLD:1024}
# When `include_http_headers` declares header names, this threshold controls the length limitation of all header values; use negative values to keep and send the complete headers. Note: this config item exists for the sake of performance.
plugin.http.http_headers_length_threshold=${SW_PLUGIN_HTTP_HTTP_HEADERS_LENGTH_THRESHOLD:2048}
# Set the header names, which should be collected by the plugin. Header name must follow `javax.servlet.http` definition. Multiple names should be split by comma.
plugin.http.include_http_headers=${SW_PLUGIN_HTTP_INCLUDE_HTTP_HEADERS:}
# This config item controls whether the Feign plugin should collect the HTTP body of the request.
plugin.feign.collect_request_body=${SW_PLUGIN_FEIGN_COLLECT_REQUEST_BODY:false}
# When `COLLECT_REQUEST_BODY` is enabled, how many characters to keep and send to the OAP backend, use negative values to keep and send the complete body.
plugin.feign.filter_length_limit=${SW_PLUGIN_FEIGN_FILTER_LENGTH_LIMIT:1024}
# When `COLLECT_REQUEST_BODY` is enabled and the content-type starts with SUPPORTED_CONTENT_TYPES_PREFIX, the request body is collected; multiple prefixes should be separated by `,`.
plugin.feign.supported_content_types_prefix=${SW_PLUGIN_FEIGN_SUPPORTED_CONTENT_TYPES_PREFIX:application/json,text/}
# If true, trace all the influxql(query and write) in InfluxDB access, default is true.
plugin.influxdb.trace_influxql=${SW_PLUGIN_INFLUXDB_TRACE_INFLUXQL:true}
# Apache Dubbo consumer collect `arguments` in RPC call, use `Object#toString` to collect `arguments`.
plugin.dubbo.collect_consumer_arguments=${SW_PLUGIN_DUBBO_COLLECT_CONSUMER_ARGUMENTS:false}
# When `plugin.dubbo.collect_consumer_arguments` is `true`, the leading portion of the arguments, up to this threshold, will be sent to the OAP backend.
plugin.dubbo.consumer_arguments_length_threshold=${SW_PLUGIN_DUBBO_CONSUMER_ARGUMENTS_LENGTH_THRESHOLD:256}
# Apache Dubbo provider collect `arguments` in RPC call, use `Object#toString` to collect `arguments`.
plugin.dubbo.collect_provider_arguments=${SW_PLUGIN_DUBBO_COLLECT_PROVIDER_ARGUMENTS:false}
# When `plugin.dubbo.collect_provider_arguments` is `true`, the leading portion of the arguments, up to this threshold, will be sent to the OAP backend.
plugin.dubbo.provider_arguments_length_threshold=${SW_PLUGIN_DUBBO_PROVIDER_ARGUMENTS_LENGTH_THRESHOLD:256}
# A list of host/port pairs to use for establishing the initial connection to the Kafka cluster.
plugin.kafka.bootstrap_servers=${SW_KAFKA_BOOTSTRAP_SERVERS:localhost:9092}
# Timeout period of reading topics from the Kafka server, the unit is second.
plugin.kafka.get_topic_timeout=${SW_GET_TOPIC_TIMEOUT:10}
# Kafka producer configuration. Read [producer configure](http://kafka.apache.org/24/documentation.html#producerconfigs)
# to get more details. Check document for more details and examples.
plugin.kafka.producer_config=${SW_PLUGIN_KAFKA_PRODUCER_CONFIG:}
# Configure the Kafka producer configuration in JSON format. Notice it will be overridden by plugin.kafka.producer_config[key] if a key is duplicated.
plugin.kafka.producer_config_json=${SW_PLUGIN_KAFKA_PRODUCER_CONFIG_JSON:}
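# e.g. (hypothetical values; any valid Kafka producer keys may appear here)
#   plugin.kafka.producer_config_json={"batch.size": 4096, "linger.ms": 5}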
# Specify the Kafka topic name for reporting Meter System data.
plugin.kafka.topic_meter=${SW_PLUGIN_KAFKA_TOPIC_METER:skywalking-meters}
# Specify the Kafka topic name for reporting JVM metrics data.
plugin.kafka.topic_metrics=${SW_PLUGIN_KAFKA_TOPIC_METRICS:skywalking-metrics}
# Specify the Kafka topic name for reporting trace data.
plugin.kafka.topic_segment=${SW_PLUGIN_KAFKA_TOPIC_SEGMENT:skywalking-segments}
# Specify the Kafka topic name for reporting Thread Profiling snapshots.
plugin.kafka.topic_profiling=${SW_PLUGIN_KAFKA_TOPIC_PROFILINGS:skywalking-profilings}
# Specify the Kafka topic name for reporting the register or heartbeat data of Service Instances.
plugin.kafka.topic_management=${SW_PLUGIN_KAFKA_TOPIC_MANAGEMENT:skywalking-managements}
# Specify the Kafka topic name for reporting logging data.
plugin.kafka.topic_logging=${SW_PLUGIN_KAFKA_TOPIC_LOGGING:skywalking-logs}
# Isolate multiple OAP servers sharing the same Kafka cluster (the namespace is prepended to each Kafka topic name with a `-` separator).
plugin.kafka.namespace=${SW_KAFKA_NAMESPACE:}
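# Illustrative example: with namespace `prod`, trace segments would be reported to the topic `prod-skywalking-segments`.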
# Specify the class used to decode the encoded Kafka configuration. Encoded values can be set in `plugin.kafka.producer_config_json` or `plugin.kafka.producer_config` if needed.
plugin.kafka.decode_class=${SW_KAFKA_DECODE_CLASS:}
# Match Spring beans by class name using regular expressions. Multiple expressions can be separated by commas. This only works when the `Spring annotation plugin` has been activated.
plugin.springannotation.classname_match_regex=${SW_SPRINGANNOTATION_CLASSNAME_MATCH_REGEX:}
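# Illustrative example (hypothetical package name): match all classes under com.example.service.
# plugin.springannotation.classname_match_regex=${SW_SPRINGANNOTATION_CLASSNAME_MATCH_REGEX:com.example.service.*}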
# Whether to transmit logged data formatted or unformatted.
plugin.toolkit.log.transmit_formatted=${SW_PLUGIN_TOOLKIT_LOG_TRANSMIT_FORMATTED:true}
# If set to true, the parameters of Redis commands would be collected by the Lettuce agent.
plugin.lettuce.trace_redis_parameters=${SW_PLUGIN_LETTUCE_TRACE_REDIS_PARAMETERS:false}
# If set to a positive number and `plugin.lettuce.trace_redis_parameters` is set to `true`, Redis command parameters would be collected and truncated to this length.
plugin.lettuce.redis_parameter_max_length=${SW_PLUGIN_LETTUCE_REDIS_PARAMETER_MAX_LENGTH:128}
# Specify which commands should be converted to write operations
plugin.lettuce.operation_mapping_write=${SW_PLUGIN_LETTUCE_OPERATION_MAPPING_WRITE:getset,set,setbit,setex,setnx,setrange,strlen,mset,msetnx,psetex,incr,incrby,incrbyfloat,decr,decrby,append,hmset,hset,hsetnx,hincrby,hincrbyfloat,hdel,rpoplpush,rpush,rpushx,lpush,lpushx,lrem,ltrim,lset,brpoplpush,linsert,sadd,sdiff,sdiffstore,sinterstore,sismember,srem,sunion,sunionstore,sinter,zadd,zincrby,zinterstore,zrange,zrangebylex,zrangebyscore,zrank,zrem,zremrangebylex,zremrangebyrank,zremrangebyscore,zrevrange,zrevrangebyscore,zrevrank,zunionstore,xadd,xdel,del,xtrim}
# Specify which commands should be converted to read operations
plugin.lettuce.operation_mapping_read=${SW_PLUGIN_LETTUCE_OPERATION_MAPPING_READ:getrange,getbit,mget,hvals,hkeys,hlen,hexists,hget,hgetall,hmget,blpop,brpop,lindex,llen,lpop,lrange,rpop,scard,srandmember,spop,sscan,smove,zlexcount,zscore,zscan,zcard,zcount,xget,get,xread,xlen,xrange,xrevrange}
# If set to true, the parameters of the Cypher statement would be collected.
plugin.neo4j.trace_cypher_parameters=${SW_PLUGIN_NEO4J_TRACE_CYPHER_PARAMETERS:false}
# If set to a positive number, `db.cypher.parameters` would be truncated to this length; otherwise it is saved completely, which may cause performance problems.
plugin.neo4j.cypher_parameters_max_length=${SW_PLUGIN_NEO4J_CYPHER_PARAMETERS_MAX_LENGTH:512}
# If set to a positive number, `db.statement` would be truncated to this length; otherwise it is saved completely, which may cause performance problems.
plugin.neo4j.cypher_body_max_length=${SW_PLUGIN_NEO4J_CYPHER_BODY_MAX_LENGTH:2048}
# If set to a positive number and the `trace sampler CPU policy plugin` is activated, the trace would not be collected when the agent process CPU usage percent is greater than `plugin.cpupolicy.sample_cpu_usage_percent_limit`.
plugin.cpupolicy.sample_cpu_usage_percent_limit=${SW_SAMPLE_CPU_USAGE_PERCENT_LIMIT:-1}
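# Illustrative example: a value of 75 would stop collecting new traces while the agent process CPU usage exceeds 75%.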
# This config item controls whether the Micronaut HTTP client plugin should collect the parameters of the request. It is also activated implicitly in profiled traces.
plugin.micronauthttpclient.collect_http_params=${SW_PLUGIN_MICRONAUTHTTPCLIENT_COLLECT_HTTP_PARAMS:false}
# This config item controls whether the Micronaut HTTP server plugin should collect the parameters of the request. It is also activated implicitly in profiled traces.
plugin.micronauthttpserver.collect_http_params=${SW_PLUGIN_MICRONAUTHTTPSERVER_COLLECT_HTTP_PARAMS:false}
# Specify which commands should be converted to write operations
plugin.memcached.operation_mapping_write=${SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_WRITE:set,add,replace,append,prepend,cas,delete,touch,incr,decr}
# Specify which commands should be converted to read operations
plugin.memcached.operation_mapping_read=${SW_PLUGIN_MEMCACHED_OPERATION_MAPPING_READ:get,gets,getAndTouch,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck}
# Specify which commands should be converted to write operations
plugin.ehcache.operation_mapping_write=${SW_PLUGIN_EHCACHE_OPERATION_MAPPING_WRITE:tryRemoveImmediately,remove,removeAndReturnElement,removeAll,removeQuiet,removeWithWriter,put,putAll,replace,removeElement,putWithWriter,putQuiet,putIfAbsent}
# Specify which commands should be converted to read operations
plugin.ehcache.operation_mapping_read=${SW_PLUGIN_EHCACHE_OPERATION_MAPPING_READ:get,getAll,getQuiet,getKeys,getKeysWithExpiryCheck,getKeysNoDuplicateCheck,releaseRead,tryRead,getWithLoader,loadAll,getAllWithLoader}
# Specify which commands should be converted to write operations
plugin.guavacache.operation_mapping_write=${SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_WRITE:put,putAll,invalidate,invalidateAll,cleanUp}
# Specify which commands should be converted to read operations
plugin.guavacache.operation_mapping_read=${SW_PLUGIN_GUAVACACHE_OPERATION_MAPPING_READ:getIfPresent,get,getAllPresent,size}
# If set to true, the parameters of Redis commands would be collected by the Jedis agent.
plugin.jedis.trace_redis_parameters=${SW_PLUGIN_JEDIS_TRACE_REDIS_PARAMETERS:false}
# If set to a positive number and `plugin.jedis.trace_redis_parameters` is set to `true`, Redis command parameters would be collected and truncated to this length.
plugin.jedis.redis_parameter_max_length=${SW_PLUGIN_JEDIS_REDIS_PARAMETER_MAX_LENGTH:128}
# Specify which commands should be converted to write operations
plugin.jedis.operation_mapping_write=${SW_PLUGIN_JEDIS_OPERATION_MAPPING_WRITE:getset,set,setbit,setex,setnx,setrange,strlen,mset,msetnx,psetex,incr,incrby,incrbyfloat,decr,decrby,append,hmset,hset,hsetnx,hincrby,hincrbyfloat,hdel,rpoplpush,rpush,rpushx,lpush,lpushx,lrem,ltrim,lset,brpoplpush,linsert,sadd,sdiff,sdiffstore,sinterstore,sismember,srem,sunion,sunionstore,sinter,zadd,zincrby,zinterstore,zrange,zrangebylex,zrangebyscore,zrank,zrem,zremrangebylex,zremrangebyrank,zremrangebyscore,zrevrange,zrevrangebyscore,zrevrank,zunionstore,xadd,xdel,del,xtrim}
# Specify which commands should be converted to read operations
plugin.jedis.operation_mapping_read=${SW_PLUGIN_JEDIS_OPERATION_MAPPING_READ:getrange,getbit,mget,hvals,hkeys,hlen,hexists,hget,hgetall,hmget,blpop,brpop,lindex,llen,lpop,lrange,rpop,scard,srandmember,spop,sscan,smove,zlexcount,zscore,zscan,zcard,zcount,xget,get,xread,xlen,xrange,xrevrange}
# If set to true, the parameters of Redis commands would be collected by the Redisson agent.
plugin.redisson.trace_redis_parameters=${SW_PLUGIN_REDISSON_TRACE_REDIS_PARAMETERS:false}
# If set to a positive number and `plugin.redisson.trace_redis_parameters` is set to `true`, Redis command parameters would be collected and truncated to this length.
plugin.redisson.redis_parameter_max_length=${SW_PLUGIN_REDISSON_REDIS_PARAMETER_MAX_LENGTH:128}
# Specify which commands should be converted to write operations
plugin.redisson.operation_mapping_write=${SW_PLUGIN_REDISSON_OPERATION_MAPPING_WRITE:getset,set,setbit,setex,setnx,setrange,strlen,mset,msetnx,psetex,incr,incrby,incrbyfloat,decr,decrby,append,hmset,hset,hsetnx,hincrby,hincrbyfloat,hdel,rpoplpush,rpush,rpushx,lpush,lpushx,lrem,ltrim,lset,brpoplpush,linsert,sadd,sdiff,sdiffstore,sinterstore,sismember,srem,sunion,sunionstore,sinter,zadd,zincrby,zinterstore,zrange,zrangebylex,zrangebyscore,zrank,zrem,zremrangebylex,zremrangebyrank,zremrangebyscore,zrevrange,zrevrangebyscore,zrevrank,zunionstore,xadd,xdel,del,xtrim}
# Specify which commands should be converted to read operations
plugin.redisson.operation_mapping_read=${SW_PLUGIN_REDISSON_OPERATION_MAPPING_READ:getrange,getbit,mget,hvals,hkeys,hlen,hexists,hget,hgetall,hmget,blpop,brpop,lindex,llen,lpop,lrange,rpop,scard,srandmember,spop,sscan,smove,zlexcount,zscore,zscan,zcard,zcount,xget,get,xread,xlen,xrange,xrevrange}
# This config item controls whether the Netty-http plugin should collect the HTTP body of the request.
plugin.nettyhttp.collect_request_body=${SW_PLUGIN_NETTYHTTP_COLLECT_REQUEST_BODY:false}
# When `HTTP_COLLECT_REQUEST_BODY` is enabled, this sets how many characters of the body to keep and send to the OAP backend; use a negative value to keep and send the complete body.
plugin.nettyhttp.filter_length_limit=${SW_PLUGIN_NETTYHTTP_FILTER_LENGTH_LIMIT:1024}
# When `HTTP_COLLECT_REQUEST_BODY` is enabled and the content-type starts with one of `HTTP_SUPPORTED_CONTENT_TYPES_PREFIX`, the body of the request is collected; multiple prefixes should be separated by `,`.
plugin.nettyhttp.supported_content_types_prefix=${SW_PLUGIN_NETTYHTTP_SUPPORTED_CONTENT_TYPES_PREFIX:application/json,text/}
# If set to true, the keys of messages would be collected by the RocketMQ Java client plugin.
plugin.rocketmqclient.collect_message_keys=${SW_PLUGIN_ROCKETMQCLIENT_COLLECT_MESSAGE_KEYS:false}
# If set to true, the tags of messages would be collected by the RocketMQ Java client plugin.
plugin.rocketmqclient.collect_message_tags=${SW_PLUGIN_ROCKETMQCLIENT_COLLECT_MESSAGE_TAGS:false}
# Define the max length of collected HTTP parameters. The default value (0) means no collection.
plugin.solon.http_params_length_threshold=${SW_PLUGIN_SOLON_HTTP_PARAMS_LENGTH_THRESHOLD:0}
# Controls which header data should be collected; values must be in lower case. If empty, no header data will be collected. Default is empty.
plugin.solon.include_http_headers=${SW_PLUGIN_SOLON_INCLUDE_HTTP_HEADERS:}
# Define the max length of the collected HTTP body. The default value (0) means no collection.
plugin.solon.http_body_length_threshold=${SW_PLUGIN_SOLON_HTTP_BODY_LENGTH_THRESHOLD:0}
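# Illustrative example (values for demonstration only): collect up to 1024 characters of parameters and body.
# plugin.solon.http_params_length_threshold=${SW_PLUGIN_SOLON_HTTP_PARAMS_LENGTH_THRESHOLD:1024}
# plugin.solon.http_body_length_threshold=${SW_PLUGIN_SOLON_HTTP_BODY_LENGTH_THRESHOLD:1024}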

View File

@@ -0,0 +1,26 @@
Copyright (c) 2012 France Télécom
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.

Some files were not shown because too many files have changed in this diff