Mirror of https://gitee.com/youlaitech/youlai-mall.git (synced 2024-12-22 12:48:59 +08:00)

refactor: remove the middleware module and refactor related code

This commit is contained in:
parent 8cead0bb79
commit 37a90229c5

README.md (21)
@@ -52,23 +52,35 @@
| Mall admin web | [mall-admin](https://gitee.com/youlaiorg/mall-admin) | [mall-admin](https://github.com/youlaitech/mall-admin) | - |
| Mobile app | [mall-app](https://gitee.com/youlaiorg/mall-app) | [mall-app](https://github.com/youlaitech/mall-app) | - |

## 🗂 Project Directory

## 🗂 Directory Structure

``` text
youlai-mall
├── docs
├── nacos                # Nacos configuration
├── sql                  # SQL scripts
├── laboratory           # experiments
├── mysql5               # MySQL 5 scripts
├── mysql8               # MySQL 8 scripts
├── mall-oms             # order service
├── mall-pms             # product service
├── mall-sms             # marketing service
├── mall-ums             # member service
├── middleware           # middleware (nacos/seata)
├── youlai-auth          # OAuth2 authorization server
├── youlai-common        # common dependencies
├── youlai-common        # common modules
├── common-core          # core dependencies
├── common-file          # file API
├── common-log           # common logging config
├── common-mybatis       # common MyBatis config
├── common-rabbitmq      # common RabbitMQ config
├── common-redis         # common Redis config
├── common-seata         # common Seata config
├── common-security      # common security config
├── common-sms           # common SMS config
├── common-web           # common web config
├── youlai-gateway       # gateway
├── youlai-system        # system service
├── oms-api              # order Feign API
├── oms-boot             # order application
└── end
```
@@ -185,6 +197,7 @@ youlai-mall

## 💻 Contributors

<a href="https://github.com/youlaitech/youlai-mall/graphs/contributors"><img src="https://opencollective.com/youlai-mall/contributors.svg?width=890" /></a>

## 💥 Join the Discussion Group
@@ -1,191 +0,0 @@
properties([
    parameters([
        [$class: "ChoiceParameterDefinition",
            description: "Which branch do you want to build?",
            name: "branch_name",
            choices: ["youlai_k8s_deploy", "master"]
        ],
        [$class: "ChoiceParameter",
            choiceType: "PT_SINGLE_SELECT",
            description: "Which microservice module do you want to build?",
            filterLength: 1,
            filterable: false,
            name: "module_name",
            randomName: "choice-parameter-5631314439613978",
            script: [
                $class: "GroovyScript",
                fallbackScript: [
                    classpath: [],
                    sandbox: false,
                    script:
                        '''return["Could not get module_name"]'''
                ],
                script: [
                    classpath: [],
                    sandbox: false,
                    script:
                        '''return["youlai-gateway", "youlai-auth","youlai-admin/admin-boot","mall-oms/oms-boot","mall-pms/pms-boot","mall-sms/sms-boot","mall-ums/ums-boot"]
                        '''
                ]
            ]
        ],
        [$class: "CascadeChoiceParameter",
            choiceType: "PT_SINGLE_SELECT",
            description: "Which host do you want to deploy the microservice to?",
            filterLength: 1,
            filterable: false,
            name: "deploy_on",
            randomName: "choice-parameter-5631314456178619",
            referencedParameters: "module_name",
            script: [
                $class: "GroovyScript",
                fallbackScript: [
                    classpath: [],
                    sandbox: false,
                    script:
                        '''return["Could not get Environment from module_name Param"]'''
                ],
                script: [
                    classpath: [],
                    sandbox: false,
                    script:
                        '''if (module_name.equals("youlai-gateway")){
                            return["a.youlai.tech"]
                        }
                        else if(module_name.equals("mall-ums/ums-boot")){
                            return["c.youlai.tech"]
                        }
                        else if(module_name.equals("mall-oms/oms-boot")){
                            return["d.youlai.tech"]
                        }
                        else {
                            return["f.youlai.tech"]
                        }
                        '''
                ]
            ]
        ]
    ])
])
// The block above requires plugin support; it lets you pick a specific microservice and deploy it to the matching host.

pipeline { // deploy straight onto a host and start the container there
    agent {
        node {
            label "maven"
        }
    }

    environment {
        // namespace in the self-hosted Harbor registry
        docker_hub_namespace = "youlai"
        // docker_hub_namespace = "youlaiwuhui"

        // self-hosted image registry address
        docker_hub = "k8s-harbor:30002"
        docker_hub_ext = "harbor.howlaisi.com:30002"
        // docker_hub = "https://registry.cn-hangzhou.aliyuncs.com"

        // credentials configured in Jenkins or KubeSphere
        docker_hub_id = "youlai-zhangjialin-myself-harbor-account"
        // docker_hub_id = "zhangjialin-aliyun-pingzheng"

        // namespace on k8s
        k8s_namespace = "youlai-mall"
        GIT_COMMIT_ID = sh(returnStdout: true, script: 'git rev-parse --short HEAD').trim()
        // BUILD_NUMBER is passed in by Jenkins
        current_build_number = "${BUILD_NUMBER}"
        // credential id configured for k8s
        KUBECONFIG_CREDENTIAL_ID = "youlai-kubeconfig"
    }

    stages {
        stage ("Print variables") {
            steps{
                echo "docker_hub_namespace: ${docker_hub_namespace}"
                // grab the commit id, used later for the image tag
                echo "commit id: ${env.GIT_COMMIT_ID}"
                echo "current_build_number: ${env.current_build_number}"
                script {
                    // the module passed in may be of the form youlai-admin/admin-boot, which would break the image build
                    env.module_name_prefix = "${module_name}".split("/")[0]
                    env.module_name_suffix = "${module_name}".split("/")[-1]
                    // local tag name
                    env.local_tag = "${module_name_suffix}:${current_build_number}_${GIT_COMMIT_ID}"
                    // remote tag name; it has to be named this way so it can be pushed to the remote registry
                    env.remote_tag = "${docker_hub}/${docker_hub_namespace}/${local_tag}"
                    // remote tag reachable from the public network
                    env.remote_tag_ext = "${docker_hub_ext}/${docker_hub_namespace}/${local_tag}"
                    echo "module_name: ${module_name}"
                    echo "module_name_prefix: ${module_name_prefix}"
                    echo "module_name_suffix: ${module_name_suffix}"
                    echo "local_tag: ${env.local_tag}"
                    echo "remote_tag: ${env.remote_tag}"
                    echo "remote_tag_ext: ${env.remote_tag_ext}"
                }
            }
        }
        stage("Checkout code") {
            steps {
                //git branch: "${branch_name}", credentialsId: 'zhangjialin-youlai-mall-pingzheng', url: 'https://gitee.com/youlaitech/youlai-mall.git'

                //checkout([
                //$class: 'GitSCM',
                //branches: [[name: "${branch_name}"]],
                //extensions: [[$class: 'CloneOption', depth: 1, noTags: false, reference: '', shallow: true]],
                //userRemoteConfigs: [[credentialsId: 'zhangjialin-youlai-mall-pingzheng', url: 'https://gitee.com/youlaitech/youlai-mall.git']]])
                sh "du -h --max-depth=1"
            }
        }
        stage("Read Maven config"){
            steps {
                script {
                    // needs the Pipeline Utility Steps plugin, see: https://www.jianshu.com/p/29403ecf7fc2
                    def pom = readMavenPom file: "${module_name}/pom.xml"
                    def properties = pom.properties
                    env.service_port = properties["service.port"]
                    env.service_nodeport = properties["service.nodeport"]
                    sh "echo service_port: ${service_port}"
                    sh "echo service_nodeport: ${service_nodeport}"
                }
            }
        }
        stage("Build image") {
            steps {
                script {
                    container ('maven') {
                        // run from the project root
                        sh "mvn clean install -Dmaven.test.skip=true"
                        sh "mvn -f ${module_name} clean package dockerfile:build -Ddockerfile.tag=${current_build_number}_${GIT_COMMIT_ID} -Dmaven.test.skip=true -Dspring.profiles.active=dev"
                        withCredentials([usernamePassword(credentialsId: "${docker_hub_id}", passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USERNAME')]) {
                            sh 'echo "$DOCKER_PASSWORD" | docker login http://k8s-harbor:30002 -u "$DOCKER_USERNAME" --password-stdin'
                            sh "docker tag ${env.local_tag} ${env.remote_tag}"
                            sh "docker push ${env.remote_tag}"
                        }
                    }
                }
            }
        }
        stage("Auto deploy to a Docker container") {
            agent none
            steps {
                script {
                    panlong_deploy_script_url = "\'https://gitee.com/youlaitech/youlai-mall/raw/${branch_name}/deploy.sh?access_token=a646170fbfafe84d3412ff594d530b6d&ref=${branch_name}\'"
                    script_dir = "/opt/youlai/script"
                    withCredentials([usernamePassword(credentialsId: "${docker_hub_id}", passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USERNAME')]) {
                        sshPublisher(publishers: [
                            sshPublisherDesc(
                                configName: "${deploy_on}",
                                transfers: [
                                    sshTransfer(cleanRemote: false,
                                        excludes: '',
                                        execCommand: "mkdir -p ${script_dir} && cd ${script_dir} && rm -rf deploy.sh.${module_name_suffix} && curl -X GET --header 'Content-Type: application/json;charset=UTF-8' ${panlong_deploy_script_url} -o deploy.sh.${module_name_suffix} && sh deploy.sh.${module_name_suffix} ${remote_tag_ext} ${service_port} ${docker_hub_namespace} ${module_name_suffix} ${DOCKER_USERNAME} ${DOCKER_PASSWORD}",execTimeout: 1200000, flatten: false, makeEmptyDirs: false, noDefaultExcludes: false, patternSeparator: '[, ]+', remoteDirectory: '', remoteDirectorySDF: false, removePrefix: '', sourceFiles: '')],
                                usePromotionTimestamp: false,
                                useWorkspaceInPromotion: false,
                                verbose: true)])
                    }
                }
            }
        }
    }
}
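The script block above normalizes module paths such as youlai-admin/admin-boot before building image tags. A minimal Java sketch of the same naming convention, purely illustrative (the class and method names are not part of the repository):

```java
// Illustrative only: mirrors the image tag naming used by the pipeline above.
public class ImageTagDemo {

    // e.g. modulePath = "youlai-admin/admin-boot", buildNumber = "42", commitId = "37a9022"
    static String localTag(String modulePath, String buildNumber, String commitId) {
        String[] parts = modulePath.split("/");
        String moduleSuffix = parts[parts.length - 1];            // "admin-boot"
        return moduleSuffix + ":" + buildNumber + "_" + commitId;
    }

    static String remoteTag(String registry, String namespace, String localTag) {
        return registry + "/" + namespace + "/" + localTag;
    }

    public static void main(String[] args) {
        String local = localTag("youlai-admin/admin-boot", "42", "37a9022");
        System.out.println(local);                                          // admin-boot:42_37a9022
        System.out.println(remoteTag("k8s-harbor:30002", "youlai", local)); // k8s-harbor:30002/youlai/admin-boot:42_37a9022
    }
}
```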
@@ -1,117 +0,0 @@
pipeline { // deploy straight to k8s and let k8s manage the Docker containers
    agent {
        node {
            label "maven"
        }
    }

    parameters {
        choice(
            description: "Which branch do you want to build?",
            name: "branch_name",
            choices: ["master", "feature/youlai_k8s_deploy3"]
        )
        choice(
            description: "Which microservice module do you want to build?",
            name: "module_name",
            choices: ["youlai-gateway", "youlai-auth","youlai-admin/admin-boot","mall-oms/oms-boot","mall-pms/pms-boot","mall-sms/sms-boot","mall-ums/ums-boot"]
        )
    }
    environment {
        // namespace in the self-hosted Harbor registry
        docker_hub_namespace = "youlai"
        // docker_hub_namespace = "youlaiwuhui"

        // self-hosted image registry address
        docker_hub = "k8s-harbor:30002"
        // docker_hub = "https://registry.cn-hangzhou.aliyuncs.com"

        // credentials configured in Jenkins or KubeSphere
        docker_hub_id = "youlai-zhangjialin-myself-harbor-account"
        // docker_hub_id = "zhangjialin-aliyun-pingzheng"

        // namespace on k8s
        k8s_namespace = "youlai-mall"
        GIT_COMMIT_ID = sh(returnStdout: true, script: 'git rev-parse --short HEAD').trim()
        // BUILD_NUMBER is passed in by Jenkins
        current_build_number = "${BUILD_NUMBER}"
        // credential id configured for k8s
        KUBECONFIG_CREDENTIAL_ID = "youlai-kubeconfig"
    }

    stages {
        stage ("Print variables") {
            steps{
                echo "docker_hub_namespace: ${docker_hub_namespace}"
                // grab the commit id, used later for the image tag
                echo "commit id: ${env.GIT_COMMIT_ID}"
                echo "current_build_number: ${env.current_build_number}"
                script {
                    // the module passed in may be of the form youlai-admin/admin-boot, which would break the image build
                    env.module_name_prefix = "${module_name}".split("/")[0]
                    env.module_name_suffix = "${module_name}".split("/")[-1]
                    // local tag name
                    env.local_tag = "${module_name_suffix}:${current_build_number}_${GIT_COMMIT_ID}"
                    // remote tag name; it has to be named this way so it can be pushed to the remote registry
                    env.remote_tag = "${docker_hub}/${docker_hub_namespace}/${local_tag}"
                    echo "module_name: ${module_name}"
                    echo "module_name_prefix: ${module_name_prefix}"
                    echo "module_name_suffix: ${module_name_suffix}"
                    echo "local_tag: ${env.local_tag}"
                    echo "remote_tag: ${env.remote_tag}"
                }
            }
        }
        stage("Checkout code") {
            steps {
                //git branch: "${branch_name}", credentialsId: 'zhangjialin-youlai-mall-pingzheng', url: 'https://gitee.com/youlaitech/youlai-mall.git'

                //checkout([
                //$class: 'GitSCM',
                //branches: [[name: "${branch_name}"]],
                //extensions: [[$class: 'CloneOption', depth: 1, noTags: false, reference: '', shallow: true]],
                //userRemoteConfigs: [[credentialsId: 'zhangjialin-youlai-mall-pingzheng', url: 'https://gitee.com/youlaitech/youlai-mall.git']]])
                sh "du -h --max-depth=1"
            }
        }
        stage("Read Maven config"){
            steps {
                script {
                    // needs the Pipeline Utility Steps plugin, see: https://www.jianshu.com/p/29403ecf7fc2
                    def pom = readMavenPom file: "${module_name}/pom.xml"
                    def properties = pom.properties
                    env.service_port = properties["service.port"]
                    env.service_nodeport = properties["service.nodeport"]
                    sh "echo service_port: ${service_port}"
                    sh "echo service_nodeport: ${service_nodeport}"
                }
            }
        }
        stage("Build image") {
            steps {
                script {
                    container ('maven') {
                        // run from the project root
                        sh "mvn clean install -Dmaven.test.skip=true"
                        sh "cd $module_name && docker build -t ${env.local_tag} -f ./Dockerfile ."
                        //sh "mvn -f ${module_name} clean package dockerfile:build -Ddockerfile.tag=${current_build_number}_${GIT_COMMIT_ID} -Dmaven.test.skip=true -Dspring.profiles.active=k8s"
                        withCredentials([usernamePassword(credentialsId: "${docker_hub_id}", passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USERNAME')]) {
                            sh 'echo "$DOCKER_PASSWORD" | docker login http://k8s-harbor:30002 -u "$DOCKER_USERNAME" --password-stdin'
                            sh "docker tag ${env.local_tag} ${env.remote_tag}"
                            sh "docker push ${env.remote_tag}"
                        }
                    }
                }
            }
        }
        stage("Auto deploy to k8s") {
            agent none
            steps {
                container ("maven") {
                    // substituting env vars into the manifest and piping it to kubectl is the officially recommended approach
                    sh 'envsubst < devops/deploy.yaml | kubectl apply -f -'
                }
            }
        }
    }
}
@@ -1,46 +0,0 @@
pipeline { // run on k8s, using containers started by k8s to scan the code
    agent {
        node {
            label "maven"
        }
    }
    parameters {
        choice(
            description: "Which branch do you want to scan?",
            name: "branch_name",
            choices: ["youlai_k8s_deploy", "master"]
        )
    }
    stages {
        stage("Compile project") {
            agent none
            steps {
                container("maven") {
                    sh "ls -al"
                    sh "mvn clean install -Dmaven.test.skip=true"
                }
            }
        }
        stage("Code scan") {
            steps {
                script {
                    container ("maven") {
                        withCredentials([string(credentialsId : "youlai-youlai-mall-sonar-token" ,variable : "SONAR_TOKEN" ,)]) {
                            withSonarQubeEnv("sonar") {
                                sh "mvn sonar:sonar -Dsonar.projectKey=youlai-mall -Dsonar.projectName=youlai-mall -f ./pom.xml -Dsonar.host.url=http://ks.howlaisi.com:31452/ -Dsonar.login=${SONAR_TOKEN}"
                            }
                        }
                    }
                }
            }
        }
    }
    post {
        failure {
            addGiteeMRComment comment: ":x: Jenkins CI build failed. [BUILD](" + env.BUILD_URL + ")"
        }
        success {
            addGiteeMRComment comment: """:white_check_mark: Jenkins CI build passed --> [BUILD](""" + env.BUILD_URL + """)""" + """\n""" + """:white_check_mark: Sonar scan result --> [SONAR](""" + """http://ks.howlaisi.com:31452/dashboard?id=youlai-mall""" + """)"""
        }
    }
}
@@ -1,20 +0,0 @@
#!/bin/bash
remote_tag_ext=$1
port=$2
docker_hub_namespace=$3
module_name_suffix=$4
DOCKER_USERNAME=$5
DOCKER_PASSWORD=$6
# log in to Harbor
echo "$DOCKER_PASSWORD" | docker login http://harbor.howlaisi.com:30002 -u "$DOCKER_USERNAME" --password-stdin
# stop and remove the old container and image
if [ -n "$docker_hub_namespace" -a -n "$module_name_suffix" ]
then
  docker stop $(docker ps -a | grep "${docker_hub_namespace}_${module_name_suffix}" | awk '{print $1}') || true
  docker rm -f $(docker ps -a | grep "${docker_hub_namespace}_${module_name_suffix}" | awk '{print $1}') || true
  docker rmi -f $(docker images -a | grep "${module_name_suffix}" | awk '{print $3}') || true
fi
# pull the latest image
docker pull "${remote_tag_ext}"
# run the container
docker run --restart=always -di --net=host -e "SPRING_PROFILES_ACTIVE=dev" --name="${docker_hub_namespace}_${module_name_suffix}" "${remote_tag_ext}"
@@ -1,105 +0,0 @@
#apiVersion: v1
#kind: PersistentVolumeClaim
#metadata:
#  namespace: youlai-mall
#  name: ${module_name_suffix}-pvc0
#  labels: {}
#spec:
#  accessModes:
#    - ReadWriteOnce
#  resources:
#    requests:
#      storage: 1Gi # k8s limitation: a PVC can only be expanded, never shrunk, and the StorageClass (sc.yaml) must set allowVolumeExpansion: true to allow dynamic expansion
#  storageClassName: nfs-storage
#
#---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: ${module_name_suffix}
  name: ${module_name_suffix}-deployment
  namespace: ${k8s_namespace} # the namespace must be specified
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  selector:
    matchLabels:
      app: ${module_name_suffix}
  strategy: # rollout strategy
    rollingUpdate:
      maxSurge: 50%
      maxUnavailable: 50%
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: ${module_name_suffix}
    spec:
      volumes: # mounted volumes
        - name: host-time # mount the host's timezone file
          hostPath:
            path: /etc/localtime
            type: ''
#        - name: volume-${module_name_suffix} # mount logs
#          persistentVolumeClaim:
#            claimName: ${module_name_suffix}-pvc0
      imagePullSecrets:
        - name: ${docker_hub_id} # registry credentials (Aliyun or Harbor) configured in the project beforehand
      containers:
        - image: $remote_tag
          imagePullPolicy: Always
          name: ${module_name_suffix}
          ports:
            - name: http-${service_port}
              containerPort: ${service_port}
              protocol: TCP
          env:
            - name: SPRING_PROFILES_ACTIVE
              value: k8s
          resources: # resource limits
            limits:
              cpu: 250m
              memory: 512Mi
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
            - name: host-time
              readOnly: true
              mountPath: /etc/localtime
#            - name: volume-${module_name_suffix}
#              mountPath: /logs

      dnsPolicy: ClusterFirst
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      affinity: # pod anti-affinity: prefer spreading replicas across different nodes
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app: ${module_name_suffix}
                topologyKey: kubernetes.io/hostname
  revisionHistoryLimit: 2 # max number of old revisions kept for rollback; the default is 10
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: ${module_name_suffix}
  name: ${module_name_suffix}-svc
  namespace: ${k8s_namespace}
spec:
  ports:
    - name: http
      port: ${service_port}
      protocol: TCP
      targetPort: ${service_port}
      nodePort: ${service_nodeport}
  selector:
    app: ${module_name_suffix}
  sessionAffinity: None
  type: NodePort
@@ -1,33 +0,0 @@
' Youlai Spring Security OAuth2 captcha grant
@startuml
'https://plantuml.com/sequence-diagram
skinparam backgroundColor #EEEBDC
skinparam handwritten true

header @Youlai Tech Spring Security OAuth2 captcha grant

actor Client

Client -> AuthController: password + captcha grant \n /oauth/token?grant_type=captcha
AuthController -> TokenEndpoint: postAccessToken()
TokenEndpoint -> CompositeTokenGranter: this.getTokenGranter().grant()
participant CaptchaTokenGranter #99FF99
CompositeTokenGranter->CaptchaTokenGranter: grant( grantType, tokenRequest )\nmatch the Granter by grant_type
note over AuthenticationManagerDelegator #aqua
inner class of WebSecurityConfigurerAdapter
end note
CaptchaTokenGranter-> AuthenticationManagerDelegator: \ncaptcha check\nthis.authenticationManager.authenticate()
AuthenticationManagerDelegator -> ProviderManager: this.delegate.authenticate()
ProviderManager -> ProviderManager: find the Provider that supports\nUsernamePasswordAuthenticationToken
activate ProviderManager
ProviderManager -> DaoAuthenticationProvider: provider.authenticate(authentication)
deactivate ProviderManager
DaoAuthenticationProvider -> DaoAuthenticationProvider:authenticate()\npassword check
ProviderManager <-- DaoAuthenticationProvider: Authentication
AuthenticationManagerDelegator <-- ProviderManager: Authentication
CaptchaTokenGranter <-- AuthenticationManagerDelegator: OAuth2Authentication
CompositeTokenGranter <-- CaptchaTokenGranter: OAuth2Authentication
TokenEndpoint <-- CompositeTokenGranter:OAuth2AccessToken
AuthController <-- TokenEndpoint:OAuth2AccessToken
Client <-- AuthController: OAuth2AccessToken
@enduml
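The diagram shows CompositeTokenGranter handing the request to a custom CaptchaTokenGranter selected by grant_type. The granter class itself is not included in this diff; below is a minimal, hypothetical sketch of how such a granter is commonly built on the (now deprecated) spring-security-oauth2 classes, with the captcha check left as a comment:

```java
import java.util.Map;
import org.springframework.security.authentication.AuthenticationManager;
import org.springframework.security.authentication.UsernamePasswordAuthenticationToken;
import org.springframework.security.core.Authentication;
import org.springframework.security.oauth2.provider.*;
import org.springframework.security.oauth2.provider.token.AbstractTokenGranter;
import org.springframework.security.oauth2.provider.token.AuthorizationServerTokenServices;

// Hypothetical sketch, not the repository's implementation.
public class CaptchaTokenGranter extends AbstractTokenGranter {

    private static final String GRANT_TYPE = "captcha";
    private final AuthenticationManager authenticationManager;

    public CaptchaTokenGranter(AuthorizationServerTokenServices tokenServices,
                               ClientDetailsService clientDetailsService,
                               OAuth2RequestFactory requestFactory,
                               AuthenticationManager authenticationManager) {
        super(tokenServices, clientDetailsService, requestFactory, GRANT_TYPE);
        this.authenticationManager = authenticationManager;
    }

    @Override
    protected OAuth2Authentication getOAuth2Authentication(ClientDetails client, TokenRequest tokenRequest) {
        Map<String, String> parameters = tokenRequest.getRequestParameters();
        // 1. validate the captcha here, e.g. compare parameters.get("code") with a cached value
        // 2. then delegate to the regular username/password authentication
        Authentication userAuth = new UsernamePasswordAuthenticationToken(
                parameters.get("username"), parameters.get("password"));
        userAuth = authenticationManager.authenticate(userAuth);
        OAuth2Request oAuth2Request = getRequestFactory().createOAuth2Request(client, tokenRequest);
        return new OAuth2Authentication(oAuth2Request, userAuth);
    }
}
```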
@@ -1,34 +0,0 @@
' Youlai Spring Security OAuth2 password grant
@startuml
'https://plantuml.com/sequence-diagram
skinparam backgroundColor #EEEBDC
skinparam handwritten true

header @Youlai Tech Spring Security OAuth2 password grant

actor Client

Client -> AuthController: password grant \n /oauth/token?grant_type=password
AuthController -> TokenEndpoint: postAccessToken()
TokenEndpoint -> CompositeTokenGranter: this.getTokenGranter().grant()
CompositeTokenGranter->ResourceOwnerPasswordTokenGranter:grant( grantType, tokenRequest )\nmatch the Granter by grant_type
note over AuthenticationManagerDelegator #aqua
inner class of WebSecurityConfigurerAdapter
end note
ResourceOwnerPasswordTokenGranter-> AuthenticationManagerDelegator:this.authenticationManager.authenticate()
AuthenticationManagerDelegator -> ProviderManager: this.delegate.authenticate()

ProviderManager -> ProviderManager: find the Provider that supports\nUsernamePasswordAuthenticationToken
activate ProviderManager
ProviderManager -> DaoAuthenticationProvider: provider.authenticate(authentication)
deactivate ProviderManager
DaoAuthenticationProvider -> DaoAuthenticationProvider:authenticate()\npassword check
ProviderManager <-- DaoAuthenticationProvider: Authentication
AuthenticationManagerDelegator <-- ProviderManager: Authentication
ResourceOwnerPasswordTokenGranter <-- AuthenticationManagerDelegator: OAuth2Authentication
CompositeTokenGranter <-- ResourceOwnerPasswordTokenGranter: OAuth2Authentication
TokenEndpoint <-- CompositeTokenGranter:OAuth2AccessToken
AuthController <-- TokenEndpoint:OAuth2AccessToken
Client <-- AuthController: OAuth2AccessToken
@enduml
@@ -1,34 +0,0 @@
' Youlai Spring Security OAuth2 refresh token grant
@startuml
'https://plantuml.com/sequence-diagram
skinparam backgroundColor #EEEBDC
skinparam handwritten true

header @Youlai Tech Spring Security OAuth2 refresh token grant

actor Client

Client -> AuthController: refresh grant \n /oauth/token?grant_type=refresh_token
AuthController -> TokenEndpoint: postAccessToken()
TokenEndpoint -> CompositeTokenGranter: this.getTokenGranter().grant()
CompositeTokenGranter->RefreshTokenGranter:grant( grantType, tokenRequest )\nmatch the Granter by grant_type
note over AuthenticationManagerDelegator #aqua
inner class of WebSecurityConfigurerAdapter
end note
RefreshTokenGranter-> AuthenticationManagerDelegator:this.authenticationManager.authenticate()
AuthenticationManagerDelegator -> ProviderManager: this.delegate.authenticate()

ProviderManager -> ProviderManager: find the Provider that supports\nPreAuthenticatedAuthenticationToken
activate ProviderManager
ProviderManager -> PreAuthenticatedAuthenticationProvider: provider.authenticate(authentication)
deactivate ProviderManager
PreAuthenticatedAuthenticationProvider -> PreAuthenticatedAuthenticationProvider:authenticate()\nuser checks:\nlocked?\nenabled?\nexpired?
ProviderManager <-- PreAuthenticatedAuthenticationProvider: Authentication
AuthenticationManagerDelegator <-- ProviderManager: Authentication
RefreshTokenGranter <-- AuthenticationManagerDelegator: OAuth2Authentication
CompositeTokenGranter <-- RefreshTokenGranter: OAuth2Authentication
TokenEndpoint <-- CompositeTokenGranter:OAuth2AccessToken
AuthController <-- TokenEndpoint:OAuth2AccessToken
Client <-- AuthController: OAuth2AccessToken
@enduml
@@ -1,37 +0,0 @@
' Youlai Spring Security OAuth2 SMS code grant
@startuml
'https://plantuml.com/sequence-diagram
skinparam backgroundColor #EEEBDC
skinparam handwritten true

header @Youlai Tech Spring Security OAuth2 SMS code grant

actor Client

Client -> AuthController: SMS code grant \n /oauth/token?grant_type=sms_code
AuthController -> TokenEndpoint: postAccessToken()
TokenEndpoint -> CompositeTokenGranter: this.getTokenGranter().grant()
participant SmsCodeTokenGranter #99FF99
CompositeTokenGranter->SmsCodeTokenGranter: grant( grantType, tokenRequest )\nmatch the Granter by grant_type
note over AuthenticationManagerDelegator #aqua
inner class of WebSecurityConfigurerAdapter
end note
SmsCodeTokenGranter-> AuthenticationManagerDelegator:this.authenticationManager.authenticate()
AuthenticationManagerDelegator -> ProviderManager: this.delegate.authenticate()
ProviderManager -> ProviderManager: find the Provider that supports\nSmsCodeAuthenticationToken

activate ProviderManager
participant SmsCodeAuthenticationProvider #99FF99
ProviderManager -> SmsCodeAuthenticationProvider: provider.authenticate(authentication)
deactivate ProviderManager

SmsCodeAuthenticationProvider -> SmsCodeAuthenticationProvider:authenticate()\ncredential check\nSMS code check
ProviderManager <-- SmsCodeAuthenticationProvider: Authentication
AuthenticationManagerDelegator <-- ProviderManager: Authentication
SmsCodeTokenGranter <-- AuthenticationManagerDelegator: OAuth2Authentication
CompositeTokenGranter <-- SmsCodeTokenGranter: OAuth2Authentication
TokenEndpoint <-- CompositeTokenGranter:OAuth2AccessToken
AuthController <-- TokenEndpoint:OAuth2AccessToken
Client <-- AuthController: OAuth2AccessToken
@enduml
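The SMS flow above depends on an SmsCodeAuthenticationProvider registered with the ProviderManager. That class is not shown in this diff; the sketch below only illustrates the general AuthenticationProvider contract it follows (the token type and code store are illustrative stand-ins):

```java
import java.util.Collections;
import java.util.Map;
import org.springframework.security.authentication.AuthenticationProvider;
import org.springframework.security.authentication.BadCredentialsException;
import org.springframework.security.authentication.UsernamePasswordAuthenticationToken;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.AuthenticationException;

// Hypothetical sketch; the real provider uses an SmsCodeAuthenticationToken and a Redis-backed code store.
public class SmsCodeAuthenticationProvider implements AuthenticationProvider {

    private final Map<String, String> smsCodeStore; // mobile -> last code sent

    public SmsCodeAuthenticationProvider(Map<String, String> smsCodeStore) {
        this.smsCodeStore = smsCodeStore;
    }

    @Override
    public Authentication authenticate(Authentication authentication) throws AuthenticationException {
        String mobile = (String) authentication.getPrincipal();
        String code = (String) authentication.getCredentials();
        if (code == null || !code.equals(smsCodeStore.get(mobile))) {
            throw new BadCredentialsException("SMS verification code mismatch");
        }
        // return a fully authenticated token; authorities would be loaded from the member service
        return new UsernamePasswordAuthenticationToken(mobile, null, Collections.emptyList());
    }

    @Override
    public boolean supports(Class<?> authentication) {
        // the real implementation matches SmsCodeAuthenticationToken.class
        return UsernamePasswordAuthenticationToken.class.isAssignableFrom(authentication);
    }
}
```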
@@ -1,37 +0,0 @@
' Youlai Spring Security OAuth2 WeChat authorization grant
@startuml
'https://plantuml.com/sequence-diagram
skinparam backgroundColor #EEEBDC
skinparam handwritten true

header @Youlai Tech Spring Security OAuth2 WeChat authorization grant

actor Client

Client -> AuthController: WeChat login grant \n /oauth/token?grant_type=wechat
AuthController -> TokenEndpoint: postAccessToken()
TokenEndpoint -> CompositeTokenGranter: this.getTokenGranter().grant()
participant WechatTokenGranter #99FF99
CompositeTokenGranter->WechatTokenGranter: grant( grantType, tokenRequest )\nmatch the Granter by grant_type
note over AuthenticationManagerDelegator #aqua
inner class of WebSecurityConfigurerAdapter
end note
WechatTokenGranter-> AuthenticationManagerDelegator:this.authenticationManager.authenticate()
AuthenticationManagerDelegator -> ProviderManager: this.delegate.authenticate()
ProviderManager -> ProviderManager: find the Provider that supports\nWechatAuthenticationToken

activate ProviderManager
participant WechatAuthenticationProvider #99FF99
ProviderManager -> WechatAuthenticationProvider: provider.authenticate(authentication)
deactivate ProviderManager

WechatAuthenticationProvider -> WechatAuthenticationProvider: authenticate()\nexchange code for openid\ndecrypt encryptedData\nget user info
ProviderManager <-- WechatAuthenticationProvider: Authentication
AuthenticationManagerDelegator <-- ProviderManager: Authentication
WechatTokenGranter <-- AuthenticationManagerDelegator: OAuth2Authentication
CompositeTokenGranter <-- WechatTokenGranter: OAuth2Authentication
TokenEndpoint <-- CompositeTokenGranter:OAuth2AccessToken
AuthController <-- TokenEndpoint:OAuth2AccessToken
Client <-- AuthController: OAuth2AccessToken
@enduml
@ -1,89 +0,0 @@
|
||||
transport.type=TCP
|
||||
transport.server=NIO
|
||||
transport.heartbeat=true
|
||||
transport.enableClientBatchSendRequest=false
|
||||
transport.threadFactory.bossThreadPrefix=NettyBoss
|
||||
transport.threadFactory.workerThreadPrefix=NettyServerNIOWorker
|
||||
transport.threadFactory.serverExecutorThreadPrefix=NettyServerBizHandler
|
||||
transport.threadFactory.shareBossWorker=false
|
||||
transport.threadFactory.clientSelectorThreadPrefix=NettyClientSelector
|
||||
transport.threadFactory.clientSelectorThreadSize=1
|
||||
transport.threadFactory.clientWorkerThreadPrefix=NettyClientWorkerThread
|
||||
transport.threadFactory.bossThreadSize=1
|
||||
transport.threadFactory.workerThreadSize=default
|
||||
transport.shutdown.wait=3
|
||||
service.vgroupMapping.mall_tx_group=default
|
||||
service.default.grouplist=127.0.0.1:8091
|
||||
service.enableDegrade=false
|
||||
service.disableGlobalTransaction=false
|
||||
client.rm.asyncCommitBufferLimit=10000
|
||||
client.rm.lock.retryInterval=10
|
||||
client.rm.lock.retryTimes=30
|
||||
client.rm.lock.retryPolicyBranchRollbackOnConflict=true
|
||||
client.rm.reportRetryCount=5
|
||||
client.rm.tableMetaCheckEnable=false
|
||||
client.rm.tableMetaCheckerInterval=60000
|
||||
client.rm.sqlParserType=druid
|
||||
client.rm.reportSuccessEnable=false
|
||||
client.rm.sagaBranchRegisterEnable=false
|
||||
client.tm.commitRetryCount=5
|
||||
client.tm.rollbackRetryCount=5
|
||||
client.tm.defaultGlobalTransactionTimeout=60000
|
||||
client.tm.degradeCheck=false
|
||||
client.tm.degradeCheckAllowTimes=10
|
||||
client.tm.degradeCheckPeriod=2000
|
||||
store.mode=db
|
||||
store.publicKey=
|
||||
store.file.dir=file_store/data
|
||||
store.file.maxBranchSessionSize=16384
|
||||
store.file.maxGlobalSessionSize=512
|
||||
store.file.fileWriteBufferCacheSize=16384
|
||||
store.file.flushDiskMode=async
|
||||
store.file.sessionReloadReadSize=100
|
||||
store.db.datasource=druid
|
||||
store.db.dbType=mysql
|
||||
store.db.driverClassName=com.mysql.cj.jdbc.Driver
|
||||
store.db.url=jdbc:mysql://www.youlai.tech:3306/seata?useUnicode=true&rewriteBatchedStatements=true
|
||||
store.db.user=seata
|
||||
store.db.password=seata
|
||||
store.db.minConn=5
|
||||
store.db.maxConn=30
|
||||
store.db.globalTable=global_table
|
||||
store.db.branchTable=branch_table
|
||||
store.db.queryLimit=100
|
||||
store.db.lockTable=lock_table
|
||||
store.db.maxWait=5000
|
||||
store.redis.mode=single
|
||||
store.redis.single.host=127.0.0.1
|
||||
store.redis.single.port=6379
|
||||
store.redis.sentinel.masterName=
|
||||
store.redis.sentinel.sentinelHosts=
|
||||
store.redis.maxConn=10
|
||||
store.redis.minConn=1
|
||||
store.redis.maxTotal=100
|
||||
store.redis.database=0
|
||||
store.redis.password=
|
||||
store.redis.queryLimit=100
|
||||
server.recovery.committingRetryPeriod=1000
|
||||
server.recovery.asynCommittingRetryPeriod=1000
|
||||
server.recovery.rollbackingRetryPeriod=1000
|
||||
server.recovery.timeoutRetryPeriod=1000
|
||||
server.maxCommitRetryTimeout=-1
|
||||
server.maxRollbackRetryTimeout=-1
|
||||
server.rollbackRetryTimeoutUnlockEnable=false
|
||||
client.undo.dataValidation=true
|
||||
client.undo.logSerialization=jackson
|
||||
client.undo.onlyCareUpdateColumns=true
|
||||
server.undo.logSaveDays=7
|
||||
server.undo.logDeletePeriod=86400000
|
||||
client.undo.logTable=undo_log
|
||||
client.undo.compress.enable=true
|
||||
client.undo.compress.type=zip
|
||||
client.undo.compress.threshold=64k
|
||||
log.exceptionRate=100
|
||||
transport.serialization=seata
|
||||
transport.compressor=none
|
||||
metrics.enabled=false
|
||||
metrics.registryType=compact
|
||||
metrics.exporterList=prometheus
|
||||
metrics.exporterPrometheusPort=9898
|
@ -1,69 +0,0 @@
|
||||
-- -------------------------------- The script used when storeMode is 'db' --------------------------------
|
||||
-- the table to store GlobalSession data
|
||||
CREATE TABLE IF NOT EXISTS `global_table`
|
||||
(
|
||||
`xid` VARCHAR(128) NOT NULL,
|
||||
`transaction_id` BIGINT,
|
||||
`status` TINYINT NOT NULL,
|
||||
`application_id` VARCHAR(32),
|
||||
`transaction_service_group` VARCHAR(32),
|
||||
`transaction_name` VARCHAR(128),
|
||||
`timeout` INT,
|
||||
`begin_time` BIGINT,
|
||||
`application_data` VARCHAR(2000),
|
||||
`gmt_create` DATETIME,
|
||||
`gmt_modified` DATETIME,
|
||||
PRIMARY KEY (`xid`),
|
||||
KEY `idx_status_gmt_modified` (`status` , `gmt_modified`),
|
||||
KEY `idx_transaction_id` (`transaction_id`)
|
||||
) ENGINE = InnoDB
|
||||
DEFAULT CHARSET = utf8;
|
||||
|
||||
-- the table to store BranchSession data
|
||||
CREATE TABLE IF NOT EXISTS `branch_table`
|
||||
(
|
||||
`branch_id` BIGINT NOT NULL,
|
||||
`xid` VARCHAR(128) NOT NULL,
|
||||
`transaction_id` BIGINT,
|
||||
`resource_group_id` VARCHAR(32),
|
||||
`resource_id` VARCHAR(256),
|
||||
`branch_type` VARCHAR(8),
|
||||
`status` TINYINT,
|
||||
`client_id` VARCHAR(64),
|
||||
`application_data` VARCHAR(2000),
|
||||
`gmt_create` DATETIME(6),
|
||||
`gmt_modified` DATETIME(6),
|
||||
PRIMARY KEY (`branch_id`),
|
||||
KEY `idx_xid` (`xid`)
|
||||
) ENGINE = InnoDB
|
||||
DEFAULT CHARSET = utf8;
|
||||
|
||||
-- the table to store lock data
|
||||
CREATE TABLE IF NOT EXISTS `lock_table`
|
||||
(
|
||||
`row_key` VARCHAR(128) NOT NULL,
|
||||
`xid` VARCHAR(128),
|
||||
`transaction_id` BIGINT,
|
||||
`branch_id` BIGINT NOT NULL,
|
||||
`resource_id` VARCHAR(256),
|
||||
`table_name` VARCHAR(32),
|
||||
`pk` VARCHAR(36),
|
||||
`status` TINYINT NOT NULL DEFAULT '0' COMMENT '0:locked ,1:rollbacking',
|
||||
`gmt_create` DATETIME,
|
||||
`gmt_modified` DATETIME,
|
||||
PRIMARY KEY (`row_key`),
|
||||
KEY `idx_status` (`status`),
|
||||
KEY `idx_branch_id` (`branch_id`)
|
||||
) ENGINE = InnoDB
|
||||
DEFAULT CHARSET = utf8;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS `distributed_lock`
|
||||
(
|
||||
`lock_key` CHAR(20) NOT NULL,
|
||||
`lock_value` VARCHAR(20) NOT NULL,
|
||||
`expire` BIGINT,
|
||||
primary key (`lock_key`)
|
||||
) ENGINE = InnoDB
|
||||
DEFAULT CHARSET = utf8mb4;
|
||||
|
||||
INSERT INTO `distributed_lock` (lock_key, lock_value, expire) VALUES ('HandleAllSession', ' ', 0);
|
@@ -1,14 +0,0 @@
-- For AT mode you must initialize this SQL in your business database; the Seata server does not need it.
CREATE TABLE IF NOT EXISTS `undo_log`
(
    `branch_id`     BIGINT       NOT NULL COMMENT 'branch transaction id',
    `xid`           VARCHAR(128) NOT NULL COMMENT 'global transaction id',
    `context`       VARCHAR(128) NOT NULL COMMENT 'undo_log context,such as serialization',
    `rollback_info` LONGBLOB     NOT NULL COMMENT 'rollback info',
    `log_status`    INT(11)      NOT NULL COMMENT '0:normal status,1:defense status',
    `log_created`   DATETIME(6)  NOT NULL COMMENT 'create datetime',
    `log_modified`  DATETIME(6)  NOT NULL COMMENT 'modify datetime',
    UNIQUE KEY `ux_undo_log` (`xid`, `branch_id`)
) ENGINE = InnoDB
  AUTO_INCREMENT = 1
  DEFAULT CHARSET = utf8 COMMENT ='AT transaction mode undo table';
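The undo_log table backs Seata's AT mode: each business database keeps rollback snapshots for its branch transactions. A minimal sketch of how a service usually opens a global transaction with Seata (service, mapper and Feign client names are illustrative, not taken from this commit):

```java
import io.seata.spring.annotation.GlobalTransactional;
import lombok.RequiredArgsConstructor;
import org.springframework.stereotype.Service;

// Hypothetical sketch of Seata AT mode usage; StockFeignClient and the method names are illustrative.
@Service
@RequiredArgsConstructor
public class SeataOrderDemoService {

    private final OrderMapper orderMapper;            // MyBatis-Plus mapper from mall-oms
    private final StockFeignClient stockFeignClient;  // illustrative Feign client to the product service

    // One global transaction spanning a local insert and a remote stock deduction:
    // both branches commit together or are rolled back via the undo_log snapshots.
    @GlobalTransactional(name = "create-order", rollbackFor = Exception.class)
    public void createOrder(OmsOrder order, Long skuId, int quantity) {
        orderMapper.insert(order);                    // local branch transaction
        stockFeignClient.deduct(skuId, quantity);     // remote branch transaction in another service
    }
}
```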
@@ -9,10 +9,9 @@ import org.springframework.cloud.client.discovery.EnableDiscoveryClient;
import org.springframework.cloud.openfeign.EnableFeignClients;
import org.springframework.transaction.annotation.EnableTransactionManagement;

@SpringBootApplication(exclude = DataSourceAutoConfiguration.class)
@SpringBootApplication
@EnableDiscoveryClient
@EnableFeignClients(basePackageClasses = { MemberFeignClient.class, SkuFeignClient.class})
@EnableTransactionManagement
public class OmsApplication {

    public static void main(String[] args) {

@@ -1,24 +1,23 @@
package com.youlai.mall.oms.controller.admin;

import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.core.conditions.update.LambdaUpdateWrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.youlai.common.result.PageResult;
import com.youlai.common.result.Result;
import com.youlai.mall.oms.dto.OrderInfoDTO;
import com.youlai.mall.oms.dto.SeataOrderDTO;
import com.youlai.mall.oms.common.enums.OrderStatusEnum;
import com.youlai.mall.oms.model.dto.OrderDTO;
import com.youlai.mall.oms.model.entity.OmsOrder;
import com.youlai.mall.oms.model.entity.OmsOrderItem;
import com.youlai.mall.oms.model.query.OrderPageQuery;
import com.youlai.mall.oms.service.OrderItemService;
import com.youlai.mall.oms.service.admin.OmsOrderService;
import io.swagger.v3.oas.annotations.tags.Tag;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.Parameter;
import io.swagger.v3.oas.annotations.tags.Tag;
import lombok.RequiredArgsConstructor;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

import java.util.Collections;
import java.util.List;

@@ -1,4 +1,4 @@
package com.youlai.mall.oms.common.enums;
package com.youlai.mall.oms.enums;

import com.youlai.common.base.IBaseEnum;
import lombok.Getter;
@@ -1,4 +1,4 @@
package com.youlai.mall.oms.common.enums;
package com.youlai.mall.oms.enums;

import com.youlai.common.base.IBaseEnum;
import lombok.Getter;
@@ -1,4 +1,4 @@
package com.youlai.mall.oms.common.enums;
package com.youlai.mall.oms.enums;


import com.youlai.common.base.IBaseEnum;
@@ -9,7 +9,7 @@ import org.apache.ibatis.annotations.*;
import java.util.List;

/**
 * Order detail table
 * Order table
 */
@Mapper
public interface OrderMapper extends BaseMapper<OmsOrder> {

@@ -1,6 +1,6 @@
package com.youlai.mall.oms.model.form;

import com.youlai.mall.oms.common.enums.OrderSourceTypeEnum;
import com.youlai.mall.oms.enums.OrderSourceTypeEnum;
import com.youlai.mall.oms.model.dto.OrderItemDTO;
import com.youlai.mall.ums.dto.MemberAddressDTO;
import io.swagger.v3.oas.annotations.media.Schema;

@@ -1,19 +1,13 @@
package com.youlai.mall.oms.service.admin.impl;

import com.baomidou.mybatisplus.core.conditions.update.LambdaUpdateWrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
import com.youlai.common.redis.BusinessSnGenerator;
import com.youlai.mall.oms.common.enums.OrderStatusEnum;
import com.youlai.mall.oms.dto.SeataOrderDTO;
import com.youlai.mall.oms.mapper.OrderMapper;
import com.youlai.mall.oms.model.entity.OmsOrder;
import com.youlai.mall.oms.model.query.OrderPageQuery;
import com.youlai.mall.oms.service.admin.OmsOrderService;
import com.youlai.mall.ums.api.MemberFeignClient;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Service;

import java.util.List;

@@ -5,7 +5,7 @@ import cn.hutool.core.lang.Assert;
import com.youlai.common.result.ResultCode;
import com.youlai.common.security.util.SecurityUtils;
import com.youlai.common.web.exception.BizException;
import com.youlai.mall.oms.common.constant.OmsConstants;
import com.youlai.common.constant.OrderConstants;
import com.youlai.mall.oms.model.dto.CartItemDTO;
import com.youlai.mall.oms.service.CartService;
import com.youlai.mall.pms.api.SkuFeignClient;
@@ -54,7 +54,7 @@ public class CartServiceImpl implements CartService {
 */
@Override
public boolean deleteCart() {
    String key = OmsConstants.MEMBER_CART_PREFIX + SecurityUtils.getMemberId();
    String key = OrderConstants.MEMBER_CART_PREFIX + SecurityUtils.getMemberId();
    redisTemplate.delete(key);
    return true;
}
@@ -189,7 +189,7 @@ public class CartServiceImpl implements CartService {
 * Get the first level, i.e. a single member's cart
 */
private BoundHashOperations getCartHashOperations(Long memberId) {
    String cartKey = OmsConstants.MEMBER_CART_PREFIX + memberId;
    String cartKey = OrderConstants.MEMBER_CART_PREFIX + memberId;
    BoundHashOperations operations = redisTemplate.boundHashOps(cartKey);
    return operations;
}
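These hunks move MEMBER_CART_PREFIX from the module-local OmsConstants into the shared OrderConstants; the cart itself stays a per-member Redis hash. A small illustrative sketch of that access pattern (the prefix value and field layout are assumptions):

```java
import org.springframework.data.redis.core.BoundHashOperations;
import org.springframework.data.redis.core.StringRedisTemplate;

// Illustrative sketch of the per-member cart hash; the prefix value here is an assumption.
public class CartKeyDemo {

    static final String MEMBER_CART_PREFIX = "oms:cart:"; // assumed for illustration

    // One Redis hash per member, one field per SKU.
    static void addToCart(StringRedisTemplate redisTemplate, Long memberId, Long skuId, int count) {
        String cartKey = MEMBER_CART_PREFIX + memberId;
        BoundHashOperations<String, String, String> cart = redisTemplate.boundHashOps(cartKey);
        cart.put("sku:" + skuId, "{\"skuId\":" + skuId + ",\"count\":" + count + "}");
    }

    // deleteCart() in the service simply drops the whole hash.
    static void deleteCart(StringRedisTemplate redisTemplate, Long memberId) {
        redisTemplate.delete(MEMBER_CART_PREFIX + memberId);
    }
}
```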
@@ -26,8 +26,8 @@ import com.youlai.common.result.Result;
import com.youlai.common.security.util.SecurityUtils;
import com.youlai.common.web.exception.BizException;
import com.youlai.mall.oms.config.WxPayProperties;
import com.youlai.mall.oms.common.enums.OrderStatusEnum;
import com.youlai.mall.oms.common.enums.PayTypeEnum;
import com.youlai.mall.oms.enums.OrderStatusEnum;
import com.youlai.mall.oms.enums.PayTypeEnum;
import com.youlai.mall.oms.converter.OrderConverter;
import com.youlai.mall.oms.converter.OrderItemConverter;
import com.youlai.mall.oms.mapper.OrderMapper;
@@ -68,7 +68,7 @@ import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.stream.Collectors;

import static com.youlai.mall.oms.common.constant.OmsConstants.*;
import static com.youlai.common.constant.OrderConstants.*;

/**
 * Order service implementation
@ -1,23 +0,0 @@
|
||||
${AnsiColor.BRIGHT_RED} 升职加薪
|
||||
钱 ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠠⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ ⠀⠀⠀⠀⠀⠀
|
||||
多 ⠀⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠠⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ 身
|
||||
事 ⠸⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢙⢂⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ 强
|
||||
少 ⢸⣱⢄⣀⣀⣀⣀⣀ ⢀⣀⣠⢀⡀⣤⠤⣚⣼⠀ ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ 体
|
||||
干 ⠀⠒⠿⣊⣇⣧⣾⣟⣝⣝⣿⣿⣜⡵⠵⢼⣊⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ 健
|
||||
的 ⠀⠀⠀⠐⠺⠿⣿⠻⢟⣿⢍⢍⢻⣿⣿⣧⡞⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ 进
|
||||
爽 ⠀⠀⠀⠀⠀⠀⣿⢠⣿⣿ ⣿⣴⣿⣿⣟⢷⣦⣀⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ 步
|
||||
⠀⠀⠀⠀⠀⢸⢿⡿⢿⣿⣿⣿ ⢫⣿⠏⢤⣿⠩⣝⣷⣤⣀⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ ⣠⣄⣀⠀⠀⠀⠀⠀⠀⠀⠀⠀ 快
|
||||
⠀⠀⠀⠀⠀⣏⡃⣘⣃⣿⠿⢛⣡⣿⠿⢶⣿⣿⣷⣬⣍⣼⡟⢻⣟⢟⠿⣿⠿⢛⣿⠿⣿⠿⢳⣶⣿⡿⠛⠛⠿⣷⣄⠀⠀⠀⠀⠀⠀⠀
|
||||
⠀⠀⠀⠀⠀⠈⠉⠛⠛⠻⡿⡻⡉⠡⡀⣿⣿⣿⣿⣿⣿⣿⣷⣾⣬⣥⣴⣿⣗⣿⠡⣷⡗⣼⡷⠛⡻⣷⡀⠀⠀⠈⠻⣦⣀⠀⡔⠒⠲⡇
|
||||
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢿⡷⣷⡷⡿⣿⣿⣿⣿⣿⣿⣿⡍⠛⢛⢍⣽⠏⣼⠻⣜⣻⢟⣵⣷⠿⣿⢜⣧⠀⠀⠀⠀⠀⠉⠉⠑⠒⠊⠀
|
||||
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣮⣠⣠⣴⠿⣿⣿⣿⣿⣿⣿⣿⣾⣼⣾⡿⢧⣽⣎⠿⠻⣻⡵⢻⣿⠏⣿⣿⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
|
||||
⠀⠀⠀⠀⠀⠀⠀⠀⢀⣀⣸⡟⢹⣗⣽⠿⣿⣿⣿⣿⣿⣿⡿⣆⣌⠐⣰⣿⣄⣛⣿⣻⠛⢍⣼⣋⣵⠣⡟⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
|
||||
⠀⠀⠀⠀⡤⡲⣴⢄⣯⣬⣿⣷⣄⣹⠅⣱⣿⣿⣿⣿⣿⣿⠌⢽⣿⡟⠛⡋⣹⡝⣷⣉⣻⣏⣉⣲⣿⣿⡇⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
|
||||
⠀⠀⠀⠀⢨⡪⡭⢸⣿⠃⠀⠉⠙⠛⠳⢼⣿⣿⣿⣿⢻⢊⣢⣽⣋⣿⣾⠾⣟⣯⣷⣿⡻⢿⣿⣿⣿⣿⣿⣦⡀⢠⣀⣤⠀⠀⠀⡀⠀⠀
|
||||
⠀⠛⣠⣃⣬⣿⣁⠀⣿⣗⣤⠀⡀⠀⠀⣜⢿⣿⠏⠉⠋⠉⠉⠉⠉⠀⠀⠀⠙⠿⣿⣿⣧⠀⠈⠙⠛⠿⣿⣯⣼⠀⡃⣣⣧⣤⣾⣇⠀⠀
|
||||
⢀⣀⣴⡿⣺⡏⢻⣆⡈⠓⠻⠃⡐⠀⠀⣷⣿⠋⠀⠀⠀⠀⠀⣼⣵⠇⡀⠀⠀⠀⣸⣿⡭⠃⠀⠀⠀⠀⠈⠻⣿⡄⠀⠿⣯⣖⡽⠃⠀⠀
|
||||
⢨⣵⣭⣽⡗⡟⢿⣶⣿⡄⠀⠀⠀⠀⢠⣿⡇⠀⠀⠀⠙⡒⣥⣄⡘⠍⠀⢀⣠⣼⡿⠋⠀⠀⠀⠀⠀⠀⠀⠀⢹⣷⡀⠠⢵⠸⣤⣀⠀⠀
|
||||
⠀⠉⢈⣋⣦⣴⡄⢉⣰⣲⡂⠀⠀⢰⣭⡽⠂⠀⠀⠀⠈⠀⠐⠻⢽⠿⡳⠶⡾⠛⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣯⣵⠣⣟⣆⡃⠧⣾⡅⠀
|
||||
⠀⠀⠙⠻⠿⠛⠉⠑⠋⠉⠀⠀⠪⣀⣉⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠚⠀⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠠⣱⣀⡁⠀⠀⠃
|
||||
${AnsiColor.BRIGHT_YELLOW}Spring Boot 版本:${spring-boot.version}
|
||||
${AnsiColor.BRIGHT_YELLOW}项目名称:${spring.application.name} | 端口号:${server.port}
|
@@ -1,23 +0,0 @@
server:
  port: 8603

spring:
  main:
    allow-circular-references: true
  mvc:
    pathmatch:
      matching-strategy: ant_path_matcher
  cloud:
    nacos:
      discovery:
        server-addr: nacos-headless.infrastructure:8848 # use the k8s headless service
        namespace: youlai-namespace-id
      config:
        server-addr: ${spring.cloud.nacos.discovery.server-addr}
        file-extension: yaml
        namespace: youlai-namespace-id
        shared-configs[0]:
          data-id: youlai-common.yaml
          refresh: true

@@ -12,6 +12,7 @@
    <artifactId>pms-api</artifactId>

    <dependencies>

        <dependency>
            <groupId>com.youlai</groupId>
            <artifactId>common-core</artifactId>
@@ -23,13 +24,11 @@
            <optional>true</optional>
        </dependency>

        <!-- HTTP client for remote calls -->
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-starter-openfeign</artifactId>
        </dependency>


    </dependencies>

</project>
@@ -9,8 +9,8 @@ import org.springframework.cloud.openfeign.EnableFeignClients;
import org.springframework.transaction.annotation.EnableTransactionManagement;

@SpringBootApplication
@EnableFeignClients(basePackageClasses = {MemberFeignClient.class})
@EnableDiscoveryClient
@EnableFeignClients(basePackageClasses = {MemberFeignClient.class})
public class PmsApplication {
    public static void main(String[] args) {
        SpringApplication.run(PmsApplication.class, args);

@@ -1,4 +1,4 @@
package com.youlai.mall.pms.common.enums;
package com.youlai.mall.pms.enums;

import lombok.Getter;

@@ -5,7 +5,7 @@ import cn.hutool.core.lang.Assert;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.core.conditions.update.LambdaUpdateWrapper;
import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
import com.youlai.mall.pms.common.constant.ProductConstants;
import com.youlai.common.constant.ProductConstants;
import com.youlai.mall.pms.mapper.PmsSkuMapper;
import com.youlai.mall.pms.model.dto.CheckPriceDTO;
import com.youlai.mall.pms.model.dto.SkuDTO;

@@ -10,8 +10,8 @@ import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
import com.youlai.common.security.util.SecurityUtils;
import com.youlai.mall.pms.common.constant.ProductConstants;
import com.youlai.mall.pms.common.enums.AttributeTypeEnum;
import com.youlai.common.constant.ProductConstants;
import com.youlai.mall.pms.enums.AttributeTypeEnum;
import com.youlai.mall.pms.converter.SpuAttributeConverter;
import com.youlai.mall.pms.converter.SpuConverter;
import com.youlai.mall.pms.mapper.PmsSpuMapper;

@@ -1,4 +1,4 @@
package com.youlai.mall.pms.common.util;
package com.youlai.mall.pms.util;

import com.google.common.hash.Funnel;
import com.google.common.hash.Hashing;
@@ -12,17 +12,20 @@ spring:
      # registry
      discovery:
        server-addr: http://localhost:8848
        username: nacos
        password: nacos
      # config center
      config:
        # local startup
        ## server-addr: ${spring.cloud.nacos.discovery.server-addr}
        # quick startup
        server-addr: http://f.youlai.tech:8848
        server-addr: http://localhost:8848
        file-extension: yaml
        shared-configs[0]:
          data-id: youlai-common.yaml
          refresh: true

        username: nacos
        password: nacos

@@ -1,22 +0,0 @@
server:
  port: 8802

spring:
  main:
    allow-circular-references: true
  mvc:
    pathmatch:
      matching-strategy: ant_path_matcher
  cloud:
    nacos:
      discovery:
        server-addr: nacos-headless.infrastructure:8848 # use the k8s headless service
        namespace: youlai-namespace-id
      config:
        server-addr: ${spring.cloud.nacos.discovery.server-addr}
        file-extension: yaml
        namespace: youlai-namespace-id
        # shared config
        shared-configs[0]:
          data-id: youlai-common.yaml
          refresh: true
@@ -13,7 +13,7 @@
        <result property="price" column="price" jdbcType="BIGINT"/>
        <result property="sales" column="sales" jdbcType="INTEGER"/>
        <result property="picUrl" column="pic_url" jdbcType="VARCHAR"/>
        <result property="album" column="album" jdbcType="OTHER"/>
        <result property="album" column="album" typeHandler="com.youlai.common.mybatis.handler.StringArrayJsonTypeHandler"/>
        <result property="unit" column="unit" jdbcType="VARCHAR"/>
        <result property="description" column="description" jdbcType="VARCHAR"/>
        <result property="detail" column="detail" jdbcType="VARCHAR"/>
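The album column switches from a generic jdbcType="OTHER" mapping to a dedicated StringArrayJsonTypeHandler, so a JSON array column maps onto String[]. The handler lives in common-mybatis and is not part of this diff; a minimal sketch of what such a MyBatis type handler typically looks like (the Jackson-based implementation is an assumption):

```java
import java.io.IOException;
import java.sql.CallableStatement;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.ibatis.type.BaseTypeHandler;
import org.apache.ibatis.type.JdbcType;

// Hypothetical sketch of a JSON <-> String[] type handler; the real one lives in common-mybatis.
public class StringArrayJsonTypeHandlerSketch extends BaseTypeHandler<String[]> {

    private static final ObjectMapper MAPPER = new ObjectMapper();

    @Override
    public void setNonNullParameter(PreparedStatement ps, int i, String[] parameter, JdbcType jdbcType) throws SQLException {
        try {
            ps.setString(i, MAPPER.writeValueAsString(parameter)); // store the array as a JSON string
        } catch (IOException e) {
            throw new SQLException("Failed to serialize album", e);
        }
    }

    @Override
    public String[] getNullableResult(ResultSet rs, String columnName) throws SQLException {
        return parse(rs.getString(columnName));
    }

    @Override
    public String[] getNullableResult(ResultSet rs, int columnIndex) throws SQLException {
        return parse(rs.getString(columnIndex));
    }

    @Override
    public String[] getNullableResult(CallableStatement cs, int columnIndex) throws SQLException {
        return parse(cs.getString(columnIndex));
    }

    private String[] parse(String json) throws SQLException {
        if (json == null || json.isEmpty()) {
            return null;
        }
        try {
            return MAPPER.readValue(json, String[].class);
        } catch (IOException e) {
            throw new SQLException("Failed to deserialize album", e);
        }
    }
}
```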
@@ -12,13 +12,17 @@ spring:
      # registry
      discovery:
        server-addr: http://localhost:8848
        username: nacos
        password: nacos
      # config center
      config:
        # local startup
        ## server-addr: ${spring.cloud.nacos.discovery.server-addr}
        # quick startup
        server-addr: http://f.youlai.tech:8848
        server-addr: http://localhost:8848
        file-extension: yaml
        shared-configs[0]:
          data-id: youlai-common.yaml
          refresh: true
        username: nacos
        password: nacos
@@ -1,23 +0,0 @@
server:
  port: 8804

spring:
  main:
    allow-circular-references: true
  mvc:
    pathmatch:
      matching-strategy: ant_path_matcher
  cloud:
    nacos:
      # registry
      discovery:
        server-addr: nacos-headless.infrastructure:8848 # use the k8s headless service
        namespace: youlai-namespace-id
      # config center
      config:
        server-addr: ${spring.cloud.nacos.discovery.server-addr}
        file-extension: yaml
        namespace: youlai-namespace-id
        shared-configs[0]:
          data-id: youlai-common.yaml
          refresh: true
@@ -3,22 +3,19 @@ package com.youlai.mall.ums.service.impl;
import cn.hutool.core.bean.BeanUtil;
import cn.hutool.core.lang.Assert;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.core.conditions.update.LambdaUpdateWrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
import com.youlai.common.result.Result;
import com.youlai.common.result.ResultCode;
import com.youlai.common.security.util.SecurityUtils;
import com.youlai.common.web.exception.BizException;
import com.youlai.mall.pms.model.vo.ProductHistoryVO;
import com.youlai.mall.ums.constant.UmsConstants;
import com.youlai.common.constant.MemberConstants;
import com.youlai.mall.ums.convert.AddressConvert;
import com.youlai.mall.ums.convert.MemberConvert;
import com.youlai.mall.ums.dto.MemberAddressDTO;
import com.youlai.mall.ums.dto.MemberAuthDTO;
import com.youlai.mall.ums.dto.MemberRegisterDto;
import com.youlai.mall.ums.dto.MemberInfoDTO;
import com.youlai.mall.ums.mapper.UmsMemberMapper;
import com.youlai.mall.ums.model.entity.UmsAddress;
import com.youlai.mall.ums.model.entity.UmsMember;
@@ -29,7 +26,6 @@ import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

import java.util.List;
import java.util.Set;
@@ -61,7 +57,7 @@ public class UmsMemberServiceImpl extends ServiceImpl<UmsMemberMapper, UmsMember
@Override
public void addProductViewHistory(ProductHistoryVO product, Long userId) {
    if (userId != null) {
        String key = UmsConstants.USER_PRODUCT_HISTORY + userId;
        String key = MemberConstants.USER_PRODUCT_HISTORY + userId;
        redisTemplate.opsForZSet().add(key, product, System.currentTimeMillis());
        Long size = redisTemplate.opsForZSet().size(key);
        if (size > 10) {
@@ -72,7 +68,7 @@ public class UmsMemberServiceImpl extends ServiceImpl<UmsMemberMapper, UmsMember

@Override
public Set<ProductHistoryVO> getProductViewHistory(Long userId) {
    return redisTemplate.opsForZSet().reverseRange(UmsConstants.USER_PRODUCT_HISTORY + userId, 0, 9);
    return redisTemplate.opsForZSet().reverseRange(MemberConstants.USER_PRODUCT_HISTORY + userId, 0, 9);
}

/**
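addProductViewHistory above keeps each member's recently viewed products in a Redis sorted set scored by timestamp and capped at ten entries; the trimming branch is cut off by the hunk. A small sketch of that pattern, assuming removeRange is used to drop the oldest entries:

```java
import java.util.Set;
import org.springframework.data.redis.core.RedisTemplate;

// Illustrative sketch of a capped, timestamp-scored browsing history in Redis.
public class ViewHistoryDemo {

    private static final int MAX_HISTORY = 10;

    static void addViewHistory(RedisTemplate<String, Object> redisTemplate, String key, Object product) {
        // newest entries get the highest score
        redisTemplate.opsForZSet().add(key, product, System.currentTimeMillis());
        Long size = redisTemplate.opsForZSet().size(key);
        if (size != null && size > MAX_HISTORY) {
            // drop the oldest (lowest-scored) entries so only MAX_HISTORY remain
            redisTemplate.opsForZSet().removeRange(key, 0, size - MAX_HISTORY - 1);
        }
    }

    static Set<Object> latestHistory(RedisTemplate<String, Object> redisTemplate, String key) {
        // highest scores first, i.e. most recently viewed
        return redisTemplate.opsForZSet().reverseRange(key, 0, MAX_HISTORY - 1);
    }
}
```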
@@ -1,21 +0,0 @@
server:
  port: 8601

spring:
  main:
    allow-circular-references: true
  mvc:
    pathmatch:
      matching-strategy: ant_path_matcher
  cloud:
    nacos:
      discovery:
        server-addr: nacos-headless.infrastructure:8848 # use the k8s headless service
        namespace: youlai-namespace-id
      config:
        server-addr: ${spring.cloud.nacos.discovery.server-addr}
        file-extension: yaml
        namespace: youlai-namespace-id
        shared-configs[0]:
          data-id: youlai-common.yaml
          refresh: true
@ -1,24 +0,0 @@
|
||||
@echo off
|
||||
rem Copyright 1999-2018 Alibaba Group Holding Ltd.
|
||||
rem Licensed under the Apache License, Version 2.0 (the "License");
|
||||
rem you may not use this file except in compliance with the License.
|
||||
rem You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
if not exist "%JAVA_HOME%\bin\jps.exe" echo Please set the JAVA_HOME variable in your environment, We need java(x64)! jdk8 or later is better! & EXIT /B 1
|
||||
|
||||
setlocal
|
||||
|
||||
set "PATH=%JAVA_HOME%\bin;%PATH%"
|
||||
|
||||
echo killing nacos server
|
||||
|
||||
for /f "tokens=1" %%i in ('jps -m ^| find "nacos.nacos"') do ( taskkill /F /PID %%i )
|
||||
|
||||
echo Done!
|
@ -1,28 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 1999-2018 Alibaba Group Holding Ltd.
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
cd `dirname $0`/../target
|
||||
target_dir=`pwd`
|
||||
|
||||
pid=`ps ax | grep -i 'nacos.nacos' | grep ${target_dir} | grep java | grep -v grep | awk '{print $1}'`
|
||||
if [ -z "$pid" ] ; then
|
||||
echo "No nacosServer running."
|
||||
exit -1;
|
||||
fi
|
||||
|
||||
echo "The nacosServer(${pid}) is running..."
|
||||
|
||||
kill ${pid}
|
||||
|
||||
echo "Send shutdown request to nacosServer(${pid}) OK"
|
@ -1,95 +0,0 @@
@echo off
rem Copyright 1999-2018 Alibaba Group Holding Ltd.
rem Licensed under the Apache License, Version 2.0 (the "License");
rem you may not use this file except in compliance with the License.
rem You may obtain a copy of the License at
rem
rem      http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
if not exist "%JAVA_HOME%\bin\java.exe" echo Please set the JAVA_HOME variable in your environment, We need java(x64)! jdk8 or later is better! & EXIT /B 1
set "JAVA=%JAVA_HOME%\bin\java.exe"

setlocal enabledelayedexpansion

set BASE_DIR=%~dp0
rem added double quotation marks to avoid the issue caused by the folder names containing spaces.
rem removed the last 5 chars(which means \bin\) to get the base DIR.
set BASE_DIR="%BASE_DIR:~0,-5%"

set CUSTOM_SEARCH_LOCATIONS=file:%BASE_DIR%/conf/

set MODE="cluster"
set FUNCTION_MODE="all"
set SERVER=nacos-server
set MODE_INDEX=-1
set FUNCTION_MODE_INDEX=-1
set SERVER_INDEX=-1
set EMBEDDED_STORAGE_INDEX=-1
set EMBEDDED_STORAGE=""


set i=0
for %%a in (%*) do (
    if "%%a" == "-m" ( set /a MODE_INDEX=!i!+1 )
    if "%%a" == "-f" ( set /a FUNCTION_MODE_INDEX=!i!+1 )
    if "%%a" == "-s" ( set /a SERVER_INDEX=!i!+1 )
    if "%%a" == "-p" ( set /a EMBEDDED_STORAGE_INDEX=!i!+1 )
    set /a i+=1
)

set i=0
for %%a in (%*) do (
    if %MODE_INDEX% == !i! ( set MODE="%%a" )
    if %FUNCTION_MODE_INDEX% == !i! ( set FUNCTION_MODE="%%a" )
    if %SERVER_INDEX% == !i! (set SERVER="%%a")
    if %EMBEDDED_STORAGE_INDEX% == !i! (set EMBEDDED_STORAGE="%%a")
    set /a i+=1
)

rem if nacos startup mode is standalone
if %MODE% == "standalone" (
    echo "nacos is starting with standalone"
    set "NACOS_OPTS=-Dnacos.standalone=true"
    set "NACOS_JVM_OPTS=-Xms512m -Xmx512m -Xmn256m"
)

rem if nacos startup mode is cluster
if %MODE% == "cluster" (
    echo "nacos is starting with cluster"
    if %EMBEDDED_STORAGE% == "embedded" (
        set "NACOS_OPTS=-DembeddedStorage=true"
    )

    set "NACOS_JVM_OPTS=-server -Xms2g -Xmx2g -Xmn1g -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=320m -XX:-OmitStackTraceInFastThrow -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=%BASE_DIR%\logs\java_heapdump.hprof -XX:-UseLargePages"
)

rem set nacos's functionMode
if %FUNCTION_MODE% == "config" (
    set "NACOS_OPTS=%NACOS_OPTS% -Dnacos.functionMode=config"
)

if %FUNCTION_MODE% == "naming" (
    set "NACOS_OPTS=%NACOS_OPTS% -Dnacos.functionMode=naming"
)

rem set nacos options
set "NACOS_OPTS=%NACOS_OPTS% -Dloader.path=%BASE_DIR%/plugins/health,%BASE_DIR%/plugins/cmdb"
set "NACOS_OPTS=%NACOS_OPTS% -Dnacos.home=%BASE_DIR%"
set "NACOS_OPTS=%NACOS_OPTS% -jar %BASE_DIR%\target\%SERVER%.jar"

rem set nacos spring config location
set "NACOS_CONFIG_OPTS=--spring.config.additional-location=%CUSTOM_SEARCH_LOCATIONS%"

rem set nacos log4j file location
set "NACOS_LOG4J_OPTS=--logging.config=%BASE_DIR%/conf/nacos-logback.xml"


set COMMAND="%JAVA%" %NACOS_JVM_OPTS% %NACOS_OPTS% %NACOS_CONFIG_OPTS% %NACOS_LOG4J_OPTS% nacos.nacos %*

rem start nacos command
%COMMAND%
@ -1,142 +0,0 @@
#!/bin/bash

# Copyright 1999-2018 Alibaba Group Holding Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

cygwin=false
darwin=false
os400=false
case "`uname`" in
CYGWIN*) cygwin=true;;
Darwin*) darwin=true;;
OS400*) os400=true;;
esac
error_exit ()
{
    echo "ERROR: $1 !!"
    exit 1
}
[ ! -e "$JAVA_HOME/bin/java" ] && JAVA_HOME=$HOME/jdk/java
[ ! -e "$JAVA_HOME/bin/java" ] && JAVA_HOME=/usr/java
[ ! -e "$JAVA_HOME/bin/java" ] && JAVA_HOME=/opt/taobao/java
[ ! -e "$JAVA_HOME/bin/java" ] && unset JAVA_HOME

if [ -z "$JAVA_HOME" ]; then
    if $darwin; then

        if [ -x '/usr/libexec/java_home' ] ; then
            export JAVA_HOME=`/usr/libexec/java_home`

        elif [ -d "/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home" ]; then
            export JAVA_HOME="/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home"
        fi
    else
        JAVA_PATH=`dirname $(readlink -f $(which javac))`
        if [ "x$JAVA_PATH" != "x" ]; then
            export JAVA_HOME=`dirname $JAVA_PATH 2>/dev/null`
        fi
    fi
    if [ -z "$JAVA_HOME" ]; then
        error_exit "Please set the JAVA_HOME variable in your environment, We need java(x64)! jdk8 or later is better!"
    fi
fi

export SERVER="nacos-server"
export MODE="cluster"
export FUNCTION_MODE="all"
export MEMBER_LIST=""
export EMBEDDED_STORAGE=""
while getopts ":m:f:s:c:p:" opt
do
    case $opt in
        m)
            MODE=$OPTARG;;
        f)
            FUNCTION_MODE=$OPTARG;;
        s)
            SERVER=$OPTARG;;
        c)
            MEMBER_LIST=$OPTARG;;
        p)
            EMBEDDED_STORAGE=$OPTARG;;
        ?)
            echo "Unknown parameter"
            exit 1;;
    esac
done

export JAVA_HOME
export JAVA="$JAVA_HOME/bin/java"
export BASE_DIR=`cd $(dirname $0)/..; pwd`
export CUSTOM_SEARCH_LOCATIONS=file:${BASE_DIR}/conf/

#===========================================================================================
# JVM Configuration
#===========================================================================================
if [[ "${MODE}" == "standalone" ]]; then
    JAVA_OPT="${JAVA_OPT} -Xms512m -Xmx512m -Xmn256m"
    JAVA_OPT="${JAVA_OPT} -Dnacos.standalone=true"
else
    if [[ "${EMBEDDED_STORAGE}" == "embedded" ]]; then
        JAVA_OPT="${JAVA_OPT} -DembeddedStorage=true"
    fi
    JAVA_OPT="${JAVA_OPT} -server -Xms2g -Xmx2g -Xmn1g -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=320m"
    JAVA_OPT="${JAVA_OPT} -XX:-OmitStackTraceInFastThrow -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${BASE_DIR}/logs/java_heapdump.hprof"
    JAVA_OPT="${JAVA_OPT} -XX:-UseLargePages"

fi

if [[ "${FUNCTION_MODE}" == "config" ]]; then
    JAVA_OPT="${JAVA_OPT} -Dnacos.functionMode=config"
elif [[ "${FUNCTION_MODE}" == "naming" ]]; then
    JAVA_OPT="${JAVA_OPT} -Dnacos.functionMode=naming"
fi

JAVA_OPT="${JAVA_OPT} -Dnacos.member.list=${MEMBER_LIST}"

JAVA_MAJOR_VERSION=$($JAVA -version 2>&1 | sed -E -n 's/.* version "([0-9]*).*$/\1/p')
if [[ "$JAVA_MAJOR_VERSION" -ge "9" ]] ; then
    JAVA_OPT="${JAVA_OPT} -Xlog:gc*:file=${BASE_DIR}/logs/nacos_gc.log:time,tags:filecount=10,filesize=102400"
else
    JAVA_OPT="${JAVA_OPT} -Djava.ext.dirs=${JAVA_HOME}/jre/lib/ext:${JAVA_HOME}/lib/ext"
    JAVA_OPT="${JAVA_OPT} -Xloggc:${BASE_DIR}/logs/nacos_gc.log -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M"
fi

JAVA_OPT="${JAVA_OPT} -Dloader.path=${BASE_DIR}/plugins/health,${BASE_DIR}/plugins/cmdb"
JAVA_OPT="${JAVA_OPT} -Dnacos.home=${BASE_DIR}"
JAVA_OPT="${JAVA_OPT} -jar ${BASE_DIR}/target/${SERVER}.jar"
JAVA_OPT="${JAVA_OPT} ${JAVA_OPT_EXT}"
JAVA_OPT="${JAVA_OPT} --spring.config.additional-location=${CUSTOM_SEARCH_LOCATIONS}"
JAVA_OPT="${JAVA_OPT} --logging.config=${BASE_DIR}/conf/nacos-logback.xml"
JAVA_OPT="${JAVA_OPT} --server.max-http-header-size=524288"

if [ ! -d "${BASE_DIR}/logs" ]; then
    mkdir ${BASE_DIR}/logs
fi

echo "$JAVA ${JAVA_OPT}"

if [[ "${MODE}" == "standalone" ]]; then
    echo "nacos is starting with standalone"
else
    echo "nacos is starting with cluster"
fi

# check the start.out log output file
if [ ! -f "${BASE_DIR}/logs/start.out" ]; then
    touch "${BASE_DIR}/logs/start.out"
fi
# start
echo "$JAVA ${JAVA_OPT}" > ${BASE_DIR}/logs/start.out 2>&1 &
nohup $JAVA ${JAVA_OPT} nacos.nacos >> ${BASE_DIR}/logs/start.out 2>&1 &
echo "nacos is starting,you can check the ${BASE_DIR}/logs/start.out"
@ -1,27 +0,0 @@
/*
 * Copyright 1999-2018 Alibaba Group Holding Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

ALTER TABLE `config_info_tag`
    MODIFY COLUMN `src_ip` varchar(50) CHARACTER SET utf8 COLLATE utf8_bin NULL DEFAULT NULL COMMENT 'source ip' AFTER `src_user`;

ALTER TABLE `his_config_info`
    MODIFY COLUMN `src_ip` varchar(50) CHARACTER SET utf8 COLLATE utf8_bin NULL DEFAULT NULL AFTER `src_user`;

ALTER TABLE `config_info`
    MODIFY COLUMN `src_ip` varchar(50) CHARACTER SET utf8 COLLATE utf8_bin NULL DEFAULT NULL COMMENT 'source ip' AFTER `src_user`;

ALTER TABLE `config_info_beta`
    MODIFY COLUMN `src_ip` varchar(50) CHARACTER SET utf8 COLLATE utf8_bin NULL DEFAULT NULL COMMENT 'source ip' AFTER `src_user`;
@ -1,201 +0,0 @@
#
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

#*************** Spring Boot Related Configurations ***************#
### Default web context path:
server.servlet.contextPath=/nacos
### Default web server port:
server.port=8848

#*************** Network Related Configurations ***************#
### If prefer hostname over ip for Nacos server addresses in cluster.conf:
# nacos.inetutils.prefer-hostname-over-ip=false

### Specify local server's IP:
# nacos.inetutils.ip-address=


#*************** Config Module Related Configurations ***************#
### If use MySQL as datasource:
# spring.datasource.platform=mysql

### Count of DB:
# db.num=1

### Connect URL of DB:
# db.url.0=jdbc:mysql://127.0.0.1:3306/nacos?characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC
# db.user.0=nacos
# db.password.0=nacos

### Connection pool configuration: hikariCP
db.pool.config.connectionTimeout=30000
db.pool.config.validationTimeout=10000
db.pool.config.maximumPoolSize=20
db.pool.config.minimumIdle=2

#*************** Naming Module Related Configurations ***************#
### Data dispatch task execution period in milliseconds:
# nacos.naming.distro.taskDispatchPeriod=200

### Data count of batch sync task:
# nacos.naming.distro.batchSyncKeyCount=1000

### Retry delay in milliseconds if sync task failed:
# nacos.naming.distro.syncRetryDelay=5000

### If enable data warmup. If set to false, the server would accept request without local data preparation:
# nacos.naming.data.warmup=true

### If enable the instance auto expiration, kind like of health check of instance:
# nacos.naming.expireInstance=true

nacos.naming.empty-service.auto-clean=true
nacos.naming.empty-service.clean.initial-delay-ms=50000
nacos.naming.empty-service.clean.period-time-ms=30000

### Add in 2.0.0
### The interval to clean empty service
# nacos.naming.clean.empty-service.interval=60000

### The expired time to clean empty service
# nacos.naming.clean.empty-service.expired-time=60000

### The interval to clean expired metadata
# nacos.naming.clean.expired-metadata.interval=5000

### The expired time to clean metadata
# nacos.naming.clean.expired-metadata.expired-time=60000

#*************** CMDB Module Related Configurations ***************#
### The interval to dump external CMDB in seconds:
# nacos.cmdb.dumpTaskInterval=3600

### The interval of polling data change event in seconds:
# nacos.cmdb.eventTaskInterval=10

### The interval of loading labels in seconds:
# nacos.cmdb.labelTaskInterval=300

### If turn on data loading task:
# nacos.cmdb.loadDataAtStart=false


#*************** Metrics Related Configurations ***************#
### Metrics for prometheus
#management.endpoints.web.exposure.include=*

### Metrics for elastic search
management.metrics.export.elastic.enabled=false
#management.metrics.export.elastic.host=http://localhost:9200

### Metrics for influx
management.metrics.export.influx.enabled=false
#management.metrics.export.influx.db=springboot
#management.metrics.export.influx.uri=http://localhost:8086
#management.metrics.export.influx.auto-create-db=true
#management.metrics.export.influx.consistency=one
#management.metrics.export.influx.compressed=true


#*************** Access Log Related Configurations ***************#
### If turn on the access log:
server.tomcat.accesslog.enabled=true

### The access log pattern:
server.tomcat.accesslog.pattern=%h %l %u %t "%r" %s %b %D %{User-Agent}i %{Request-Source}i

### The directory of access log:
server.tomcat.basedir=


#*************** Access Control Related Configurations ***************#
### If enable spring security, this option is deprecated in 1.2.0:
#spring.security.enabled=false

### The ignore urls of auth, is deprecated in 1.2.0:
nacos.security.ignore.urls=/,/error,/**/*.css,/**/*.js,/**/*.html,/**/*.map,/**/*.svg,/**/*.png,/**/*.ico,/console-ui/public/**,/v1/auth/**,/v1/console/health/**,/actuator/**,/v1/console/server/**

### The auth system to use, currently only 'nacos' is supported:
nacos.core.auth.system.type=nacos

### If turn on auth system:
nacos.core.auth.enabled=false

### The token expiration in seconds:
nacos.core.auth.default.token.expire.seconds=18000

### The default token:
nacos.core.auth.default.token.secret.key=SecretKey012345678901234567890123456789012345678901234567890123456789

### Turn on/off caching of auth information. By turning on this switch, the update of auth information would have a 15 seconds delay.
nacos.core.auth.caching.enabled=true

### Since 1.4.1, Turn on/off white auth for user-agent: nacos-server, only for upgrade from old version.
nacos.core.auth.enable.userAgentAuthWhite=true

### Since 1.4.1, worked when nacos.core.auth.enabled=true and nacos.core.auth.enable.userAgentAuthWhite=false.
### The two properties is the white list for auth and used by identity the request from other server.
nacos.core.auth.server.identity.key=
nacos.core.auth.server.identity.value=

#*************** Istio Related Configurations ***************#
### If turn on the MCP server:
nacos.istio.mcp.server.enabled=false



###*************** Add from 1.3.0 ***************###


#*************** Core Related Configurations ***************#

### set the WorkerID manually
# nacos.core.snowflake.worker-id=

### Member-MetaData
# nacos.core.member.meta.site=
# nacos.core.member.meta.adweight=
# nacos.core.member.meta.weight=

### MemberLookup
### Addressing pattern category, If set, the priority is highest
# nacos.core.member.lookup.type=[file,address-server]
## Set the cluster list with a configuration file or command-line argument
# nacos.member.list=192.168.16.101:8847?raft_port=8807,192.168.16.101?raft_port=8808,192.168.16.101:8849?raft_port=8809
## for AddressServerMemberLookup
# Maximum number of retries to query the address server upon initialization
# nacos.core.address-server.retry=5
## Server domain name address of [address-server] mode
# address.server.domain=jmenv.tbsite.net
## Server port of [address-server] mode
# address.server.port=8080
## Request address of [address-server] mode
# address.server.url=/nacos/serverlist

#*************** JRaft Related Configurations ***************#

### Sets the Raft cluster election timeout, default value is 5 second
# nacos.core.protocol.raft.data.election_timeout_ms=5000
### Sets the amount of time the Raft snapshot will execute periodically, default is 30 minute
# nacos.core.protocol.raft.data.snapshot_interval_secs=30
### raft internal worker threads
# nacos.core.protocol.raft.data.core_thread_num=8
### Number of threads required for raft business request processing
# nacos.core.protocol.raft.data.cli_service_thread_num=4
### raft linear read strategy. Safe linear reads are used by default, that is, the Leader tenure is confirmed by heartbeat
# nacos.core.protocol.raft.data.read_index_type=ReadOnlySafe
### rpc request timeout, default 5 seconds
# nacos.core.protocol.raft.data.rpc_request_timeout_ms=5000
@ -1,177 +0,0 @@
#
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

#*************** Spring Boot Related Configurations ***************#
### Default web context path:
server.servlet.contextPath=/nacos
### Default web server port:
server.port=8848

#*************** Network Related Configurations ***************#
### If prefer hostname over ip for Nacos server addresses in cluster.conf:
# nacos.inetutils.prefer-hostname-over-ip=false

### Specify local server's IP:
# nacos.inetutils.ip-address=


#*************** Config Module Related Configurations ***************#
### If use MySQL as datasource:
# spring.datasource.platform=mysql

### Count of DB:
# db.num=1

### Connect URL of DB:
# db.url.0=jdbc:mysql://127.0.0.1:3306/nacos?characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC
# db.user.0=nacos
# db.password.0=nacos


#*************** Naming Module Related Configurations ***************#
### Data dispatch task execution period in milliseconds:
# nacos.naming.distro.taskDispatchPeriod=200

### Data count of batch sync task:
# nacos.naming.distro.batchSyncKeyCount=1000

### Retry delay in milliseconds if sync task failed:
# nacos.naming.distro.syncRetryDelay=5000

### If enable data warmup. If set to false, the server would accept request without local data preparation:
# nacos.naming.data.warmup=true

### If enable the instance auto expiration, kind like of health check of instance:
# nacos.naming.expireInstance=true

nacos.naming.empty-service.auto-clean=true
nacos.naming.empty-service.clean.initial-delay-ms=50000
nacos.naming.empty-service.clean.period-time-ms=30000


#*************** CMDB Module Related Configurations ***************#
### The interval to dump external CMDB in seconds:
# nacos.cmdb.dumpTaskInterval=3600

### The interval of polling data change event in seconds:
# nacos.cmdb.eventTaskInterval=10

### The interval of loading labels in seconds:
# nacos.cmdb.labelTaskInterval=300

### If turn on data loading task:
# nacos.cmdb.loadDataAtStart=false


#*************** Metrics Related Configurations ***************#
### Metrics for prometheus
#management.endpoints.web.exposure.include=*

### Metrics for elastic search
management.metrics.export.elastic.enabled=false
#management.metrics.export.elastic.host=http://localhost:9200

### Metrics for influx
management.metrics.export.influx.enabled=false
#management.metrics.export.influx.db=springboot
#management.metrics.export.influx.uri=http://localhost:8086
#management.metrics.export.influx.auto-create-db=true
#management.metrics.export.influx.consistency=one
#management.metrics.export.influx.compressed=true


#*************** Access Log Related Configurations ***************#
### If turn on the access log:
server.tomcat.accesslog.enabled=true

### The access log pattern:
server.tomcat.accesslog.pattern=%h %l %u %t "%r" %s %b %D %{User-Agent}i %{Request-Source}i

### The directory of access log:
server.tomcat.basedir=


#*************** Access Control Related Configurations ***************#
### If enable spring security, this option is deprecated in 1.2.0:
#spring.security.enabled=false

### The ignore urls of auth, is deprecated in 1.2.0:
nacos.security.ignore.urls=/,/error,/**/*.css,/**/*.js,/**/*.html,/**/*.map,/**/*.svg,/**/*.png,/**/*.ico,/console-ui/public/**,/v1/auth/**,/v1/console/health/**,/actuator/**,/v1/console/server/**

### The auth system to use, currently only 'nacos' is supported:
nacos.core.auth.system.type=nacos

### If turn on auth system:
nacos.core.auth.enabled=false

### The token expiration in seconds:
nacos.core.auth.default.token.expire.seconds=18000

### The default token:
nacos.core.auth.default.token.secret.key=SecretKey012345678901234567890123456789012345678901234567890123456789

### Turn on/off caching of auth information. By turning on this switch, the update of auth information would have a 15 seconds delay.
nacos.core.auth.caching.enabled=true


#*************** Istio Related Configurations ***************#
### If turn on the MCP server:
nacos.istio.mcp.server.enabled=false



###*************** Add from 1.3.0 ***************###


#*************** Core Related Configurations ***************#

### set the WorkerID manually
# nacos.core.snowflake.worker-id=

### Member-MetaData
# nacos.core.member.meta.site=
# nacos.core.member.meta.adweight=
# nacos.core.member.meta.weight=

### MemberLookup
### Addressing pattern category, If set, the priority is highest
# nacos.core.member.lookup.type=[file,address-server]
## Set the cluster list with a configuration file or command-line argument
# nacos.member.list=192.168.16.101:8847?raft_port=8807,192.168.16.101?raft_port=8808,192.168.16.101:8849?raft_port=8809
## for AddressServerMemberLookup
# Maximum number of retries to query the address server upon initialization
# nacos.core.address-server.retry=5
## Server domain name address of [address-server] mode
# address.server.domain=jmenv.tbsite.net
## Server port of [address-server] mode
# address.server.port=8080
## Request address of [address-server] mode
# address.server.url=/nacos/serverlist

#*************** JRaft Related Configurations ***************#

### Sets the Raft cluster election timeout, default value is 5 second
# nacos.core.protocol.raft.data.election_timeout_ms=5000
### Sets the amount of time the Raft snapshot will execute periodically, default is 30 minute
# nacos.core.protocol.raft.data.snapshot_interval_secs=30
### raft internal worker threads
# nacos.core.protocol.raft.data.core_thread_num=8
### Number of threads required for raft business request processing
# nacos.core.protocol.raft.data.cli_service_thread_num=4
### raft linear read strategy. Safe linear reads are used by default, that is, the Leader tenure is confirmed by heartbeat
# nacos.core.protocol.raft.data.read_index_type=ReadOnlySafe
### rpc request timeout, default 5 seconds
# nacos.core.protocol.raft.data.rpc_request_timeout_ms=5000
@ -1,21 +0,0 @@
#
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

#it is ip
#example
192.168.16.101:8847
192.168.16.102
192.168.16.103
@ -1,778 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
  ~ Copyright 1999-2018 Alibaba Group Holding Ltd.
  ~
  ~ Licensed under the Apache License, Version 2.0 (the "License");
  ~ you may not use this file except in compliance with the License.
  ~ You may obtain a copy of the License at
  ~
  ~      http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
  -->

<configuration scan="true" scanPeriod="10 seconds">

    <springProperty scope="context" name="logPath" source="nacos.logs.path" defaultValue="${nacos.home}/logs"/>
    <property name="LOG_HOME" value="${logPath}"/>

    <appender name="cmdb-main"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${nacos.home}/logs/cmdb-main.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${nacos.home}/logs/cmdb-main.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>2GB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>7GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="naming-server"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/naming-server.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/naming-server.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>1GB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>7GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="async-naming-server" class="ch.qos.logback.classic.AsyncAppender">
        <discardingThreshold>0</discardingThreshold>
        <queueSize>1024</queueSize>
        <neverBlock>true</neverBlock>
        <appender-ref ref="naming-server"/>
    </appender>

    <appender name="naming-raft"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/naming-raft.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/naming-raft.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>1GB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>3GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="async-naming-raft" class="ch.qos.logback.classic.AsyncAppender">
        <discardingThreshold>0</discardingThreshold>
        <queueSize>1024</queueSize>
        <neverBlock>true</neverBlock>
        <appender-ref ref="naming-raft"/>
    </appender>

    <appender name="naming-distro"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/naming-distro.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/naming-distro.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>1GB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>3GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="async-naming-distro" class="ch.qos.logback.classic.AsyncAppender">
        <discardingThreshold>0</discardingThreshold>
        <queueSize>1024</queueSize>
        <neverBlock>true</neverBlock>
        <appender-ref ref="naming-distro"/>
    </appender>

    <appender name="naming-event"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/naming-event.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/naming-event.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>1GB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>3GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="async-naming-event" class="ch.qos.logback.classic.AsyncAppender">
        <discardingThreshold>0</discardingThreshold>
        <queueSize>1024</queueSize>
        <neverBlock>true</neverBlock>
        <appender-ref ref="naming-event"/>
    </appender>

    <appender name="naming-push"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/naming-push.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/naming-push.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>1GB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>3GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>
    <appender name="naming-rt"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/naming-rt.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/naming-rt.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>1GB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>3GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%msg%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="naming-performance"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/naming-performance.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/naming-performance.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>1GB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>3GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <!--config module logback config-->
    <appender name="dumpFile"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/config-dump.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/config-dump.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>2GB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>7GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>
    <appender name="pullFile"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/config-pull.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/config-pull.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>20MB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>128MB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>
    <appender name="fatalFile"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/config-fatal.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/config-fatal.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>20MB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>128MB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>
    <appender name="memoryFile"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/config-memory.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/config-memory.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>20MB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>128MB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>
    <appender name="pullCheckFile"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/config-pull-check.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/config-pull-check.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>1GB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>3GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%msg%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="clientLog"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/config-client-request.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/config-client-request.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>2GB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>7GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date|%msg%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="traceLog"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/config-trace.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/config-trace.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>2GB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>7GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date|%msg%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="notifyLog"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/config-notify.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/config-notify.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>1GB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>3GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="startLog"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/config-server.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/config-server.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>50MB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>512MB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="rootFile"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/nacos.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/nacos.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>50MB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>512MB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="nacos-address"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/nacos-address.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/nacos-address.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>2GB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>7GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="istio-main"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/istio-main.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/istio-main.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>2GB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>7GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="core-auth"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/core-auth.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/core-auth.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>2GB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>7GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="protocol-raft"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/protocol-raft.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/protocol-raft.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>2GB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>7GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="protocol-distro"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/protocol-distro.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/protocol-distro.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>2GB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>7GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="nacos-cluster"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/nacos-cluster.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/nacos-cluster.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>2GB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>7GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="alipay-jraft"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/alipay-jraft.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/alipay-jraft.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>2GB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>7GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <!--TPS control -->
    <appender name="tps-control"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/tps-control.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/tps-control.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>2GB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>7GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="tps-control-digest"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/tps-control-digest.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/tps-control-digest.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>2GB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>7GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="tps-control-detail"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/tps-control-detail.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/tps-control-detail.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>2GB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>7GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="remote"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/remote.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/remote.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>2GB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>7GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="remote-digest"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/remote-digest.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/remote-digest.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>2GB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>7GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <appender name="remote-push"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/remote-push.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/remote-push.log.%d{yyyy-MM-dd}.%i</fileNamePattern>
            <maxFileSize>2GB</maxFileSize>
            <maxHistory>7</maxHistory>
            <totalSizeCap>7GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>%date %level %msg%n%n</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <logger name="com.alibaba.nacos.address.main" additivity="false">
        <level value="INFO"/>
        <appender-ref ref="nacos-address"/>
    </logger>

    <logger name="com.alibaba.nacos.cmdb.main" additivity="false">
        <level value="INFO"/>
        <appender-ref ref="cmdb-main"/>
    </logger>

    <logger name="com.alibaba.nacos.core.remote" additivity="false">
        <level value="DEBUG"/>
        <appender-ref ref="remote"/>
    </logger>
    <logger name="com.alibaba.nacos.core.remote.push" additivity="false">
        <level value="DEBUG"/>
        <appender-ref ref="remote-push"/>
    </logger>

    <logger name="com.alibaba.nacos.core.remote.digest" additivity="false">
        <level value="DEBUG"/>
        <appender-ref ref="remote-digest"/>
    </logger>

    <!-- TPS Control-->
    <logger name="com.alibaba.nacos.core.remote.control.digest" additivity="false">
        <level value="DEBUG"/>
        <appender-ref ref="tps-control-digest"/>
    </logger>

    <logger name="com.alibaba.nacos.core.remote.control.detail" additivity="false">
        <level value="DEBUG"/>
        <appender-ref ref="tps-control-detail"/>
    </logger>

    <logger name="com.alibaba.nacos.core.remote.control" additivity="false">
        <level value="DEBUG"/>
        <appender-ref ref="tps-control"/>
    </logger>

    <logger name="com.alibaba.nacos.naming.main" additivity="false">
        <level value="INFO"/>
        <appender-ref ref="async-naming-server"/>
    </logger>
    <logger name="com.alibaba.nacos.naming.raft" additivity="false">
        <level value="INFO"/>
        <appender-ref ref="async-naming-raft"/>
    </logger>
    <logger name="com.alibaba.nacos.naming.distro" additivity="false">
        <level value="INFO"/>
        <appender-ref ref="async-naming-distro"/>
    </logger>
    <logger name="com.alibaba.nacos.naming.event" additivity="false">
        <level value="INFO"/>
        <appender-ref ref="async-naming-event"/>
    </logger>
    <logger name="com.alibaba.nacos.naming.push" additivity="false">
        <level value="INFO"/>
        <appender-ref ref="naming-push"/>
    </logger>
    <logger name="com.alibaba.nacos.naming.rt" additivity="false">
        <level value="INFO"/>
        <appender-ref ref="naming-rt"/>
    </logger>
    <logger name="com.alibaba.nacos.naming.performance" additivity="false">
        <level value="INFO"/>
        <appender-ref ref="naming-performance"/>
    </logger>

    <logger name="com.alibaba.nacos.config.dumpLog" additivity="false">
        <level value="INFO"/>
        <appender-ref ref="dumpFile"/>
    </logger>
    <logger name="com.alibaba.nacos.config.pullLog" additivity="false">
        <level value="INFO"/>
        <appender-ref ref="pullFile"/>
    </logger>
    <logger name="com.alibaba.nacos.config.pullCheckLog" additivity="false">
        <level value="INFO"/>
        <appender-ref ref="pullCheckFile"/>
    </logger>
    <logger name="com.alibaba.nacos.config.fatal" additivity="false">
        <level value="INFO"/>
        <appender-ref ref="fatalFile"/>
    </logger>
    <logger name="com.alibaba.nacos.config.monitorLog" additivity="false">
        <level value="INFO"/>
        <appender-ref ref="memoryFile"/>
    </logger>

    <logger name="com.alibaba.nacos.config.clientLog" additivity="false">
        <level value="info"/>
        <appender-ref ref="clientLog"/>
    </logger>

    <logger name="com.alibaba.nacos.config.notifyLog" additivity="false">
        <level value="INFO"/>
        <appender-ref ref="notifyLog"/>
    </logger>

    <logger name="com.alibaba.nacos.config.traceLog" additivity="false">
        <level value="info"/>
        <appender-ref ref="traceLog"/>
    </logger>

    <logger name="com.alibaba.nacos.config.startLog" additivity="false">
        <level value="INFO"/>
        <appender-ref ref="startLog"/>
    </logger>

    <logger name="com.alibaba.nacos.istio.main" additivity="false">
        <level value="DEBUG"/>
        <appender-ref ref="istio-main"/>
    </logger>

    <logger name="com.alibaba.nacos.core.auth" additivity="false">
        <level value="DEBUG"/>
        <appender-ref ref="core-auth"/>
    </logger>

    <logger name="com.alibaba.nacos.core.protocol.raft" additivity="false">
        <level value="INFO"/>
        <appender-ref ref="protocol-raft"/>
    </logger>

    <logger name="com.alipay.sofa.jraft" additivity="false">
        <level value="INFO"/>
        <appender-ref ref="alipay-jraft"/>
    </logger>

    <logger name="com.alibaba.nacos.core.protocol.distro" additivity="false">
        <level value="INFO"/>
        <appender-ref ref="protocol-distro"/>
    </logger>

    <logger name="com.alibaba.nacos.core.cluster" additivity="false">
        <level value="INFO"/>
        <appender-ref ref="nacos-cluster"/>
    </logger>

    <springProfile name="standalone">
        <logger name="org.springframework">
            <appender-ref ref="CONSOLE"/>
            <level value="INFO"/>
        </logger>

        <logger name="org.apache.catalina.startup.DigesterFactory">
            <appender-ref ref="CONSOLE"/>
            <level value="INFO"/>
        </logger>

        <logger name="org.apache.catalina.util.LifecycleBase">
            <appender-ref ref="CONSOLE"/>
            <level value="ERROR"/>
        </logger>

        <logger name="org.apache.coyote.http11.Http11NioProtocol">
            <appender-ref ref="CONSOLE"/>
            <level value="WARN"/>
        </logger>

        <logger name="org.apache.tomcat.util.net.NioSelectorPool">
            <appender-ref ref="CONSOLE"/>
            <level value="WARN"/>
        </logger>
    </springProfile>

    <logger name="com.alibaba.nacos.core.listener.StartingApplicationListener">
        <appender-ref ref="CONSOLE"/>
        <level value="INFO"/>
    </logger>

    <logger name="com.alibaba.nacos.common.notify.NotifyCenter">
        <appender-ref ref="CONSOLE"/>
        <level value="INFO"/>
    </logger>

    <logger name="com.alibaba.nacos.sys.file.WatchFileCenter">
        <appender-ref ref="CONSOLE"/>
        <level value="INFO"/>
    </logger>

    <logger name="com.alibaba.nacos.common.executor.ThreadPoolManager">
        <appender-ref ref="CONSOLE"/>
        <level value="INFO"/>
    </logger>

    <root>
        <level value="INFO"/>
        <appender-ref ref="rootFile"/>
    </root>
</configuration>
@ -1,218 +0,0 @@
/*
 * Copyright 1999-2018 Alibaba Group Holding Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/******************************************/
/*   Database = nacos_config              */
/*   Table    = config_info               */
/******************************************/
CREATE TABLE `config_info` (
  `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
  `data_id` varchar(255) NOT NULL COMMENT 'data_id',
  `group_id` varchar(255) DEFAULT NULL,
  `content` longtext NOT NULL COMMENT 'content',
  `md5` varchar(32) DEFAULT NULL COMMENT 'md5',
  `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
  `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
  `src_user` text COMMENT 'source user',
  `src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
  `app_name` varchar(128) DEFAULT NULL,
  `tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
  `c_desc` varchar(256) DEFAULT NULL,
  `c_use` varchar(64) DEFAULT NULL,
  `effect` varchar(64) DEFAULT NULL,
  `type` varchar(64) DEFAULT NULL,
  `c_schema` text,
  PRIMARY KEY (`id`),
  UNIQUE KEY `uk_configinfo_datagrouptenant` (`data_id`,`group_id`,`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info';

/******************************************/
/*   Database = nacos_config              */
/*   Table    = config_info_aggr          */
/******************************************/
CREATE TABLE `config_info_aggr` (
  `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
  `data_id` varchar(255) NOT NULL COMMENT 'data_id',
  `group_id` varchar(255) NOT NULL COMMENT 'group_id',
  `datum_id` varchar(255) NOT NULL COMMENT 'datum_id',
  `content` longtext NOT NULL COMMENT '内容',
  `gmt_modified` datetime NOT NULL COMMENT '修改时间',
  `app_name` varchar(128) DEFAULT NULL,
  `tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
  PRIMARY KEY (`id`),
  UNIQUE KEY `uk_configinfoaggr_datagrouptenantdatum` (`data_id`,`group_id`,`tenant_id`,`datum_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='增加租户字段';

/******************************************/
/*   Database = nacos_config              */
/*   Table    = config_info_beta          */
/******************************************/
CREATE TABLE `config_info_beta` (
  `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
  `data_id` varchar(255) NOT NULL COMMENT 'data_id',
  `group_id` varchar(128) NOT NULL COMMENT 'group_id',
  `app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
  `content` longtext NOT NULL COMMENT 'content',
  `beta_ips` varchar(1024) DEFAULT NULL COMMENT 'betaIps',
  `md5` varchar(32) DEFAULT NULL COMMENT 'md5',
  `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
  `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
  `src_user` text COMMENT 'source user',
  `src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
  `tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
  PRIMARY KEY (`id`),
  UNIQUE KEY `uk_configinfobeta_datagrouptenant` (`data_id`,`group_id`,`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info_beta';

/******************************************/
/*   Database = nacos_config              */
/*   Table    = config_info_tag           */
/******************************************/
CREATE TABLE `config_info_tag` (
  `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
  `data_id` varchar(255) NOT NULL COMMENT 'data_id',
  `group_id` varchar(128) NOT NULL COMMENT 'group_id',
  `tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant_id',
  `tag_id` varchar(128) NOT NULL COMMENT 'tag_id',
  `app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
  `content` longtext NOT NULL COMMENT 'content',
  `md5` varchar(32) DEFAULT NULL COMMENT 'md5',
  `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
  `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
  `src_user` text COMMENT 'source user',
  `src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
  PRIMARY KEY (`id`),
  UNIQUE KEY `uk_configinfotag_datagrouptenanttag` (`data_id`,`group_id`,`tenant_id`,`tag_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info_tag';

/******************************************/
/*   Database = nacos_config              */
/*   Table    = config_tags_relation      */
/******************************************/
CREATE TABLE `config_tags_relation` (
  `id` bigint(20) NOT NULL COMMENT 'id',
  `tag_name` varchar(128) NOT NULL COMMENT 'tag_name',
  `tag_type` varchar(64) DEFAULT NULL COMMENT 'tag_type',
  `data_id` varchar(255) NOT NULL COMMENT 'data_id',
  `group_id` varchar(128) NOT NULL COMMENT 'group_id',
  `tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant_id',
  `nid` bigint(20) NOT NULL AUTO_INCREMENT,
  PRIMARY KEY (`nid`),
  UNIQUE KEY `uk_configtagrelation_configidtag` (`id`,`tag_name`,`tag_type`),
  KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_tag_relation';

/******************************************/
/*   Database = nacos_config              */
/*   Table    = group_capacity            */
/******************************************/
CREATE TABLE `group_capacity` (
  `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键ID',
  `group_id` varchar(128) NOT NULL DEFAULT '' COMMENT 'Group ID,空字符表示整个集群',
  `quota` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '配额,0表示使用默认值',
  `usage` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '使用量',
  `max_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个配置大小上限,单位为字节,0表示使用默认值',
  `max_aggr_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '聚合子配置最大个数,0表示使用默认值',
  `max_aggr_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个聚合数据的子配置大小上限,单位为字节,0表示使用默认值',
  `max_history_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '最大变更历史数量',
  `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
  `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
  PRIMARY KEY (`id`),
  UNIQUE KEY `uk_group_id` (`group_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='集群、各Group容量信息表';

/******************************************/
/*   Database = nacos_config              */
/*   Table    = his_config_info           */
/******************************************/
CREATE TABLE `his_config_info` (
  `id` bigint(64) unsigned NOT NULL,
  `nid` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
  `data_id` varchar(255) NOT NULL,
  `group_id` varchar(128) NOT NULL,
  `app_name` varchar(128) DEFAULT NULL COMMENT 'app_name',
  `content` longtext NOT NULL,
  `md5` varchar(32) DEFAULT NULL,
  `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
  `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
  `src_user` text,
  `src_ip` varchar(50) DEFAULT NULL,
  `op_type` char(10) DEFAULT NULL,
  `tenant_id` varchar(128) DEFAULT '' COMMENT '租户字段',
  PRIMARY KEY (`nid`),
  KEY `idx_gmt_create` (`gmt_create`),
  KEY `idx_gmt_modified` (`gmt_modified`),
  KEY `idx_did` (`data_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='多租户改造';

/******************************************/
/*   Database = nacos_config              */
/*   Table    = tenant_capacity           */
/******************************************/
CREATE TABLE `tenant_capacity` (
  `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键ID',
  `tenant_id` varchar(128) NOT NULL DEFAULT '' COMMENT 'Tenant ID',
  `quota` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '配额,0表示使用默认值',
  `usage` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '使用量',
  `max_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个配置大小上限,单位为字节,0表示使用默认值',
  `max_aggr_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '聚合子配置最大个数',
  `max_aggr_size` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '单个聚合数据的子配置大小上限,单位为字节,0表示使用默认值',
  `max_history_count` int(10) unsigned NOT NULL DEFAULT '0' COMMENT '最大变更历史数量',
  `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
  `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '修改时间',
  PRIMARY KEY (`id`),
  UNIQUE KEY `uk_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='租户容量信息表';

CREATE TABLE `tenant_info` (
  `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
  `kp` varchar(128) NOT NULL COMMENT 'kp',
  `tenant_id` varchar(128) default '' COMMENT 'tenant_id',
  `tenant_name` varchar(128) default '' COMMENT 'tenant_name',
  `tenant_desc` varchar(256) DEFAULT NULL COMMENT 'tenant_desc',
  `create_source` varchar(32) DEFAULT NULL COMMENT 'create_source',
  `gmt_create` bigint(20) NOT NULL COMMENT '创建时间',
  `gmt_modified` bigint(20) NOT NULL COMMENT '修改时间',
  PRIMARY KEY (`id`),
  UNIQUE KEY `uk_tenant_info_kptenantid` (`kp`,`tenant_id`),
  KEY `idx_tenant_id` (`tenant_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='tenant_info';

CREATE TABLE `users` (
  `username` varchar(50) NOT NULL PRIMARY KEY,
  `password` varchar(500) NOT NULL,
  `enabled` boolean NOT NULL
);

CREATE TABLE `roles` (
  `username` varchar(50) NOT NULL,
  `role` varchar(50) NOT NULL,
  UNIQUE INDEX `idx_user_role` (`username` ASC, `role` ASC) USING BTREE
);

CREATE TABLE `permissions` (
  `role` varchar(50) NOT NULL,
  `resource` varchar(255) NOT NULL,
  `action` varchar(8) NOT NULL,
  UNIQUE INDEX `uk_role_permission` (`role`,`resource`,`action`) USING BTREE
);

INSERT INTO users (username, password, enabled) VALUES ('nacos', '$2a$10$EuWPZHzz32dJN7jexM34MOeYirDdFAZm2kuWj7VEOJhhZkDrxfvUu', TRUE);

INSERT INTO roles (username, role) VALUES ('nacos', 'ROLE_ADMIN');
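
The `users` insert above ships a BCrypt hash for the built-in `nacos` account (this particular hash is commonly documented as the default password `nacos`). If authentication stays enabled, a sketch like the following can generate a replacement hash before updating the `users` row; it assumes Spring Security's `spring-security-crypto` module is on the classpath, and `ChangeMe_2024` is only a placeholder password.

```java
import org.springframework.security.crypto.bcrypt.BCryptPasswordEncoder;

public class NacosPasswordHash {
    public static void main(String[] args) {
        // Nacos stores BCrypt hashes in the users.password column.
        BCryptPasswordEncoder encoder = new BCryptPasswordEncoder();

        // Hash a new password, then: UPDATE users SET password = ? WHERE username = 'nacos';
        String hash = encoder.encode("ChangeMe_2024");
        System.out.println(hash);

        // Sanity check: matches() verifies a raw password against a stored hash.
        System.out.println(encoder.matches("ChangeMe_2024", hash));
    }
}
```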
@ -1,228 +0,0 @@
/*
 * Copyright 1999-2018 Alibaba Group Holding Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

CREATE SCHEMA nacos AUTHORIZATION nacos;

CREATE TABLE config_info (
  id bigint NOT NULL generated by default as identity,
  data_id varchar(255) NOT NULL,
  group_id varchar(128) NOT NULL,
  tenant_id varchar(128) default '',
  app_name varchar(128),
  content CLOB,
  md5 varchar(32) DEFAULT NULL,
  gmt_create timestamp NOT NULL DEFAULT '2010-05-05 00:00:00',
  gmt_modified timestamp NOT NULL DEFAULT '2010-05-05 00:00:00',
  src_user varchar(128) DEFAULT NULL,
  src_ip varchar(50) DEFAULT NULL,
  c_desc varchar(256) DEFAULT NULL,
  c_use varchar(64) DEFAULT NULL,
  effect varchar(64) DEFAULT NULL,
  type varchar(64) DEFAULT NULL,
  c_schema LONG VARCHAR DEFAULT NULL,
  constraint configinfo_id_key PRIMARY KEY (id),
  constraint uk_configinfo_datagrouptenant UNIQUE (data_id,group_id,tenant_id));

CREATE INDEX configinfo_dataid_key_idx ON config_info(data_id);
CREATE INDEX configinfo_groupid_key_idx ON config_info(group_id);
CREATE INDEX configinfo_dataid_group_key_idx ON config_info(data_id, group_id);

CREATE TABLE his_config_info (
  id bigint NOT NULL,
  nid bigint NOT NULL generated by default as identity,
  data_id varchar(255) NOT NULL,
  group_id varchar(128) NOT NULL,
  tenant_id varchar(128) default '',
  app_name varchar(128),
  content CLOB,
  md5 varchar(32) DEFAULT NULL,
  gmt_create timestamp NOT NULL DEFAULT '2010-05-05 00:00:00.000',
  gmt_modified timestamp NOT NULL DEFAULT '2010-05-05 00:00:00.000',
  src_user varchar(128),
  src_ip varchar(50) DEFAULT NULL,
  op_type char(10) DEFAULT NULL,
  constraint hisconfiginfo_nid_key PRIMARY KEY (nid));

CREATE INDEX hisconfiginfo_dataid_key_idx ON his_config_info(data_id);
CREATE INDEX hisconfiginfo_gmt_create_idx ON his_config_info(gmt_create);
CREATE INDEX hisconfiginfo_gmt_modified_idx ON his_config_info(gmt_modified);

CREATE TABLE config_info_beta (
  id bigint NOT NULL generated by default as identity,
  data_id varchar(255) NOT NULL,
  group_id varchar(128) NOT NULL,
  tenant_id varchar(128) default '',
  app_name varchar(128),
  content CLOB,
  beta_ips varchar(1024),
  md5 varchar(32) DEFAULT NULL,
  gmt_create timestamp NOT NULL DEFAULT '2010-05-05 00:00:00',
  gmt_modified timestamp NOT NULL DEFAULT '2010-05-05 00:00:00',
  src_user varchar(128),
  src_ip varchar(50) DEFAULT NULL,
  constraint configinfobeta_id_key PRIMARY KEY (id),
  constraint uk_configinfobeta_datagrouptenant UNIQUE (data_id,group_id,tenant_id));

CREATE TABLE config_info_tag (
  id bigint NOT NULL generated by default as identity,
  data_id varchar(255) NOT NULL,
  group_id varchar(128) NOT NULL,
  tenant_id varchar(128) default '',
  tag_id varchar(128) NOT NULL,
  app_name varchar(128),
  content CLOB,
  md5 varchar(32) DEFAULT NULL,
  gmt_create timestamp NOT NULL DEFAULT '2010-05-05 00:00:00',
  gmt_modified timestamp NOT NULL DEFAULT '2010-05-05 00:00:00',
  src_user varchar(128),
  src_ip varchar(50) DEFAULT NULL,
  constraint configinfotag_id_key PRIMARY KEY (id),
  constraint uk_configinfotag_datagrouptenanttag UNIQUE (data_id,group_id,tenant_id,tag_id));

CREATE TABLE config_info_aggr (
  id bigint NOT NULL generated by default as identity,
  data_id varchar(255) NOT NULL,
  group_id varchar(128) NOT NULL,
  tenant_id varchar(128) default '',
  datum_id varchar(255) NOT NULL,
  app_name varchar(128),
  content CLOB,
  gmt_modified timestamp NOT NULL DEFAULT '2010-05-05 00:00:00',
  constraint configinfoaggr_id_key PRIMARY KEY (id),
  constraint uk_configinfoaggr_datagrouptenantdatum UNIQUE (data_id,group_id,tenant_id,datum_id));

CREATE TABLE app_list (
  id bigint NOT NULL generated by default as identity,
  app_name varchar(128) NOT NULL,
  is_dynamic_collect_disabled smallint DEFAULT 0,
  last_sub_info_collected_time timestamp DEFAULT '1970-01-01 08:00:00.0',
  sub_info_lock_owner varchar(128),
  sub_info_lock_time timestamp DEFAULT '1970-01-01 08:00:00.0',
  constraint applist_id_key PRIMARY KEY (id),
  constraint uk_appname UNIQUE (app_name));

CREATE TABLE app_configdata_relation_subs (
  id bigint NOT NULL generated by default as identity,
  app_name varchar(128) NOT NULL,
  data_id varchar(255) NOT NULL,
  group_id varchar(128) NOT NULL,
  gmt_modified timestamp DEFAULT '2010-05-05 00:00:00',
  constraint configdatarelationsubs_id_key PRIMARY KEY (id),
  constraint uk_app_sub_config_datagroup UNIQUE (app_name, data_id, group_id));

CREATE TABLE app_configdata_relation_pubs (
  id bigint NOT NULL generated by default as identity,
  app_name varchar(128) NOT NULL,
  data_id varchar(255) NOT NULL,
  group_id varchar(128) NOT NULL,
  gmt_modified timestamp DEFAULT '2010-05-05 00:00:00',
  constraint configdatarelationpubs_id_key PRIMARY KEY (id),
  constraint uk_app_pub_config_datagroup UNIQUE (app_name, data_id, group_id));

CREATE TABLE config_tags_relation (
  id bigint NOT NULL,
  tag_name varchar(128) NOT NULL,
  tag_type varchar(64) DEFAULT NULL,
  data_id varchar(255) NOT NULL,
  group_id varchar(128) NOT NULL,
  tenant_id varchar(128) DEFAULT '',
  nid bigint NOT NULL generated by default as identity,
  constraint config_tags_id_key PRIMARY KEY (nid),
  constraint uk_configtagrelation_configidtag UNIQUE (id, tag_name, tag_type));

CREATE INDEX config_tags_tenant_id_idx ON config_tags_relation(tenant_id);

CREATE TABLE group_capacity (
  id bigint NOT NULL generated by default as identity,
  group_id varchar(128) DEFAULT '',
  quota int DEFAULT 0,
  usage int DEFAULT 0,
  max_size int DEFAULT 0,
  max_aggr_count int DEFAULT 0,
  max_aggr_size int DEFAULT 0,
  max_history_count int DEFAULT 0,
  gmt_create timestamp DEFAULT '2010-05-05 00:00:00',
  gmt_modified timestamp DEFAULT '2010-05-05 00:00:00',
  constraint group_capacity_id_key PRIMARY KEY (id),
  constraint uk_group_id UNIQUE (group_id));

CREATE TABLE tenant_capacity (
  id bigint NOT NULL generated by default as identity,
  tenant_id varchar(128) DEFAULT '',
  quota int DEFAULT 0,
  usage int DEFAULT 0,
  max_size int DEFAULT 0,
  max_aggr_count int DEFAULT 0,
  max_aggr_size int DEFAULT 0,
  max_history_count int DEFAULT 0,
  gmt_create timestamp DEFAULT '2010-05-05 00:00:00',
  gmt_modified timestamp DEFAULT '2010-05-05 00:00:00',
  constraint tenant_capacity_id_key PRIMARY KEY (id),
  constraint uk_tenant_id UNIQUE (tenant_id));

CREATE TABLE tenant_info (
  id bigint NOT NULL generated by default as identity,
  kp varchar(128) NOT NULL,
  tenant_id varchar(128) DEFAULT '',
  tenant_name varchar(128) DEFAULT '',
  tenant_desc varchar(256) DEFAULT NULL,
  create_source varchar(32) DEFAULT NULL,
  gmt_create bigint NOT NULL,
  gmt_modified bigint NOT NULL,
  constraint tenant_info_id_key PRIMARY KEY (id),
  constraint uk_tenant_info_kptenantid UNIQUE (kp,tenant_id));
CREATE INDEX tenant_info_tenant_id_idx ON tenant_info(tenant_id);

CREATE TABLE users (
  username varchar(50) NOT NULL PRIMARY KEY,
  password varchar(500) NOT NULL,
  enabled boolean NOT NULL DEFAULT true
);

CREATE TABLE roles (
  username varchar(50) NOT NULL,
  role varchar(50) NOT NULL,
  constraint uk_username_role UNIQUE (username,role)
);

CREATE TABLE permissions (
  role varchar(50) NOT NULL,
  resource varchar(512) NOT NULL,
  action varchar(8) NOT NULL,
  constraint uk_role_permission UNIQUE (role,resource,action)
);

INSERT INTO users (username, password, enabled) VALUES ('nacos', '$2a$10$EuWPZHzz32dJN7jexM34MOeYirDdFAZm2kuWj7VEOJhhZkDrxfvUu', TRUE);

INSERT INTO roles (username, role) VALUES ('nacos', 'ROLE_ADMIN');

/******************************************/
/*   ipv6 support                         */
/******************************************/
ALTER TABLE `config_info_tag`
  MODIFY COLUMN `src_ip` varchar(50) CHARACTER SET utf8 COLLATE utf8_bin NULL DEFAULT NULL COMMENT 'source ip' AFTER `src_user`;

ALTER TABLE `his_config_info`
  MODIFY COLUMN `src_ip` varchar(50) CHARACTER SET utf8 COLLATE utf8_bin NULL DEFAULT NULL AFTER `src_user`;

ALTER TABLE `config_info`
  MODIFY COLUMN `src_ip` varchar(50) CHARACTER SET utf8 COLLATE utf8_bin NULL DEFAULT NULL COMMENT 'source ip' AFTER `src_user`;

ALTER TABLE `config_info_beta`
  MODIFY COLUMN `src_ip` varchar(50) CHARACTER SET utf8 COLLATE utf8_bin NULL DEFAULT NULL COMMENT 'source ip' AFTER `src_user`;
Binary file not shown.
@ -1,201 +0,0 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!)  The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
@ -1,113 +0,0 @@
@REM ----------------------------------------------------------------------------
@REM Copyright 2001-2006 The Apache Software Foundation.
@REM
@REM Licensed under the Apache License, Version 2.0 (the "License");
@REM you may not use this file except in compliance with the License.
@REM You may obtain a copy of the License at
@REM
@REM    http://www.apache.org/licenses/LICENSE-2.0
@REM
@REM Unless required by applicable law or agreed to in writing, software
@REM distributed under the License is distributed on an "AS IS" BASIS,
@REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@REM See the License for the specific language governing permissions and
@REM limitations under the License.
@REM ----------------------------------------------------------------------------
@REM
@REM Copyright (c) 2001-2006 The Apache Software Foundation. All rights
@REM reserved.

@echo off

set ERROR_CODE=0

:init
@REM Decide how to startup depending on the version of windows

@REM -- Win98ME
if NOT "%OS%"=="Windows_NT" goto Win9xArg

@REM set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" @setlocal

@REM -- 4NT shell
if "%eval[2+2]" == "4" goto 4NTArgs

@REM -- Regular WinNT shell
set CMD_LINE_ARGS=%*
goto WinNTGetScriptDir

@REM The 4NT Shell from jp software
:4NTArgs
set CMD_LINE_ARGS=%$
goto WinNTGetScriptDir

:Win9xArg
@REM Slurp the command line arguments. This loop allows for an unlimited number
@REM of arguments (up to the command line limit, anyway).
set CMD_LINE_ARGS=
:Win9xApp
if %1a==a goto Win9xGetScriptDir
set CMD_LINE_ARGS=%CMD_LINE_ARGS% %1
shift
goto Win9xApp

:Win9xGetScriptDir
set SAVEDIR=%CD%
%0\
cd %0\..\..
set BASEDIR=%CD%
cd %SAVEDIR%
set SAVE_DIR=
goto repoSetup

:WinNTGetScriptDir
set BASEDIR=%~dp0\..

:repoSetup
set REPO=

if "%JAVACMD%"=="" set JAVACMD=java

if "%REPO%"=="" set REPO=%BASEDIR%\lib

set CLASSPATH="%BASEDIR%"\conf;"%REPO%"\*

set ENDORSED_DIR=
if NOT "%ENDORSED_DIR%" == "" set CLASSPATH="%BASEDIR%"\%ENDORSED_DIR%\*;%CLASSPATH%

if NOT "%CLASSPATH_PREFIX%" == "" set CLASSPATH=%CLASSPATH_PREFIX%;%CLASSPATH%

@REM Reaching here means variables are defined and arguments have been captured
:endInit

%JAVACMD% %JAVA_OPTS% -server -Xmx2048m -Xms2048m -Xmn1024m -Xss512k -XX:SurvivorRatio=10 -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=256m -XX:MaxDirectMemorySize=1024m -XX:-OmitStackTraceInFastThrow -XX:-UseAdaptiveSizePolicy -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath="%BASEDIR%"/logs/java_heapdump.hprof -XX:+DisableExplicitGC -XX:+CMSParallelRemarkEnabled -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=75 -Xloggc:"%BASEDIR%"/logs/seata_gc.log -verbose:gc -Dio.netty.leakDetectionLevel=advanced -Dlogback.color.disable-for-bat=true -classpath %CLASSPATH% -Dapp.name="seata-server" -Dapp.repo="%REPO%" -Dapp.home="%BASEDIR%" -Dbasedir="%BASEDIR%" io.seata.server.Server %CMD_LINE_ARGS%
if %ERRORLEVEL% NEQ 0 goto error
goto end

:error
if "%OS%"=="Windows_NT" @endlocal
set ERROR_CODE=%ERRORLEVEL%

:end
@REM set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" goto endNT

@REM For old DOS remove the set variables from ENV - we assume they were not set
@REM before we started - at least we don't leave any baggage around
set CMD_LINE_ARGS=
goto postExec

:endNT
@REM If error code is set to 1 then the endlocal was done already in :error.
if %ERROR_CODE% EQU 0 @endlocal

:postExec

if "%FORCE_EXIT_ON_ERROR%" == "on" (
  if %ERROR_CODE% NEQ 0 exit %ERROR_CODE%
)

exit /B %ERROR_CODE%
@ -1,128 +0,0 @@
#!/bin/sh
# ----------------------------------------------------------------------------
# Copyright 2001-2006 The Apache Software Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
#
# Copyright (c) 2001-2006 The Apache Software Foundation. All rights
# reserved.

# resolve links - $0 may be a softlink
PRG="$0"

while [ -h "$PRG" ]; do
  ls=`ls -ld "$PRG"`
  link=`expr "$ls" : '.*-> \(.*\)$'`
  if expr "$link" : '/.*' > /dev/null; then
    PRG="$link"
  else
    PRG=`dirname "$PRG"`/"$link"
  fi
done

PRGDIR=`dirname "$PRG"`
BASEDIR=`cd "$PRGDIR/.." >/dev/null; pwd`

# Reset the REPO variable. If you need to influence this use the environment setup file.
REPO=

# OS specific support. $var _must_ be set to either true or false.
cygwin=false;
darwin=false;
case "`uname`" in
  CYGWIN*) cygwin=true ;;
  Darwin*) darwin=true
    if [ -z "$JAVA_VERSION" ] ; then
      JAVA_VERSION="CurrentJDK"
    else
      echo "Using Java version: $JAVA_VERSION"
    fi
    if [ -z "$JAVA_HOME" ]; then
      if [ -x "/usr/libexec/java_home" ]; then
        JAVA_HOME=`/usr/libexec/java_home`
      else
        JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/${JAVA_VERSION}/Home
      fi
    fi
    ;;
esac

if [ -z "$JAVA_HOME" ] ; then
  if [ -r /etc/gentoo-release ] ; then
    JAVA_HOME=`java-config --jre-home`
  fi
fi

# For Cygwin, ensure paths are in UNIX format before anything is touched
if $cygwin ; then
  [ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
  [ -n "$CLASSPATH" ] && CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
fi

# If a specific java binary isn't specified search for the standard 'java' binary
if [ -z "$JAVACMD" ] ; then
  if [ -n "$JAVA_HOME" ] ; then
    if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
      # IBM's JDK on AIX uses strange locations for the executables
      JAVACMD="$JAVA_HOME/jre/sh/java"
    else
      JAVACMD="$JAVA_HOME/bin/java"
    fi
  else
    JAVACMD=`which java`
  fi
fi

if [ ! -x "$JAVACMD" ] ; then
  echo "Error: JAVA_HOME is not defined correctly." 1>&2
  echo "  We cannot execute $JAVACMD" 1>&2
  exit 1
fi

if [ -z "$REPO" ]
then
  REPO="$BASEDIR"/lib
fi

CLASSPATH="$BASEDIR"/conf:"$REPO"/*

ENDORSED_DIR=
if [ -n "$ENDORSED_DIR" ] ; then
  CLASSPATH=$BASEDIR/$ENDORSED_DIR/*:$CLASSPATH
fi

if [ -n "$CLASSPATH_PREFIX" ] ; then
  CLASSPATH=$CLASSPATH_PREFIX:$CLASSPATH
fi

# For Cygwin, switch paths to Windows format before running java
if $cygwin; then
  [ -n "$CLASSPATH" ] && CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
  [ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
  [ -n "$HOME" ] && HOME=`cygpath --path --windows "$HOME"`
  [ -n "$BASEDIR" ] && BASEDIR=`cygpath --path --windows "$BASEDIR"`
  [ -n "$REPO" ] && REPO=`cygpath --path --windows "$REPO"`
fi

exec "$JAVACMD" $JAVA_OPTS -server -Xmx2048m -Xms2048m -Xmn1024m -Xss512k -XX:SurvivorRatio=10 -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=256m -XX:MaxDirectMemorySize=1024m -XX:-OmitStackTraceInFastThrow -XX:-UseAdaptiveSizePolicy -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath="$BASEDIR"/logs/java_heapdump.hprof -XX:+DisableExplicitGC -XX:+CMSParallelRemarkEnabled -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=75 -Xloggc:"$BASEDIR"/logs/seata_gc.log -verbose:gc -Dio.netty.leakDetectionLevel=advanced -Dlogback.color.disable-for-bat=true \
  -classpath "$CLASSPATH" \
  -Dapp.name="seata-server" \
  -Dapp.pid="$$" \
  -Dapp.repo="$REPO" \
  -Dapp.home="$BASEDIR" \
  -Dbasedir="$BASEDIR" \
  io.seata.server.Server \
  "$@"
@ -1 +0,0 @@
io.seata.server.auth.DefaultCheckAuthHandler
@ -1,3 +0,0 @@
io.seata.server.store.DbcpDataSourceProvider
io.seata.server.store.DruidDataSourceProvider
io.seata.server.store.HikariDataSourceProvider
@ -1,4 +0,0 @@
io.seata.server.transaction.at.ATCore
io.seata.server.transaction.tcc.TccCore
io.seata.server.transaction.saga.SagaCore
io.seata.server.transaction.xa.XACore
@ -1,3 +0,0 @@
io.seata.server.storage.db.lock.DataBaseLockManager
io.seata.server.storage.file.lock.FileLockManager
io.seata.server.storage.redis.lock.RedisLockManager
@ -1,3 +0,0 @@
io.seata.server.storage.file.session.FileSessionManager
io.seata.server.storage.db.session.DataBaseSessionManager
io.seata.server.storage.redis.session.RedisSessionManager
@ -1,30 +0,0 @@
# Script Description

## [client](https://github.com/seata/seata/tree/develop/script/client)

> Holds the configuration files and SQL used on the client side

- at: DDL for the `undo_log` table required by AT mode
- conf: configuration files for the client
- saga: DDL for the tables required by SAGA mode
- spring: configuration files for Spring Boot applications

## [server](https://github.com/seata/seata/tree/develop/script/server)

> Holds the SQL and deployment scripts needed on the server side

- db: DDL for the tables required when the server store mode is `db`
- docker-compose: scripts for deploying the server with docker-compose
- helm: scripts for deploying the server with Helm
- kubernetes: scripts for deploying the server with Kubernetes

## [config-center](https://github.com/seata/seata/tree/develop/script/config-center)

> Holds the initialization scripts for the various configuration centers; each script reads the `config.txt` file and writes its entries into the configuration center

- nacos: pushes the configuration into Nacos
- zk: pushes the configuration into ZooKeeper; the script depends on ZooKeeper's own scripts, which must be downloaded manually. ZooKeeper connection settings can be written in `zk-params.txt` or entered when the script runs
- apollo: pushes the configuration into Apollo; the Apollo address and port can be written in `apollo-params.txt` or entered when the script runs
- etcd3: pushes the configuration into Etcd3
- consul: pushes the configuration into Consul
@ -1,30 +0,0 @@
# Script Description

## [client](https://github.com/seata/seata/tree/develop/script/client)

> Stores configuration and SQL for the client side

- at: script that creates the `undo_log` table for AT mode
- conf: configuration the client needs
- saga: script that creates the tables used in SAGA mode
- spring: configuration for Spring Boot

## [server](https://github.com/seata/seata/tree/develop/script/server)

> Stores SQL and deploy scripts for the server side

- db: table-creation script for the server when the store mode is `db`
- docker-compose: script for deploying the server with docker-compose
- helm: script for deploying the server with Helm
- kubernetes: script for deploying the server with Kubernetes

## [config-center](https://github.com/seata/seata/tree/develop/script/config-center)

> Stores initialization scripts for the configuration centers; `config.txt` is used as the configuration source during initialization

- nacos: initialization script for Nacos
- zk: initialization script for ZooKeeper; it depends on ZooKeeper's own scripts, which you need to download yourself. You can set the ZooKeeper server configuration in `zk-params.txt` or enter it when the script runs
- apollo: initialization script for Apollo. You can set the Apollo server configuration in `apollo-params.txt` or enter it when the script runs
- etcd3: initialization script for Etcd3
- consul: initialization script for Consul
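
As a rough illustration of what the `config-center/nacos` script does with each `key=value` pair from `config.txt`, the hedged sketch below publishes one Seata property through the Nacos Java client. The server address, group, and property value are placeholders, and it assumes the `nacos-client` dependency is available; the real script simply loops this call over every line of `config.txt`.

```java
import java.util.Properties;

import com.alibaba.nacos.api.NacosFactory;
import com.alibaba.nacos.api.config.ConfigService;
import com.alibaba.nacos.api.exception.NacosException;

public class SeataConfigPublisher {
    public static void main(String[] args) throws NacosException {
        Properties props = new Properties();
        props.put("serverAddr", "127.0.0.1:8848"); // placeholder Nacos address

        ConfigService configService = NacosFactory.createConfigService(props);

        // The init script pushes each config.txt entry as its own data-id.
        boolean ok = configService.publishConfig(
                "store.mode",   // dataId: one Seata property key
                "SEATA_GROUP",  // group shared by the Seata server and clients
                "db");          // value taken from config.txt
        System.out.println("published: " + ok);
    }
}
```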
@ -1,65 +0,0 @@
## transaction log store, only used in seata-server
store {
  ## store mode: file、db、redis
  mode = "file"
  ## rsa decryption public key
  publicKey = ""
  ## file store property
  file {
    ## store location dir
    dir = "sessionStore"
    # branch session size, if exceeded first try compress lockkey, still exceeded throws exceptions
    maxBranchSessionSize = 16384
    # global session size, if exceeded throws exceptions
    maxGlobalSessionSize = 512
    # file buffer size, if exceeded allocate new buffer
    fileWriteBufferCacheSize = 16384
    # when recover batch read size
    sessionReloadReadSize = 100
    # async, sync
    flushDiskMode = async
  }

  ## database store property
  db {
    ## the implement of javax.sql.DataSource, such as DruidDataSource(druid)/BasicDataSource(dbcp)/HikariDataSource(hikari) etc.
    datasource = "druid"
    ## mysql/oracle/postgresql/h2/oceanbase etc.
    dbType = "mysql"
    driverClassName = "com.mysql.jdbc.Driver"
    ## if using mysql to store the data, recommend add rewriteBatchedStatements=true in jdbc connection param
    url = "jdbc:mysql://127.0.0.1:3306/seata?rewriteBatchedStatements=true"
    user = "mysql"
    password = "mysql"
    minConn = 5
    maxConn = 100
    globalTable = "global_table"
    branchTable = "branch_table"
    lockTable = "lock_table"
    queryLimit = 100
    maxWait = 5000
  }

  ## redis store property
  redis {
    ## redis mode: single、sentinel
    mode = "single"
    ## single mode property
    single {
      host = "127.0.0.1"
      port = "6379"
    }
    ## sentinel mode property
    sentinel {
      masterName = ""
      ## such as "10.28.235.65:26379,10.28.235.65:26380,10.28.235.65:26381"
      sentinelHosts = ""
    }
    password = ""
    database = "0"
    minConn = 1
    maxConn = 10
    maxTotal = 100
    queryLimit = 100
  }
}
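
For the `db` store mode above, the `datasource = "druid"` line only selects which `javax.sql.DataSource` implementation the server builds from these properties. A minimal sketch of the equivalent Druid setup, reusing the same illustrative URL and credentials from the config and assuming the `druid` and MySQL driver jars are present, might look like this:

```java
import com.alibaba.druid.pool.DruidDataSource;

public class SeataStoreDataSource {
    public static DruidDataSource build() {
        // Mirrors the db { ... } block: driver, url, user, password, pool bounds.
        DruidDataSource ds = new DruidDataSource();
        ds.setDriverClassName("com.mysql.jdbc.Driver");
        ds.setUrl("jdbc:mysql://127.0.0.1:3306/seata?rewriteBatchedStatements=true");
        ds.setUsername("mysql");
        ds.setPassword("mysql");
        ds.setInitialSize(5);  // roughly minConn
        ds.setMaxActive(100);  // roughly maxConn
        ds.setMaxWait(5000);   // maxWait in milliseconds
        return ds;
    }
}
```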
@ -1,104 +0,0 @@
transport {
  # tcp, unix-domain-socket
  type = "TCP"
  # NIO, NATIVE
  server = "NIO"
  # enable heartbeat
  heartbeat = true
  # the client batch send request enable
  enableClientBatchSendRequest = false
  # thread factory for netty
  threadFactory {
    bossThreadPrefix = "NettyBoss"
    workerThreadPrefix = "NettyServerNIOWorker"
    serverExecutorThreadPrefix = "NettyServerBizHandler"
    shareBossWorker = false
    clientSelectorThreadPrefix = "NettyClientSelector"
    clientSelectorThreadSize = 1
    clientWorkerThreadPrefix = "NettyClientWorkerThread"
    # netty boss thread size
    bossThreadSize = 1
    # auto, default pin or 8
    workerThreadSize = "default"
  }
  shutdown {
    # when destroy server, wait seconds
    wait = 3
  }
  serialization = "seata"
  compressor = "none"
}

## transaction log store, only used in server side
store {
  ## store mode: file、db
  mode = "file"
  ## file store property
  file {
    ## store location dir
    dir = "sessionStore"
    # branch session size, if exceeded first try compress lockkey, still exceeded throws exceptions
    maxBranchSessionSize = 16384
    # global session size, if exceeded throws exceptions
    maxGlobalSessionSize = 512
    # file buffer size, if exceeded allocate new buffer
    fileWriteBufferCacheSize = 16384
    # when recover batch read size
    sessionReloadReadSize = 100
    # async, sync
    flushDiskMode = async
  }

  ## database store property
  db {
    ## the implement of javax.sql.DataSource, such as DruidDataSource(druid)/BasicDataSource(dbcp) etc.
    datasource = "druid"
    ## mysql/oracle/postgresql/h2/oceanbase etc.
    dbType = "mysql"
    driverClassName = "com.mysql.jdbc.Driver"
    ## if using mysql to store the data, recommend add rewriteBatchedStatements=true in jdbc connection param
    url = "jdbc:mysql://127.0.0.1:3306/seata?rewriteBatchedStatements=true"
    user = "mysql"
    password = "mysql"
    minConn = 5
    maxConn = 30
    globalTable = "global_table"
    branchTable = "branch_table"
    lockTable = "lock_table"
    queryLimit = 100
  }
}

## server configuration, only used in server side
server {
  recovery {
    # schedule committing retry period in milliseconds
    committingRetryPeriod = 1000
    # schedule async committing retry period in milliseconds
    asynCommittingRetryPeriod = 1000
    # schedule rollbacking retry period in milliseconds
    rollbackingRetryPeriod = 1000
    # schedule timeout retry period in milliseconds
    timeoutRetryPeriod = 1000
  }
  undo {
    logSaveDays = 7
    # schedule delete expired undo_log in milliseconds
    logDeletePeriod = 86400000
  }
  # check auth
  enableCheckAuth = true
  # unit ms,s,m,h,d represents milliseconds, seconds, minutes, hours, days, default permanent
  maxCommitRetryTimeout = "-1"
  maxRollbackRetryTimeout = "-1"
  rollbackRetryTimeoutUnlockEnable = false
  retryDeadThreshold = 130000
}

## metrics configuration, only used in server side
metrics {
  enabled = false
  registryType = "compact"
  # multi exporters use comma divided
  exporterList = "prometheus"
  exporterPrometheusPort = 9898
}
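
The server resolves blocks such as `store` and `server` above through its flattened configuration keys. As a hedged sketch of how a lookup behaves (assuming Seata's `io.seata.config.ConfigurationFactory` is on the classpath, as it is for the server itself, and that the dotted key names below match the file layout):

```java
import io.seata.config.Configuration;
import io.seata.config.ConfigurationFactory;

public class StoreModeProbe {
    public static void main(String[] args) {
        // Resolves registry.conf first, then the config source it points at (file.conf here).
        Configuration config = ConfigurationFactory.getInstance();

        String storeMode = config.getConfig("store.mode");   // "file" or "db"
        String storeUrl = config.getConfig("store.db.url");  // JDBC URL used in db mode

        System.out.println("store.mode = " + storeMode);
        System.out.println("store.db.url = " + storeUrl);
    }
}
```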
@ -1,58 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
  ~ Copyright 1999-2019 Seata.io Group.
  ~
  ~ Licensed under the Apache License, Version 2.0 (the "License");
  ~ you may not use this file except in compliance with the License.
  ~ You may obtain a copy of the License at
  ~
  ~      http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
  -->

<configuration scan="true" scanPeriod="60 seconds" debug="false">
    <!-- Context listeners -->
    <contextListener class="io.seata.server.logging.listener.SystemPropertyLoggerContextListener"/>

    <!-- Copied from spring-boot.jar -->
    <conversionRule conversionWord="clr" converterClass="io.seata.server.logging.logback.ColorConverter"/>
    <conversionRule conversionWord="wex" converterClass="io.seata.server.logging.logback.WhitespaceThrowableProxyConverter"/>
    <conversionRule conversionWord="wEx" converterClass="io.seata.server.logging.logback.ExtendedWhitespaceThrowableProxyConverter"/>

    <!-- common properties -->
    <property name="APPLICATION_NAME" value="seata-server"/>

    <!-- console-appender -->
    <include resource="logback/console-appender.xml"/>

    <!-- file-appender -->
    <include resource="logback/file-appender.xml"/>

    <!-- logstash-appender: off by default -->
    <!--<include resource="logback/logstash-appender.xml"/>-->

    <!-- kafka-appender: off by default -->
    <!--<include resource="logback/kafka-appender.xml"/>-->

    <root level="INFO">
        <!-- console-appender -->
        <appender-ref ref="CONSOLE"/>

        <!-- file-appender -->
        <appender-ref ref="FILE_ALL"/>
        <appender-ref ref="FILE_WARN"/>
        <appender-ref ref="FILE_ERROR"/>

        <!-- logstash-appender: off by default -->
        <!--<appender-ref ref="LOGSTASH"/>-->

        <!-- kafka-appender: off by default -->
        <!--<appender-ref ref="KAFKA"/>-->
    </root>
</configuration>
@ -1,12 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<included>
    <!-- console-appender properties -->
    <property name="CONSOLE_LOG_PATTERN" value="%clr(%d{HH:mm:ss.SSS}){faint} %clr(%5p) %clr(---){faint} %clr([%25.25t]){faint} %clr(%-40.40logger{39}){cyan} %clr(:){faint} %m%n%wEx"/>

    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <Pattern>${CONSOLE_LOG_PATTERN}</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>
</included>
@ -1,67 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<included>
    <!-- file-appender properties -->
    <property name="LOG_HOME" value="${user.home}/logs/seata"/>
    <property name="FILE_LOG_PATTERN" value="%d{yyyy-MM-dd HH:mm:ss.SSS} %5p --- [%30.30t] %-40.40logger{39} : %m%n%wEx"/>

    <!--ALL-->
    <appender name="FILE_ALL" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/${APPLICATION_NAME:-}.${PORT}.all.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/history/${APPLICATION_NAME:-}.${PORT}.all.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
            <maxFileSize>2GB</maxFileSize>
            <MaxHistory>7</MaxHistory>
            <totalSizeCap>7GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>${FILE_LOG_PATTERN}</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <!--WARN-->
    <appender name="FILE_WARN" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>WARN</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <file>${LOG_HOME}/${APPLICATION_NAME:-}.${PORT}.warn.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/history/${APPLICATION_NAME:-}.${PORT}.warn.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
            <maxFileSize>2GB</maxFileSize>
            <MaxHistory>7</MaxHistory>
            <totalSizeCap>7GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>${FILE_LOG_PATTERN}</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <!--ERROR-->
    <appender name="FILE_ERROR" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>ERROR</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <file>${LOG_HOME}/${APPLICATION_NAME:-}.${PORT}.error.log</file>
        <append>true</append>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/history/${APPLICATION_NAME:-}.${PORT}.error.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
            <maxFileSize>2GB</maxFileSize>
            <MaxHistory>7</MaxHistory>
            <totalSizeCap>7GB</totalSizeCap>
            <cleanHistoryOnStart>true</cleanHistoryOnStart>
        </rollingPolicy>
        <encoder>
            <Pattern>${FILE_LOG_PATTERN}</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>
</included>
@ -1,32 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<included>
    <!-- kafka-appender properties -->
    <property name="KAFKA_BOOTSTRAP_SERVERS" value="localhost:9092"/>
    <property name="KAFKA_TOPIC" value="logback_to_logstash"/>

    <appender name="KAFKA" class="com.github.danielwegener.logback.kafka.KafkaAppender">
        <encoder>
            <!--<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS}|%p|seata-server|${PORT:-0}|%t|%logger|%X{X-TX-XID:-}|%X{X-TX-BRANCH-ID:-}|%m|%wex</pattern>-->
            <pattern>{
                "@timestamp": "%d{yyyy-MM-dd HH:mm:ss.SSS}",
                "level":"%p",
                "app_name":"${APPLICATION_NAME:-}",
                "PORT": ${PORT:-0},
                "thread_name": "%t",
                "logger_name": "%logger",
                "X-TX-XID": "%X{X-TX-XID:-}",
                "X-TX-BRANCH-ID": "%X{X-TX-BRANCH-ID:-}",
                "message": "%m",
                "stack_trace": "%wex"
                }
            </pattern>
        </encoder>
        <topic>${KAFKA_TOPIC}</topic>
        <keyingStrategy class="com.github.danielwegener.logback.kafka.keying.NoKeyKeyingStrategy"/>
        <deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy"/>
        <producerConfig>bootstrap.servers=${KAFKA_BOOTSTRAP_SERVERS}</producerConfig>
        <producerConfig>acks=0</producerConfig>
        <producerConfig>linger.ms=1000</producerConfig>
        <producerConfig>max.block.ms=0</producerConfig>
    </appender>
</included>
@ -1,28 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<included>
    <!-- logstash-appender properties -->
    <property name="LOGSTASH_DESTINATION" value="localhost:4560"/>

    <appender name="LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
        <!-- the TCP address of the Logstash instance -->
        <destination>${LOGSTASH_DESTINATION}</destination>

        <!--<encoder charset="UTF-8" class="net.logstash.logback.encoder.LogstashEncoder">-->
        <encoder charset="UTF-8" class="io.seata.server.logging.logback.appender.EnhancedLogstashEncoder">
            <!-- the global custom fields -->
            <customFields>
                {
                    "app_name": "${APPLICATION_NAME:-}"
                }
            </customFields>

            <!-- exclude the `@version` provider -->
            <excludeProvider>net.logstash.logback.composite.LogstashVersionJsonProvider</excludeProvider>
            <!-- exclude providers that are not currently needed, to reduce overhead -->
            <excludeProvider>net.logstash.logback.composite.loggingevent.JsonMessageJsonProvider</excludeProvider>
            <excludeProvider>net.logstash.logback.composite.loggingevent.TagsJsonProvider</excludeProvider>
            <excludeProvider>net.logstash.logback.composite.loggingevent.LogstashMarkersJsonProvider</excludeProvider>
            <excludeProvider>net.logstash.logback.composite.loggingevent.ArgumentsJsonProvider</excludeProvider>
        </encoder>
    </appender>
</included>
@ -1,96 +0,0 @@
registry {
  # file, nacos, eureka, redis, zk, consul, etcd3, sofa
  type = "file"

  nacos {
    application = "seata-server"
    serverAddr = "127.0.0.1:8848"
    group = "SEATA_GROUP"
    namespace = ""
    cluster = "default"
    username = ""
    password = ""
  }
  eureka {
    serviceUrl = "http://localhost:8761/eureka"
    application = "default"
    weight = "1"
  }
  redis {
    serverAddr = "localhost:6379"
    db = 0
    password = ""
    cluster = "default"
    timeout = 0
  }
  zk {
    cluster = "default"
    serverAddr = "127.0.0.1:2181"
    sessionTimeout = 6000
    connectTimeout = 2000
    username = ""
    password = ""
  }
  consul {
    cluster = "default"
    serverAddr = "127.0.0.1:8500"
    aclToken = ""
  }
  etcd3 {
    cluster = "default"
    serverAddr = "http://localhost:2379"
  }
  sofa {
    serverAddr = "127.0.0.1:9603"
    application = "default"
    region = "DEFAULT_ZONE"
    datacenter = "DefaultDataCenter"
    cluster = "default"
    group = "SEATA_GROUP"
    addressWaitTime = "3000"
  }
  file {
    name = "file.conf"
  }
}

config {
  # file, nacos, apollo, zk, consul, etcd3
  type = "file"

  nacos {
    serverAddr = "127.0.0.1:8848"
    namespace = ""
    group = "SEATA_GROUP"
    username = ""
    password = ""
    dataId = "seataServer.properties"
  }
  consul {
    serverAddr = "127.0.0.1:8500"
    aclToken = ""
  }
  apollo {
    appId = "seata-server"
    ## apolloConfigService overrides apolloMeta
    apolloMeta = "http://192.168.1.204:8801"
    apolloConfigService = "http://192.168.1.204:8080"
    namespace = "application"
    apolloAccesskeySecret = ""
    cluster = "seata"
  }
  zk {
    serverAddr = "127.0.0.1:2181"
    sessionTimeout = 6000
    connectTimeout = 2000
    username = ""
    password = ""
    nodePath = "/seata/seata.properties"
  }
  etcd3 {
    serverAddr = "http://localhost:2379"
  }
  file {
    name = "file.conf"
  }
}
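The registry.conf above ships with both registry.type and config.type set to "file". A hedged sketch of how it would instead be pointed at Nacos, reusing only values that already appear in the nacos blocks above (a real deployment would substitute its own server address, namespace, and credentials):

``` text
registry {
  type = "nacos"                    # was "file"
  nacos {
    application = "seata-server"
    serverAddr  = "127.0.0.1:8848"  # assumption: local Nacos, as in the defaults above
    group       = "SEATA_GROUP"
    namespace   = ""
    cluster     = "default"
  }
}

config {
  type = "nacos"                    # was "file"
  nacos {
    serverAddr = "127.0.0.1:8848"
    group      = "SEATA_GROUP"
    dataId     = "seataServer.properties"
  }
}
```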
17 binary files not shown.
Some files were not shown because too many files have changed in this diff.