delete search_monitor

parent c236d3730b
commit 3786f0822a
@@ -1,11 +0,0 @@
### HTTP API test tool

This Python script comes from the company-internal HTTP test tool: https://coding.jd.com/fangjiankang/python_httprunner.git

Python dependencies to install: pip install requests xlrd xlwt colorama

Usage documentation: https://cf.jd.com/pages/viewpage.action?pageId=478195770

Run the tests with: python start.py

Test results are written to the report directory
@@ -1,22 +0,0 @@
def check_options(case_options):
    """
    Check that the POST request parameters are valid.
    :param case_options: test case content
    :return: True when all required keys are present, otherwise False
    """
    if 'url' not in case_options.keys():
        print("url 参数未设置,跳过此调用例执行,参数为:%s" % case_options)
        return False
    if 'body' not in case_options.keys():
        print("body 参数未设置,跳过此调用例执行,参数为:%s" % case_options)
        return False
    if 'headers' not in case_options.keys():
        print("headers 参数未设置,跳过此调用例执行,参数为:%s" % case_options)
        return False
    if 'assert_type' not in case_options.keys():
        print("assert_type 参数未设置,跳过此调用例执行,参数为:%s" % case_options)
        return False
    if 'assert_value' not in case_options.keys():
        print("assert_value 参数未设置,跳过此调用例执行,参数为:%s" % case_options)
        return False
    return True
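A minimal usage sketch of check_options, assuming the checker module above is importable; the test-case values are placeholders.

import checker

# hypothetical test case carrying every required key
case = {
    'url': 'http://example.com/api',
    'headers': {'Content-Type': 'application/json'},
    'body': {'id': 1},
    'assert_type': '包含',
    'assert_value': 'success',
}
print(checker.check_options(case))          # True: all keys present
print(checker.check_options({'url': 'x'}))  # False: body/headers/assert_* missing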
@@ -1,5 +0,0 @@
testcase_file_path = './myCase.xls'  # path of the test case source file
report_path = "./report"             # parent directory for generated test reports
xls_report = True                    # whether to generate an xls report: True or False
output_model = "detail"              # verbosity of the console output per request: "simple" or "detail"
@@ -1,148 +0,0 @@
import net_request
import checker
import time
import json
import config
import colorama
from colorama import init, Fore, Back, Style

init(autoreset=True)


def run(testcase_list):
    print('\n\033[1;33m========= 执行接口测试 ==========\033[0m')
    result_list = []
    i = 1
    for testcase in testcase_list:
        print('[测试接口%s]' % i)
        result_list.append(run_case(testcase))
        i += 1
    return result_list


def run_case(case_options):
    """
    Run a single test case.
    :param case_options: dict of parameters needed to execute the test case
    :return: a result dict, or None when the case is skipped
    """
    # If request_type cannot be read from the case parameters, skip the case.
    if 'request_type' not in case_options.keys():
        print("request_type 参数未设置,跳过此调用例执行,参数为:%s" % case_options)
        return

    request_type = case_options['request_type']  # request type
    response_result = ''                         # holds the response after the request returns
    begin_time = time.time()                     # timestamp before the request

    # Execute a POST request.
    if request_type.lower() == 'post':
        # Check that the case parameters are complete.
        if not checker.check_options(case_options):
            return
        # Send the POST request.
        response_result = net_request.post(case_options['url'], case_options['headers'], case_options['body'])

    # Execute a GET request.
    if request_type.lower() == 'get':
        if not checker.check_options(case_options):
            return
        # Send the GET request.
        response_result = net_request.get(case_options['url'], case_options['headers'], case_options['body'])

    end_time = time.time()  # timestamp after the request completes
    # Judge the execution result and check whether the case passed.
    check_result = analyse_result(response_result, case_options['assert_value'], case_options['assert_type'])
    # Print the execution info to the console.
    cost_time = round(end_time - begin_time, 3)
    output_execute_info(case_options, response_result, check_result, cost_time)
    # Assemble the execution result and return it to the caller.
    return {'case_options': case_options, 'response_result': response_result, 'check_result': check_result,
            'cost_time': cost_time}


def analyse_result(real_result, assert_value, assert_type):
    """
    Analyse the test result returned by the request.
    :param real_result: response of the request, as JSON
    :param assert_value: expected result string, from the case_options dict
    :param assert_type: assertion mode, from the case_options dict (supported: 包含 contains, 相等 equals)
    :return: True when the assertion passes, False otherwise
    """

    # "contains" logic: the check passes when the response contains the assertion value.
    if '包含' == assert_type:
        if json.dumps(real_result, ensure_ascii=False).__contains__(str(assert_value)):
            return True
    # "equals" logic: the check passes when the response is identical to the assertion value.
    if '相等' == assert_type:
        if str(assert_value) == json.dumps(real_result, ensure_ascii=False):
            return True
    return False


def output_execute_info(case_options, response_result, check_result, time_consuming):
    """
    Print the execution report to the console (supports simple and detail modes).
    :param case_options: original test case info
    :param response_result: response of the request
    :param check_result: test result, True or False
    :param time_consuming: execution time of the test case
    :return:
    """
    if config.output_model.lower() == 'simple':
        simple_output(case_options, response_result, check_result, time_consuming)
    elif config.output_model.lower() == 'detail':
        detail_output(case_options, response_result, check_result, time_consuming)
    else:
        print("请到config.py文件中配置输出模式(output_model)!")
    return


def detail_output(case_options, response_result, check_result, time_consuming):
    """
    Print the execution report to the console (detail mode).
    :param case_options: original test case info
    :param response_result: response of the request
    :param check_result: test result, True or False
    :param time_consuming: execution time of the test case
    :return:
    """
    print("请求接口:", case_options['url'])
    print("接口名称:", case_options['interface_name'])
    print("请求类型:", case_options['request_type'].upper())
    print("请求参数:", case_options['body'])
    if check_result:  # colour the response according to the test result
        print('返回结果: \033[1;32;40m' + json.dumps(response_result, ensure_ascii=False) + '\033[0m')
        print('断言内容: \033[1;32;40m' + case_options['assert_value'] + '\033[0m')
    else:
        print('返回结果: \033[1;31;40m' + json.dumps(response_result, ensure_ascii=False) + '\033[0m')
        print('断言内容: \033[1;31;40m' + case_options['assert_value'] + '\033[0m')
    print('断言方式: %s' % case_options['assert_type'])
    print("执行耗时:", time_consuming, '秒')
    test_result = '\033[1;32;40m通过\033[0m' if check_result is True else '\033[1;31;40m失败\033[0m'  # colour the verdict
    print('测试结果:', test_result)  # no extra highlighting
    print("\n")
    return


def simple_output(case_options, response_result, check_result, time_consuming):
    """
    Print the execution report to the console (simple mode).
    :param case_options: original test case info
    :param response_result: response of the request
    :param check_result: test result, True or False
    :param time_consuming: execution time of the test case
    :return:
    """
    print("请求接口:", case_options['url'])
    if check_result:  # colour the response according to the test result
        print('返回结果: \033[1;32;40m' + json.dumps(response_result, ensure_ascii=False)[0:120] + '......' + '\033[0m')
        print('断言内容: \033[1;32;40m' + case_options['assert_value'] + '\033[0m')
    else:
        print('返回结果: \033[1;31;40m' + json.dumps(response_result, ensure_ascii=False)[0:120] + '......' + '\033[0m')
        print('断言内容: \033[1;31;40m' + case_options['assert_value'] + '\033[0m')
    print("执行耗时:", time_consuming, '秒')
    test_result = '\033[1;32;40m通过\033[0m' if check_result is True else '\033[1;31;40m失败\033[0m'  # colour the verdict
    print('测试结果:', test_result)  # no extra highlighting
    print("\n")
    return
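A small sketch of the two assertion modes handled by analyse_result, assuming the executor module above is importable; the response dict is a placeholder.

import executor

resp = {"code": 0, "msg": "success"}
# 包含: passes when the serialized response contains the expected substring
print(executor.analyse_result(resp, "success", "包含"))                        # True
# 相等: passes only when the serialized response matches the expected string exactly
print(executor.analyse_result(resp, '{"code": 0, "msg": "success"}', "相等"))  # True
print(executor.analyse_result(resp, "failed", "包含"))                         # False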
Binary file not shown.
@@ -1,33 +0,0 @@
import requests
import traceback


def post(url, headers=None, body=None):
    """
    Send a POST request.
    :param url: request URL
    :param headers: request headers
    :param body: request payload
    :return: response JSON, or an error dict when the cookie has expired
    """

    response_post = requests.post(url, json=body, headers=headers)
    res = response_post.text
    if res.__contains__('<title>登录页</title>'):
        return {"错误": "cookie失效"}
    return response_post.json()


def get(url, headers, body):
    """
    Send a GET request.
    :param url: request URL
    :param headers: request headers
    :param body: request parameters (sent as the query string)
    :return: response JSON, or an error dict when the cookie has expired
    """
    response_get = requests.get(url, params=body, headers=headers)
    res = response_get.text
    if res.__contains__('<title>登录页</title>'):
        return {"错误": "cookie失效"}
    return response_get.json()
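A usage sketch for the two wrappers above, assuming the net_request module is importable; the endpoint and payload are placeholders.

import net_request

result = net_request.post(
    'http://example.com/api/echo',            # placeholder URL
    headers={'Content-Type': 'application/json'},
    body={'keyword': 'test'},
)
print(result)  # parsed JSON, or {"错误": "cookie失效"} when the login page is returned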
@@ -1,62 +0,0 @@
import xlrd
import os


def read(testcase_file_path):
    """
    Read the API test case source file and collect each valid case into a dict.
    :param testcase_file_path: path of the test case file
    :return: list of test case dicts
    """
    if not os.path.exists(testcase_file_path):
        print("%s 文件不存在,无法读取到文件内容" % testcase_file_path)
        return
    # Print a hint on the command line.
    print('\n\033[1;33m====== 读取测试用例列表文件 ======\033[0m')
    print("文件地址:%s" % testcase_file_path)
    # Read the xls file contents.
    xls = xlrd.open_workbook(testcase_file_path)
    sh = xls.sheet_by_index(0)
    # Get the shared header text.
    common_header_text = sh.row_values(1)[2]
    # Number of header rows: the first 3 rows are description text and column titles, not test cases.
    head_row_num = 3
    # Header row indices start at 0, so indices 0-2 are description text and column titles, not test cases.
    head_row_index = head_row_num - 1
    # Estimate the number of candidate test case rows (rows outside the header; not necessarily accurate,
    # e.g. stray rows are filtered out inside the loop below).
    total_row_num = sh.nrows                      # total number of rows in the file
    need_read_num = total_row_num - head_row_num  # number of rows that need to be read

    i = 1
    testcase_list = []
    print("读取到被测接口信息如下:")
    while i <= need_read_num:
        row_index = head_row_index + i                # index of the row to read
        row_testcase_info = sh.row_values(row_index)  # one row of test case text

        is_execute = row_testcase_info[0]
        if is_execute == '是':  # only add the case when the "execute" cell is 是 (yes)
            print(str(row_testcase_info)[0:120] + '......')
            # Store each test case as a dict.
            testcase = {'interface_name': row_testcase_info[1], 'url': row_testcase_info[2],
                        'assert_type': row_testcase_info[6],
                        'assert_value': row_testcase_info[7]}

            request_type = row_testcase_info[3]
            testcase['request_type'] = request_type

            body = row_testcase_info[5]
            body = body.strip()
            if len(body) > 0:
                testcase['body'] = eval(row_testcase_info[5])
            else:  # if body is empty, fall back to an arbitrary default value
                testcase['body'] = {"test": "test"}
            headers = row_testcase_info[4]  # headers text of the row
            if headers == "公共头部":  # when the cell says 公共头部, use the shared header text from row 2 of the file
                headers = common_header_text
            testcase['headers'] = eval(headers)
            testcase_list.append(testcase)
        i += 1

    print('用例总数:\033[1;32;40m%s条\033[0m' % len(testcase_list))
    return testcase_list
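Based on the column indices used in read() above, a data row of myCase.xls is laid out roughly as below; this is a reconstruction, and the real template may carry extra columns.

# index 0: execute flag, 1: interface name, 2: url, 3: request type,
#       4: headers (or the literal 公共头部), 5: body, 6: assert type, 7: assert value
example_row = [
    '是',                              # 0 execute this case
    '搜索接口',                         # 1 interface name
    'http://example.com/api/search',   # 2 url (placeholder)
    'post',                            # 3 request type
    '公共头部',                         # 4 reuse the shared header from row 2
    '{"keyword": "手机"}',              # 5 body, eval()'d into a dict
    '包含',                             # 6 assertion type: contains
    'success',                          # 7 expected substring
]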
@@ -1,21 +0,0 @@
import colorama
from colorama import init, Fore, Back, Style

init(autoreset=True)


def output_report(result_list):
    print('\033[1;33m========== 统计测试结果 ==========\033[0m')
    print("执行总数: %s" % len(result_list))
    success_count = 0
    fail_count = 0
    cost_time = 0
    for result in result_list:
        cost_time += result['cost_time']
        if result['check_result']:
            success_count += 1
        else:
            fail_count += 1
    print('成功/失败:\033[1;32;40m%s\033[0m / \033[1;31;40m%s\033[0m' % (success_count, fail_count))
    print("执行总时长:%s 秒\n" % round(cost_time, 3))
    return
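A quick check of the summary printer with a hand-built result list, assuming the reporter module above is importable.

import reporter

results = [
    {'check_result': True,  'cost_time': 0.12},
    {'check_result': False, 'cost_time': 0.34},
]
reporter.output_report(results)  # prints 2 executed, 1 success / 1 failure, 0.46 s in total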
@@ -1,17 +0,0 @@
import executor
import read_testcase
import write_xls_report
import config
import reporter

# 1. Read the test case list file.
testcase_list = read_testcase.read(config.testcase_file_path)

# 2. Execute the cases in the list.
result_list = executor.run(testcase_list)

# 3. Summarise the test results and show them in the terminal.
reporter.output_report(result_list)

# 4. Write the test results to an xls file.
write_xls_report.write(result_list)
@@ -1,98 +0,0 @@
import xlwt
import config
import time
import os
import json
import colorama
from colorama import init, Fore, Back, Style

init(autoreset=True)


def write(result_list):
    """
    Write the summarised test results into an xls file.
    :param result_list: list of result dicts produced by executor.run()
    :return:
    """
    # Read the config; only generate the xls report when it is enabled.
    if not config.xls_report:
        return
    if not os.path.exists(config.report_path):  # create the parent directory from the config if it does not exist
        os.mkdir(config.report_path)
    # Build the path of the generated report file.
    report_path = os.path.join(config.report_path, time.strftime("%Y%m%d%H%M%S", time.localtime()) + '_report.xls')
    print('\033[1;33m====== 生成xls格式的测试报告 ======\033[0m')
    # Create a workbook with utf-8 encoding.
    workbook = xlwt.Workbook(encoding='utf-8')
    # Create a worksheet.
    worksheet = workbook.add_sheet('测试结果')
    # Set the header style.
    alignment = xlwt.Alignment()
    alignment.horz = xlwt.Alignment.HORZ_CENTER
    alignment.vert = xlwt.Alignment.VERT_CENTER
    header_style = xlwt.XFStyle()
    header_style.alignment = alignment
    col1 = worksheet.col(0)
    col1.width = 256 * 8
    col2 = worksheet.col(1)
    col2.width = 256 * 35
    col3 = worksheet.col(2)
    col3.width = 256 * 80
    col4 = worksheet.col(3)
    col4.width = 256 * 10
    col5 = worksheet.col(4)
    col5.width = 256 * 40
    col6 = worksheet.col(5)
    col6.width = 256 * 12
    col7 = worksheet.col(6)
    col7.width = 256 * 10
    # Set the header font size and colour.
    font = xlwt.Font()
    font.height = 20 * 12
    font.colour_index = 17
    header_style.font = font
    # Set the height of the first header row.
    tall_style = xlwt.easyxf('font:height 480')
    row1 = worksheet.row(0)
    row1.set_style(tall_style)

    # Define the header column titles.
    worksheet.write(0, 0, '序号', header_style)
    worksheet.write(0, 1, '接口名称', header_style)
    worksheet.write(0, 2, '被测接口', header_style)
    worksheet.write(0, 3, '请求方式', header_style)
    worksheet.write(0, 4, '请求参数', header_style)
    worksheet.write(0, 5, '耗时(秒)', header_style)
    worksheet.write(0, 6, '测试结果', header_style)
    worksheet.write(0, 7, '失败备注', header_style)

    # Result cell styles.
    col_style = xlwt.XFStyle()
    col_style.alignment = alignment
    style_success = xlwt.easyxf('pattern: pattern solid, fore_colour green')
    style_success.alignment = alignment
    style_fail = xlwt.easyxf('pattern: pattern solid, fore_colour red')
    style_fail.alignment = alignment

    index = 1  # sequence number
    for result in result_list:  # iterate over the results and write them into the xls file in order
        worksheet.write(index, 0, index, col_style)
        worksheet.write(index, 1, result['case_options']['interface_name'])
        worksheet.write(index, 2, result['case_options']['url'])
        worksheet.write(index, 3, str(result['case_options']['request_type']).upper(), col_style)
        if result['case_options'].get("body") is not None:  # only write the body column when it exists
            worksheet.write(index, 4, json.dumps(result['case_options'].get('body')))
        worksheet.write(index, 5, result['cost_time'], col_style)
        res = "通过" if result['check_result'] else '失败'  # map True/False to 通过 (pass) / 失败 (fail)

        if result['check_result']:
            worksheet.write(index, 6, res, style_success)  # green background for passed cases
        else:
            worksheet.write(index, 6, res, style_fail)     # red background for failed cases
            worksheet.write(index, 7, '断言内容:' + str(result['case_options']['assert_value']) + "\n\r实际返回:" + str(
                result['response_result'])[0:30000])  # for failed cases, record the assertion and up to 30000 chars of the actual response
        index += 1
    workbook.save(report_path)
    print("报告成功创建:" + str(report_path))
    return
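A sketch of driving the xls writer directly with one hand-built failed result, assuming the modules above are importable; in normal use the entries come from executor.run().

import write_xls_report

fake_result = {
    'case_options': {'interface_name': '示例接口', 'url': 'http://example.com/api',
                     'request_type': 'post', 'body': {'id': 1},
                     'assert_type': '包含', 'assert_value': 'success'},
    'response_result': {'code': 1, 'msg': 'error'},
    'check_result': False,
    'cost_time': 0.21,
}
write_xls_report.write([fake_result])  # writes ./report/<timestamp>_report.xls when config.xls_report is True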
@@ -1,55 +0,0 @@
{
    "MODULE" :
    {
        "SERVERSHARDING" :
        [
            {
                "ShardingName" : "keyword_index_data1743",
                "INSTANCE" :
                [
                    {
                        "ip" : "11.80.17.227",
                        "port" : "12003",
                        "weight" : 1
                    },
                    {
                        "ip" : "11.50.77.97",
                        "port" : "12003",
                        "weight" : 1
                    },
                    {
                        "ip" : "11.80.17.227",
                        "port" : "11017",
                        "weight" : 1,
                        "role" : "index_gen",
                        "disasterRole" : "master"
                    },
                    {
                        "ip" : "11.50.77.97",
                        "port" : "11017",
                        "weight" : 1,
                        "role" : "index_gen",
                        "disasterRole" : "replicate"
                    }
                ]
            },
            {
                "ShardingName" : "keyword_index_data1744",
                "INSTANCE" :
                [
                    {
                        "ip" : "11.50.77.97",
                        "port" : "12003",
                        "weight" : 1
                    },
                    {
                        "ip" : "11.50.77.97",
                        "port" : "11017",
                        "weight" : 1,
                        "role" : "index_gen"
                    }
                ]
            }
        ]
    }
}
@@ -1,11 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<MODULE>
    <SERVERSHARDING ShardingName="keyword_index_data1010">
        <INSTANCE ip="11.80.17.227" port="12003" weight="1"/>
        <INSTANCE ip="11.80.17.227" port="11017" weight="1" role="index_gen"/>
    </SERVERSHARDING>
    <SERVERSHARDING ShardingName="keyword_index_data1011">
        <INSTANCE ip="11.50.77.97" port="12003" weight="1"/>
        <INSTANCE ip="11.50.77.97" port="11017" weight="1" role="index_gen"/>
    </SERVERSHARDING>
</MODULE>
@@ -1,48 +0,0 @@
{
    "logLevel":7,
    "logFilePath":"../log",
    "invokeTimeout":500,
    "physicalId":3,
    "reportAlarmUrl":"http://monitor.m.jd.com/tools/new_alarm_api/send_alarm",
    "alarmReceivers":["chenyujie28@jd.com"],
    "getPhysicalInfoUrl" : "http://g1.jsf.jd.local/com.jd.cap.data.api.service.XCMDBService/cap_read/findByQuery/8912/",
    "listenAddr":
    {
        "ip":"11.80.17.227",
        "port":17877
    },
    "clusterHosts":
    [
        {
            "ip":"11.80.17.227",
            "port":17878
        },
        {
            "ip":"11.80.17.227",
            "port":17879
        }
    ],
    "adminHosts":
    [
        {
            "ip":"192.168.144.126",
            "port":9988
        }
    ],
    "dtcInfo":
    {
        "detectPeriod":100,
        "detectStep":300,
        "detectTimeoutSet":
        {
            "sameZoneTimeout":1000,
            "domesticZoneTimeout":2000,
            "abroadZoneTimeout":5000
        }
    },
    "Config":
    {
        "CaDir":"/usr/local/dtcadmin/ca/conf/",
        "CaPid":1606976164
    }
}
@@ -1,348 +0,0 @@
|
||||
/////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// Handle access to mysql database
|
||||
// created by qiuyu on Feb 21, 2019
|
||||
////////////////////////////////////////////////////////////////
|
||||
#include "DBInstance.h"
|
||||
#include "DtcMonitorConfigMgr.h"
|
||||
#include "log.h"
|
||||
|
||||
#include <errmsg.h>
|
||||
#include <string.h>
|
||||
|
||||
DBInstance::DBInstance()
|
||||
{
|
||||
}
|
||||
|
||||
DBInstance::~DBInstance()
|
||||
{
|
||||
closeDB();
|
||||
}
|
||||
|
||||
void DBInstance::freeResult()
|
||||
{
|
||||
mysql_free_result(mDBConnection.sMyRes);
|
||||
mDBConnection.sMyRes = NULL;
|
||||
mDBConnection.sMyRow = NULL;
|
||||
}
|
||||
|
||||
// initialize the db connection in this instance
|
||||
bool DBInstance::initDB(
|
||||
const std::string& dbHostName,
|
||||
const int port,
|
||||
const std::string& dbName,
|
||||
const std::string& dbUser,
|
||||
const std::string& dbPassword,
|
||||
const int iConnectTimeOut,
|
||||
const int iReadTimeOut,
|
||||
const int iWriteTimeOut)
|
||||
{
|
||||
monitor_log_error("Database initilized info. host:%s, port:%d, dbName:%s, dbUser:%s,\
|
||||
dbPassword:%s", dbHostName.c_str(), port, dbName.c_str(), dbUser.c_str(), dbPassword.c_str());
|
||||
|
||||
if (dbHostName.empty() || port <= 0 || dbName.empty() || dbUser.empty() || dbPassword.empty())
|
||||
{
|
||||
monitor_log_error("invalid database initilized info.");
|
||||
return false;
|
||||
}
|
||||
|
||||
mDBConnection.sMyRes = NULL;
|
||||
mDBConnection.hasConnected = false;
|
||||
mDBConnection.inTransaction = false;
|
||||
mDBConnection.sDBInfo.sDBHostName = dbHostName;
|
||||
mDBConnection.sDBInfo.sDBPort = port;
|
||||
mDBConnection.sDBInfo.sDBUserName = dbUser;
|
||||
mDBConnection.sDBInfo.sPassword = dbPassword;
|
||||
mDBConnection.sDBInfo.sDBName = dbName;
|
||||
|
||||
// init mysql
|
||||
mysql_init(&mDBConnection.sMysqlInstance);
|
||||
|
||||
// hard code
|
||||
mysql_options(&mDBConnection.sMysqlInstance, MYSQL_SET_CHARSET_NAME, "utf8");
|
||||
|
||||
// reconnect if connection lost from Mysql for alive timeout
|
||||
bool reconnect = true;
|
||||
mysql_options(&mDBConnection.sMysqlInstance, MYSQL_OPT_RECONNECT, &reconnect);
|
||||
|
||||
mysql_options(&mDBConnection.sMysqlInstance, MYSQL_OPT_CONNECT_TIMEOUT,(const char *)&iConnectTimeOut);
|
||||
mysql_options(&mDBConnection.sMysqlInstance, MYSQL_OPT_READ_TIMEOUT,(const char *)&iReadTimeOut);
|
||||
mysql_options(&mDBConnection.sMysqlInstance, MYSQL_OPT_WRITE_TIMEOUT,(const char *)&iWriteTimeOut);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void DBInstance::closeDB()
|
||||
{
|
||||
monitor_log_error("closing database connection");
|
||||
|
||||
freeResult();
|
||||
// mDBConnection.sMyRes = NULL;
|
||||
// mDBConnection.sMyRow = NULL;
|
||||
mysql_close(&(mDBConnection.sMysqlInstance));
|
||||
mDBConnection.hasConnected = false;
|
||||
mDBConnection.inTransaction = false;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
bool DBInstance::connectDB()
|
||||
{
|
||||
if (mDBConnection.hasConnected)
|
||||
{
|
||||
monitor_log_error("has connected to mysql yet, check it.");
|
||||
return true;
|
||||
// closeDB();
|
||||
}
|
||||
|
||||
monitor_log_error("connecting to db, dbHost:%s, user:%s, dbName:%s",
|
||||
mDBConnection.sDBInfo.sDBHostName.c_str(), mDBConnection.sDBInfo.sDBUserName.c_str(),
|
||||
mDBConnection.sDBInfo.sDBName.c_str());
|
||||
|
||||
MYSQL *pMysql = &(mDBConnection.sMysqlInstance);
|
||||
if (!mysql_real_connect(pMysql, mDBConnection.sDBInfo.sDBHostName.c_str(),
|
||||
mDBConnection.sDBInfo.sDBUserName.c_str(), mDBConnection.sDBInfo.sPassword.c_str(),
|
||||
mDBConnection.sDBInfo.sDBName.c_str(),
|
||||
mDBConnection.sDBInfo.sDBPort, NULL, 0))
|
||||
{
|
||||
monitor_log_error("connect to db failed:%s", mysql_error(pMysql));
|
||||
return false;
|
||||
}
|
||||
mDBConnection.hasConnected = true;
|
||||
monitor_log_error("connected to db success");
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool DBInstance::startTransaction()
|
||||
{
|
||||
if(mDBConnection.inTransaction)
|
||||
{
|
||||
monitor_log_error("has unfinished transaction in this process");
|
||||
return false;
|
||||
}
|
||||
|
||||
const char *sql="BEGIN;";
|
||||
int ret = execSQL(sql);
|
||||
if (ret < 0)
|
||||
{
|
||||
monitor_log_error("start transaction error");
|
||||
}
|
||||
else
|
||||
{
|
||||
mDBConnection.inTransaction = true; //a transaction start in this connection
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool DBInstance::rollBackTransaction()
|
||||
{
|
||||
if (!mDBConnection.inTransaction)
|
||||
{
|
||||
monitor_log_error("no started transaction");
|
||||
return false;
|
||||
}
|
||||
|
||||
const char *sql="ROLLBACK;";
|
||||
int ret = execSQL(sql);
|
||||
if (ret < 0)
|
||||
{
|
||||
monitor_log_error("rollback transaction failed");
|
||||
}
|
||||
mDBConnection.inTransaction = false; // the transaction has been rolled back
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool DBInstance::commitTransaction()
|
||||
{
|
||||
if (!mDBConnection.inTransaction)
|
||||
{
|
||||
monitor_log_error("no transaction for committed");
|
||||
return false;
|
||||
}
|
||||
|
||||
const char *sql="COMMIT;";
|
||||
int ret = execSQL(sql);
|
||||
if (ret < 0)
|
||||
{
|
||||
monitor_log_error("committed transaction failed");
|
||||
}
|
||||
mDBConnection.inTransaction = false; // the transaction has been committed on this connection
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
int DBInstance::execSQL(const char *sql)
|
||||
{
|
||||
if (sql == NULL)
|
||||
{
|
||||
monitor_log_error("sql is NULL");
|
||||
return -1;
|
||||
}
|
||||
|
||||
monitor_log_error("execute sql. sql:%s", sql);
|
||||
|
||||
static int reConnect = 0;
|
||||
|
||||
RETRY:
|
||||
if (!mDBConnection.hasConnected)
|
||||
{
|
||||
monitor_log_error("mysql db has not connected");
|
||||
|
||||
if (!mDBConnection.inTransaction)
|
||||
{
|
||||
monitor_log_error("not in transaction, try to reconnect");
|
||||
if (!connectDB())
|
||||
{
|
||||
monitor_log_error("connect to mysql db failed");
|
||||
reConnect = 0;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
monitor_log_error("has an unfinished transaction, not connect automatic");
|
||||
reConnect = 0;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
MYSQL* pMysql = &(mDBConnection.sMysqlInstance);
|
||||
if (mDBConnection.sDBInfo.sDBName.empty())
|
||||
{
|
||||
monitor_log_error("database name can not be empty");
|
||||
reConnect = 0;
|
||||
closeDB();
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (mysql_select_db(pMysql, mDBConnection.sDBInfo.sDBName.c_str()) != 0)
|
||||
{
|
||||
if (mysql_errno(pMysql) == CR_SERVER_GONE_ERROR && reConnect < 10)
|
||||
{
|
||||
// the server has closed the connection because the keep-alive time elapsed; reconnect
|
||||
monitor_log_error("retry connect, round:%d", reConnect);
|
||||
|
||||
// close the unclosed socket first
|
||||
closeDB();
|
||||
reConnect++;
|
||||
goto RETRY;
|
||||
}
|
||||
monitor_log_error("switch to db failed:%s", mysql_error(pMysql));
|
||||
reConnect = 0;
|
||||
closeDB();
|
||||
return -1;
|
||||
}
|
||||
reConnect = 0;
|
||||
|
||||
// mysql_query returns 0 when the statement executes successfully
|
||||
if (mysql_query(pMysql, sql))
|
||||
{
|
||||
monitor_log_error("query failed:%s", mysql_error(pMysql));
|
||||
|
||||
if (mysql_errno(pMysql) == CR_SERVER_GONE_ERROR || mysql_errno(pMysql) == CR_SERVER_LOST)
|
||||
closeDB();
|
||||
return -mysql_errno(pMysql);
|
||||
}
|
||||
|
||||
bool isSelect = false;
|
||||
if (!strncasecmp(skipWhiteSpace(sql), "select", 6)) isSelect = true;
|
||||
|
||||
// mysql_store_result stores the result set on the client side and returns a
// pointer to it
|
||||
//
|
||||
// it's meaningless to call mysql_store_result when the query type is not select
|
||||
if (isSelect && !(mDBConnection.sMyRes = mysql_store_result(pMysql)))
|
||||
{
|
||||
monitor_log_error("mysql return a NULL result set:%s", mysql_error(pMysql));
|
||||
return -2;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// returns a pointer to the field value
|
||||
char* DBInstance::getFieldValue(const int fieldIndex)
|
||||
{
|
||||
if (NULL == mDBConnection.sMyRes || NULL == mDBConnection.sMyRow)
|
||||
{
|
||||
monitor_log_error("empty dataset. errno:%s", mysql_error(&mDBConnection.sMysqlInstance));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int numFields = getNumFields();
|
||||
if (fieldIndex < 0 || fieldIndex >= numFields)
|
||||
{
|
||||
monitor_log_error("field index out of boundary. totalFields:%d,\
|
||||
fieldIndex:%d", numFields, fieldIndex);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return mDBConnection.sMyRow[fieldIndex];
|
||||
}
|
||||
|
||||
// fetch the next row of the result set stored in sMyRes by mysql_store_result()
// Return: 1. reached the end or the fetch failed: return -1
//         2. otherwise: return 0
|
||||
int DBInstance::fetchRow()
|
||||
{
|
||||
if (!mDBConnection.hasConnected)
|
||||
return -1;
|
||||
|
||||
if ((mDBConnection.sMyRow = mysql_fetch_row(mDBConnection.sMyRes)) != 0) return 0;
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
// returns the number of rows affected by the previous operation
// works for INSERT, DELETE and UPDATE statements
// return:
// 1. > 0 : number of rows touched
// 2. = 0 : the previous operation changed nothing
// 3. -1  : error
|
||||
int DBInstance::getAffectedRows()
|
||||
{
|
||||
return mysql_affected_rows(&(mDBConnection.sMysqlInstance));
|
||||
}
|
||||
|
||||
// return number of rows in the dataset
|
||||
// this command is only work for select sql
|
||||
// Note: if the table is empty, mysql_num_rows will return 1, not 0
|
||||
int DBInstance::getNumRows()
|
||||
{
|
||||
if (!mDBConnection.sMyRes) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
MYSQL_RES* pRes = mDBConnection.sMyRes;
|
||||
|
||||
return mysql_num_rows(pRes);
|
||||
}
|
||||
|
||||
// get the field number in one record
|
||||
int DBInstance::getNumFields()
|
||||
{
|
||||
if (!mDBConnection.sMyRes || NULL == mDBConnection.sMyRow) return 0;
|
||||
|
||||
MYSQL_RES* pRes = mDBConnection.sMyRes;
|
||||
|
||||
return mysql_num_fields(pRes);
|
||||
}
|
||||
/*
|
||||
* get auto increment id
|
||||
*/
|
||||
my_ulonglong DBInstance::getInsertID()
|
||||
{
|
||||
return mysql_insert_id(&(mDBConnection.sMysqlInstance));
|
||||
}
|
||||
|
||||
char* DBInstance::skipWhiteSpace(const char* sStr)
|
||||
{
|
||||
char *p = (char *)sStr;
|
||||
while(*p == ' ' || *p == '\t' || *p == '\r' || *p == '\n') p++;
|
||||
|
||||
return p;
|
||||
}
|
@@ -1,73 +0,0 @@
|
||||
#ifndef __DB_INSTANCE_H__
|
||||
#define __DB_INSTANCE_H__
|
||||
|
||||
#include <string>
|
||||
|
||||
#define list_add my_list_add
|
||||
// #include "mysql.h"
|
||||
#include <mysql.h>
|
||||
#undef list_add
|
||||
|
||||
typedef struct
|
||||
{
|
||||
std::string sDBHostName; /*DB host*/
|
||||
unsigned int sDBPort;/*DB port*/
|
||||
std::string sDBUserName;/*DB user*/
|
||||
std::string sPassword; /*DB password*/
|
||||
std::string sDBName;/*DB name*/
|
||||
} DBInfo_t;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
MYSQL sMysqlInstance;
|
||||
MYSQL_RES* sMyRes;
|
||||
MYSQL_ROW sMyRow;
|
||||
// MYSQL_RES* sRes[MAX_MYSQL_RES];
|
||||
// MYSQL_ROW sRows[MAX_MYSQL_RES];
|
||||
DBInfo_t sDBInfo;
|
||||
// int sConnectTimeOut;
|
||||
// int sReadTimeOut;
|
||||
// int WriteTimeOut;
|
||||
bool hasConnected;
|
||||
bool inTransaction;
|
||||
} DBConnection_t;
|
||||
|
||||
class DBInstance
|
||||
{
|
||||
private:
|
||||
DBConnection_t mDBConnection;
|
||||
|
||||
public:
|
||||
DBInstance();
|
||||
~DBInstance();
|
||||
|
||||
public:
|
||||
bool initDB(
|
||||
const std::string& dbHostName,
|
||||
const int port,
|
||||
const std::string& dbName,
|
||||
const std::string& dbUser,
|
||||
const std::string& dbPassword,
|
||||
const int iConnectTimeOut,
|
||||
const int iReadTimeOut,
|
||||
const int iWriteTimeOut);
|
||||
|
||||
int execSQL(const char *sql);
|
||||
int getAffectedRows();
|
||||
void closeDB();
|
||||
bool connectDB();
|
||||
int fetchRow();
|
||||
void freeResult();
|
||||
my_ulonglong getInsertID();
|
||||
int getNumFields();
|
||||
int getNumRows();
|
||||
bool startTransaction();
|
||||
bool rollBackTransaction();
|
||||
bool commitTransaction();
|
||||
char* getFieldValue(const int fieldIndex);
|
||||
|
||||
private:
|
||||
char* skipWhiteSpace(const char* sStr);
|
||||
};
|
||||
|
||||
#endif // __DB_INSTANCE_H__
|
@@ -1,269 +0,0 @@
|
||||
///////////////////////////////////////////////////
|
||||
//
|
||||
// the foundation class for agent and DTC detector
|
||||
// create by qiuyu on Dec 10, 2018
|
||||
//
|
||||
//////////////////////////////////////////////////
|
||||
|
||||
#include "DetectHandlerBase.h"
|
||||
#include "DBInstance.h"
|
||||
|
||||
#include <fstream>
|
||||
|
||||
#ifdef TEST_MONITOR
|
||||
// static const std::string sgDBHostName = "10.181.174.20";
|
||||
// static const int sgDBPort = 3306;
|
||||
// static const std::string sgDBUserName = "dtcadmin";
|
||||
// static const std::string sgUserPassword = "dtcadmin@jd2015";
|
||||
|
||||
static const std::string sgDBHostName = "11.80.17.227";
|
||||
static const int sgDBPort = 3306;
|
||||
static const std::string sgDBUserName = "root";
|
||||
static const std::string sgUserPassword = "root";
|
||||
|
||||
#else
|
||||
static const std::string sgDBHostName = "my12115m.mysql.jddb.com";
|
||||
static const int sgDBPort = 3358;
|
||||
static const std::string sgDBUserName = "dtcadmin";
|
||||
static const std::string sgUserPassword = "nbGFzcy9qb2luOztHRVQ=";
|
||||
#endif
|
||||
static const std::string sgDBName = "search_monitor_cluster";
|
||||
extern const std::string gTableName = "distributed_monitor_cluster_lock";
|
||||
static const int sgConnectTimeout = 10; // second
|
||||
static const int sgReadTimeout = 5; // second
|
||||
static const int sgWriteTimeout = 5; // second
|
||||
// use sgLockExpiredTime to clean up unexpected dead locks;
// all timestamps are based on MySQL time
|
||||
extern const int sgLockExpiredTime = 10 * 60; // 10 min
|
||||
|
||||
DBInstance* DetectHandlerBase::mDBInstance = NULL;
|
||||
std::string DetectHandlerBase::mGetPhysicalInfoUrl = "";
|
||||
std::string DetectHandlerBase::mSelfAddr = "";
|
||||
LirsCache* DetectHandlerBase::mPhysicalRegionCode = NULL;
|
||||
int64_t DetectHandlerBase::mCacheExpiredWhen = 0;
|
||||
std::bitset<DetectHandlerBase::eMaxZoneCode> DetectHandlerBase::mRegionCodeSet;
|
||||
|
||||
DetectHandlerBase::~DetectHandlerBase()
|
||||
{
|
||||
if (mDBInstance) delete mDBInstance;
|
||||
if (mPhysicalRegionCode) delete mPhysicalRegionCode;
|
||||
}
|
||||
|
||||
void DetectHandlerBase::addTimerEvent()
|
||||
{
|
||||
monitor_log_error("could never come here!!");
|
||||
return;
|
||||
}
|
||||
|
||||
void DetectHandlerBase::TimerNotify(void)
|
||||
{
|
||||
monitor_log_error("could never come here!!");
|
||||
return;
|
||||
}
|
||||
|
||||
// init static member data
|
||||
bool DetectHandlerBase::initHandler()
|
||||
{
|
||||
if (!mDBInstance)
|
||||
{
|
||||
mDBInstance = new DBInstance();
|
||||
if (!mDBInstance)
|
||||
{
|
||||
monitor_log_error("create database instance failed.");
|
||||
return false;
|
||||
}
|
||||
|
||||
bool rslt = mDBInstance->initDB(sgDBHostName, sgDBPort, sgDBName, sgDBUserName,\
|
||||
sgUserPassword, sgConnectTimeout, sgReadTimeout, sgWriteTimeout);
|
||||
if (!rslt) return false;
|
||||
|
||||
rslt = mDBInstance->connectDB();
|
||||
if (!rslt) return false;
|
||||
}
|
||||
|
||||
if (mGetPhysicalInfoUrl.empty()) mGetPhysicalInfoUrl = DtcMonitorConfigMgr::getInstance()->getPhysicalInfoUrl();
|
||||
if (mGetPhysicalInfoUrl.empty())
|
||||
{
|
||||
monitor_log_error("invalid url.");
|
||||
return false;
|
||||
}
|
||||
|
||||
// No race condition
|
||||
if (!mPhysicalRegionCode) mPhysicalRegionCode = new LirsCache(10000);
|
||||
if (!mPhysicalRegionCode)
|
||||
{
|
||||
monitor_log_error("allocate cache failed.");
|
||||
return false;
|
||||
}
|
||||
|
||||
// register region code
|
||||
if (mRegionCodeSet.none())
|
||||
{
|
||||
// need to register
|
||||
registerRegionCode();
|
||||
}
|
||||
|
||||
if (mSelfAddr.empty())
|
||||
{
|
||||
const std::pair<std::string, int>& pair= DtcMonitorConfigMgr::getInstance()->getListenAddr();
|
||||
mSelfAddr = pair.first + ":" + DetectHandlerBase::toString<int>(pair.second);
|
||||
}
|
||||
if (mSelfAddr.empty())
|
||||
{
|
||||
monitor_log_error("invalid instance address!");
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void DetectHandlerBase::reportAlarm(const std::string& errMessage)
|
||||
{
|
||||
CurlHttp curlHttp;
|
||||
BuffV buff;
|
||||
curlHttp.SetHttpParams("%s", errMessage.c_str());
|
||||
curlHttp.SetTimeout(mDriverTimeout);
|
||||
|
||||
int http_ret = curlHttp.HttpRequest(mReportUrl.c_str(), &buff, false);
|
||||
if(0 != http_ret)
|
||||
{
|
||||
monitor_log_error("report alarm http error! curlHttp.HttpRequest error ret:[%d]", http_ret);
|
||||
return;
|
||||
}
|
||||
|
||||
std::string response = ";";
|
||||
response= buff.Ptr();
|
||||
monitor_log_error("response from report server! ret:[%d], response:[%s]", http_ret, response.c_str());
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
int DetectHandlerBase::getInstanceTimeout(
|
||||
const DetectHandlerBase::DetectType type,
|
||||
const std::string& detectorAddr,
|
||||
const std::string& detectedNodeAddr)
|
||||
{
|
||||
DtcMonitorConfigMgr::TimeoutSet_t tmSet;
|
||||
switch (type)
|
||||
{
|
||||
/* case DetectHandlerBase::eAgentDetect:
|
||||
tmSet = DtcMonitorConfigMgr::getInstance()->getAgentTimeoutSet();
|
||||
break; */
|
||||
case DetectHandlerBase::eDtcDetect:
|
||||
tmSet = DtcMonitorConfigMgr::getInstance()->getDtcTimeoutSet();
|
||||
break;
|
||||
default:
|
||||
return -1;
|
||||
}
|
||||
return tmSet.sDomesticZoneTimeout;
|
||||
}
|
||||
|
||||
void DetectHandlerBase::getNodeInfo(
|
||||
const std::string& phyAddr,
|
||||
std::string& zoneCode)
|
||||
{
|
||||
// in current now, this API is not allowed to be called
|
||||
zoneCode = "";
|
||||
return;
|
||||
|
||||
CurlHttp curlHttp;
|
||||
BuffV buff;
|
||||
|
||||
std::stringstream body;
|
||||
body << "{\"ip\":\"" << phyAddr << "\"},1,100";
|
||||
// body << "{\"ip\":\"" << "10.187.155.131" << "\"},1,100";
|
||||
monitor_log_info("body contents:%s", body.str().c_str());
|
||||
|
||||
curlHttp.SetHttpParams("%s", body.str().c_str());
|
||||
curlHttp.SetTimeout(10); // 10s hard code
|
||||
|
||||
zoneCode = "";
|
||||
int http_ret = curlHttp.HttpRequest(mGetPhysicalInfoUrl.c_str(), &buff, false);
|
||||
if(0 != http_ret)
|
||||
{
|
||||
monitor_log_error("get physical info failed! addr:%s, errorCode:%d", phyAddr.c_str(), http_ret);
|
||||
return;
|
||||
}
|
||||
|
||||
std::string response = ";";
|
||||
response= buff.Ptr();
|
||||
monitor_log_info("physical info:%s", response.c_str());
|
||||
|
||||
Json::Reader infoReader;
|
||||
Json::Value infoValue;
|
||||
if(!infoReader.parse(response, infoValue))
|
||||
{
|
||||
monitor_log_error("parse physical info failed, addr:%s, response:%s", phyAddr.c_str(), response.c_str());
|
||||
return;
|
||||
}
|
||||
|
||||
if (!(infoValue.isMember("result") && infoValue["result"].isArray() && infoValue["result"].size() == 1))
|
||||
{
|
||||
monitor_log_error("invalid physical info, addr:%s, response:%s", phyAddr.c_str(), response.c_str());
|
||||
return;
|
||||
}
|
||||
|
||||
const Json::Value& instanceInfo = infoValue["result"][0];
|
||||
if (!(instanceInfo.isMember("dataCenterId") && instanceInfo["dataCenterId"].isInt()))
|
||||
{
|
||||
monitor_log_error("invalid instance info, addr:%s, response:%s", phyAddr.c_str(), instanceInfo.toStyledString().c_str());
|
||||
return;
|
||||
}
|
||||
|
||||
// a bit duplicate conversion
|
||||
long long int centerCode = instanceInfo["dataCenterId"].asInt();
|
||||
if (centerCode <= 0 || centerCode >= (1LL << REGION_CODE_SHIFT))
|
||||
{
|
||||
monitor_log_error("illegal center code, addr:%s, centerCode:%lld", phyAddr.c_str(), centerCode);
|
||||
return;
|
||||
}
|
||||
|
||||
// random time to prevent cache avalanche
|
||||
int64_t expiredTime = GET_TIMESTAMP();
|
||||
expiredTime += (sCacheExpiredInterval + (sCacheExpiredInterval > 24 ? rand() % (sCacheExpiredInterval/24) : 0));
|
||||
zoneCode = DetectHandlerBase::toString<long long>((expiredTime << REGION_CODE_SHIFT) + centerCode);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// set aboard to 1 and the others to 0
|
||||
void DetectHandlerBase::registerRegionCode()
|
||||
{
|
||||
// reset all bit to 0
|
||||
mRegionCodeSet.reset();
|
||||
|
||||
mRegionCodeSet.set(eYNZoneCode);
|
||||
mRegionCodeSet.set(eHKTHZoneCode);
|
||||
monitor_log_error("Region code bitset size:%ld", mRegionCodeSet.size());
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
bool DetectHandlerBase::isNodeExpired(const std::string& nodeInfo)
|
||||
{
|
||||
long long int va = atoll(nodeInfo.c_str());
|
||||
int64_t time = GET_TIMESTAMP();
|
||||
return (va >> REGION_CODE_SHIFT) <= time;
|
||||
}
|
||||
|
||||
void DetectHandlerBase::stringSplit(
|
||||
const std::string& src,
|
||||
const std::string& delim,
|
||||
std::vector<std::string>& dst)
|
||||
{
|
||||
dst.clear();
|
||||
|
||||
std::string::size_type start_pos = 0;
|
||||
std::string::size_type end_pos = 0;
|
||||
end_pos = src.find(delim);
|
||||
while(std::string::npos != end_pos)
|
||||
{
|
||||
dst.push_back(src.substr(start_pos, end_pos - start_pos));
|
||||
start_pos = end_pos + delim.size();
|
||||
end_pos = src.find(delim, start_pos);
|
||||
}
|
||||
if(src.length() != start_pos)
|
||||
{
|
||||
dst.push_back(src.substr(start_pos));
|
||||
}
|
||||
}
|
@@ -1,199 +0,0 @@
|
||||
///////////////////////////////////////////////////
|
||||
//
|
||||
// the foundation class for agent and DTC detector
|
||||
// create by qiuyu on Nov 26, 2018
|
||||
//
|
||||
//////////////////////////////////////////////////
|
||||
|
||||
#ifndef __DETECT_HANDLER_BASE_H__
|
||||
#define __DETECT_HANDLER_BASE_H__
|
||||
|
||||
#include "DtcMonitorConfigMgr.h"
|
||||
#include "DetectUtil.h"
|
||||
// #include "InvokeMgr.h"
|
||||
#include "curl_http.h"
|
||||
#include "poll_thread.h"
|
||||
#include "log.h"
|
||||
#include "LirsCache.h"
|
||||
|
||||
#include <vector>
|
||||
#include <map>
|
||||
#include <set>
|
||||
#include <sstream>
|
||||
#include <algorithm>
|
||||
#include <bitset>
|
||||
|
||||
#define REGION_CODE_SHIFT 10
|
||||
#define REGION_CODE_MASK (~(-1LL << REGION_CODE_SHIFT))
|
||||
|
||||
class DBInstance;
|
||||
|
||||
class DetectHandlerBase : public CTimerObject
|
||||
{
|
||||
private:
|
||||
static const int sExpiredTimeoffset = 60 * 1000 * 1000; // 60s
|
||||
static const int64_t sCacheExpiredInterval = 1LL * 24 * 3600 * 1000 * 1000; // one day
|
||||
|
||||
protected:
|
||||
static const int sEventTimerOffset = 300; // ms [-300, 300]
|
||||
|
||||
public:
|
||||
enum DetectType
|
||||
{
|
||||
eAgentDetect = 1,
|
||||
eDtcDetect
|
||||
};
|
||||
|
||||
enum ReportType
|
||||
{
|
||||
eAgentDetectFailed = 0,
|
||||
eOfflineAgentFailed = 1,
|
||||
eOfflineAgentSuccess = 2,
|
||||
eDtcDetectFailed,
|
||||
eSwitchDtcFailed,
|
||||
eSwitchDtcSuccess,
|
||||
eAccessDBFailed,
|
||||
eAccessCCFailed,
|
||||
eSwitchSlaveDtc,
|
||||
eReplicateDtcFailed,
|
||||
eNoneUsefulDtc,
|
||||
|
||||
eParseJsonResponseFailed = 7000,
|
||||
eResponseMissDefaultField,
|
||||
eInvalidReportType = -1
|
||||
};
|
||||
|
||||
enum ResponseCode
|
||||
{
|
||||
eROperateSuccess = 200,
|
||||
eRSwitchSlaveDtc = 201
|
||||
};
|
||||
|
||||
enum ZoneCode
|
||||
{
|
||||
eMinZoneCode = 0,
|
||||
|
||||
// domestic region
|
||||
eLF1ZoneCode = 3, // LF1
|
||||
eMJQZoneCode = 6, // MJQ data center
|
||||
eYNZoneCode = 7, // YN
|
||||
eMJQ3ZoneCode = 11, // MJQ3
|
||||
eLF3ZoneCode = 12, // LF3
|
||||
eHKTHZoneCode = 13, // THA
|
||||
eGZ1ZoneCode = 14, // GZ1
|
||||
eLF4ZoneCode = 15, // LF4
|
||||
eLF5ZoneCode = 17, // LF5
|
||||
eKEPLERZoneCode = 21, // kepler
|
||||
eHT3ZoneCode = 28, // HT3
|
||||
|
||||
eMaxZoneCode
|
||||
};
|
||||
|
||||
typedef struct InstanceInfo
|
||||
{
|
||||
std::string sAccessKey; // used by IP
|
||||
std::string sIpWithPort;
|
||||
int64_t sExpiredTimestamp;
|
||||
|
||||
InstanceInfo(
|
||||
const std::string& key,
|
||||
const std::string& addr,
|
||||
const int64_t expiredWhen)
|
||||
:
|
||||
sAccessKey(key),
|
||||
sIpWithPort(addr),
|
||||
sExpiredTimestamp(expiredWhen + sExpiredTimeoffset)
|
||||
{
|
||||
}
|
||||
|
||||
bool isExpired(const int64_t nowTimestamp) const
|
||||
{
|
||||
return sExpiredTimestamp <= nowTimestamp;
|
||||
}
|
||||
|
||||
bool operator < (const InstanceInfo& other) const
|
||||
{
|
||||
if ((sAccessKey != "" && other.sAccessKey != "")
|
||||
&& (sAccessKey != other.sAccessKey))
|
||||
{
|
||||
return sAccessKey < other.sAccessKey;
|
||||
}
|
||||
return sIpWithPort < other.sIpWithPort;
|
||||
}
|
||||
}InstanceInfo_t;
|
||||
|
||||
struct ProtocalHeader {
|
||||
uint32_t magic;
|
||||
uint32_t length;
|
||||
uint32_t cmd;
|
||||
};
|
||||
|
||||
protected:
|
||||
CPollThread* mDetectPoll;
|
||||
std::string mReportUrl;
|
||||
int mDriverTimeout; // ms
|
||||
int mDetectStep;
|
||||
int mPrevDetectIndex;
|
||||
std::set<InstanceInfo_t> mTroubleInstances;
|
||||
std::set<std::string> m_TroubleHandledIpSet;
|
||||
|
||||
// db instance
|
||||
static DBInstance* mDBInstance;
|
||||
|
||||
// region control relevant
|
||||
static std::string mGetPhysicalInfoUrl;
|
||||
static std::string mSelfAddr;
|
||||
static LirsCache* mPhysicalRegionCode; // mapping physicalId to region code
|
||||
static std::bitset<DetectHandlerBase::eMaxZoneCode> mRegionCodeSet; // region codes
|
||||
static int64_t mCacheExpiredWhen;
|
||||
|
||||
// statistic
|
||||
std::stringstream mDetectSummary;
|
||||
|
||||
public:
|
||||
DetectHandlerBase(CPollThread* poll)
|
||||
{
|
||||
mDetectPoll = poll;
|
||||
mCacheExpiredWhen = GET_TIMESTAMP() + sCacheExpiredInterval;
|
||||
mDetectSummary.str(""); // clear streaming
|
||||
srand(time(NULL));
|
||||
}
|
||||
|
||||
virtual ~DetectHandlerBase();
|
||||
virtual void addTimerEvent();
|
||||
virtual void TimerNotify(void);
|
||||
void reportAlarm(const std::string& errMessage);
|
||||
|
||||
static int getInstanceTimeout(
|
||||
const DetectHandlerBase::DetectType type,
|
||||
const std::string& detectorAddr,
|
||||
const std::string& nodeAddr);
|
||||
|
||||
template<typename T>
|
||||
static std::string toString(const T& val)
|
||||
{
|
||||
std::stringstream ss;
|
||||
ss << val;
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
protected:
|
||||
static int selectTimeoutWithZone(
|
||||
const DetectHandlerBase::DetectType type,
|
||||
const std::string& selfZoneCode,
|
||||
const std::string& peerZoneCode);
|
||||
|
||||
static void getNodeInfo(
|
||||
const std::string& phyAddr,
|
||||
std::string& zoneCode);
|
||||
|
||||
static void stringSplit(
|
||||
const std::string& src,
|
||||
const std::string& delim,
|
||||
std::vector<std::string>& dst);
|
||||
|
||||
static bool initHandler();
|
||||
static void registerRegionCode();
|
||||
static bool isNodeExpired(const std::string& nodeInfo);
|
||||
};
|
||||
#endif // __DETECT_HANDLER_BASE_H__
|
@@ -1,209 +0,0 @@
|
||||
/////////////////////////////////////////////////////
|
||||
//
|
||||
// for detect Agent and Dtc instance
|
||||
// create by qiuyu on Nov 27, 2018
|
||||
//
|
||||
/////////////////////////////////////////////////////
|
||||
|
||||
#include "DetectUtil.h"
|
||||
#include "sockaddr.h"
|
||||
#include "log.h"
|
||||
#include "detector_instance.h"
|
||||
#include "DtcMonitorConfigMgr.h"
|
||||
|
||||
#include <arpa/inet.h>
|
||||
#include <netinet/in.h>
|
||||
#include <sys/socket.h>
|
||||
#include <fcntl.h>
|
||||
#include <sstream>
|
||||
#include <errno.h>
|
||||
|
||||
bool DetectUtil::connectServer(
|
||||
int& fd,
|
||||
const std::string& ip,
|
||||
const int port)
|
||||
{
|
||||
#if 0
|
||||
CSocketAddress addr;
|
||||
std::stringstream bindAddr;
|
||||
|
||||
bindAddr << ip << ":" << port << "/tcp";
|
||||
addr.SetAddress(bindAddr.str().c_str(), (const char*)NULL);
|
||||
if (addr.SocketFamily() != 0)
|
||||
{
|
||||
monitor_log_error("invalid addr.");
|
||||
return false;
|
||||
}
|
||||
|
||||
fd = addr.CreateSocket();
|
||||
if (fd < 0)
|
||||
{
|
||||
monitor_log_error("create socket failed. errno:%d", errno);
|
||||
return false;
|
||||
}
|
||||
|
||||
fcntl(fd, F_SETFL, O_RDWR|O_NONBLOCK);
|
||||
int ret = addr.ConnectSocket(fd);
|
||||
if (ret != 0)
|
||||
{
|
||||
monitor_log_error("connect to server failed. ip:%s, portL%d, errno:%d", ip.c_str(), port, errno);
|
||||
return false;
|
||||
}
|
||||
#else
|
||||
monitor_log_info("connect to server. ip:%s, port:%d", ip.c_str(), port);
|
||||
fd = socket(AF_INET, SOCK_STREAM, 0);
|
||||
if (fd < 0)
|
||||
{
|
||||
monitor_log_error("create socket failed. ip:%s, port:%d", ip.c_str(), port);
|
||||
return false;
|
||||
}
|
||||
|
||||
struct sockaddr_in net_addr;
|
||||
net_addr.sin_addr.s_addr = inet_addr(ip.c_str());
|
||||
net_addr.sin_family = AF_INET;
|
||||
net_addr.sin_port = htons(port);
|
||||
// block to connect
|
||||
int ret = connect(fd, (struct sockaddr *)&net_addr, sizeof(struct sockaddr));
|
||||
if (ret < 0)
|
||||
{
|
||||
monitor_log_error("connect to server failed, fd:%d, errno:%d, ip:%s, port:%d", fd, errno, ip.c_str(), port);
|
||||
close(fd);
|
||||
return false;
|
||||
}
|
||||
|
||||
// set socket to non-block
|
||||
fcntl(fd, F_SETFL, O_RDWR|O_NONBLOCK);
|
||||
#endif
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
int DetectUtil::recieveMessage(
|
||||
const int fd,
|
||||
char* data,
|
||||
const int dataLen)
|
||||
{
|
||||
int readNum = 0;
|
||||
int nRead = 0;
|
||||
int nRet = 0;
|
||||
do {
|
||||
nRet = read(fd, data + nRead, dataLen - nRead);
|
||||
if (nRet > 0)
|
||||
{
|
||||
nRead += nRet;
|
||||
if (nRead == dataLen) return nRead;
|
||||
}
|
||||
else if (nRet == 0)
|
||||
{
|
||||
// close the connection
|
||||
monitor_log_error("client close the socket, fd:%d", fd);
|
||||
// return nRead;
|
||||
return -1;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (readNum < 1000 && (errno == EAGAIN || errno == EINTR))
|
||||
{
|
||||
readNum++;
|
||||
continue;
|
||||
}
|
||||
else
|
||||
{
|
||||
// close the connection
|
||||
monitor_log_error("client close the socket, fd:%d", fd);
|
||||
// return nRead;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
}while (nRead < dataLen);
|
||||
|
||||
return dataLen;
|
||||
}
|
||||
|
||||
int DetectUtil::sendMessage(
|
||||
const int netfd,
|
||||
char* data,
|
||||
const int dataLen)
|
||||
{
|
||||
int sendNum = 0;
|
||||
int nWrite = 0;
|
||||
int nRet = 0;
|
||||
do {
|
||||
nRet = write(netfd, data + nWrite, dataLen - nWrite);
|
||||
if (nRet > 0)
|
||||
{
|
||||
nWrite += nRet;
|
||||
if (dataLen == nWrite) return nWrite;
|
||||
}
|
||||
else if (nRet < 0)
|
||||
{
|
||||
if (sendNum < 1000 && (errno == EINTR || errno == EAGAIN))
|
||||
{
|
||||
sendNum++;
|
||||
continue;
|
||||
}
|
||||
else
|
||||
{
|
||||
// connection has issue, need to close the socket
|
||||
monitor_log_error("write socket failed, fd:%d, errno:%d", netfd, errno);
|
||||
return -1;
|
||||
// return nWrite;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
monitor_log_error("write socket failed, fd:%d, errno:%d", netfd, errno);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
while(nWrite < dataLen);
|
||||
|
||||
return dataLen;
|
||||
}
|
||||
|
||||
bool DetectUtil::detectAgentInstance(
|
||||
const std::string& accessKey,
|
||||
const std::string& ipWithPort,
|
||||
const int timeout,
|
||||
bool& isAlive,
|
||||
int& errCode)
|
||||
{
|
||||
return CDetectorInstance::DetectAgent(accessKey, ipWithPort, timeout, isAlive, errCode);
|
||||
}
|
||||
|
||||
bool DetectUtil::detectDtcInstance(
|
||||
const std::string& ipWithPort,
|
||||
const int timeout,
|
||||
bool& isAlive,
|
||||
int& errCode)
|
||||
{
|
||||
return CDetectorInstance::DetectDTC(ipWithPort, timeout, isAlive, errCode);
|
||||
}
|
||||
|
||||
// network endian is big endian
|
||||
void DetectUtil::translateByteOrder(uint64_t &value)
|
||||
{
|
||||
#if __BYTE_ORDER == __BIG_ENDIAN
|
||||
// do nothing, network byte order is already big endian
|
||||
#elif __BYTE_ORDER == __LITTLE_ENDIAN
|
||||
// translate to little endian
|
||||
unsigned char *val, temp;
|
||||
val = (unsigned char*)&value;
|
||||
temp = val[0];
|
||||
val[0] = val[7];
|
||||
val[7] = temp;
|
||||
temp = val[1];
|
||||
val[1] = val[6];
|
||||
val[6] = temp;
|
||||
temp = val[2];
|
||||
val[2] = val[5];
|
||||
val[5] = temp;
|
||||
temp = val[3];
|
||||
val[3] = val[4];
|
||||
val[4] = temp;
|
||||
#else
|
||||
#error unknown endian
|
||||
#endif
|
||||
|
||||
return;
|
||||
}
|
@@ -1,48 +0,0 @@
|
||||
/////////////////////////////////////////////////////
|
||||
//
|
||||
// for detect Agent and Dtc instance
|
||||
// create by qiuyu on Nov 27, 2018
|
||||
//
|
||||
/////////////////////////////////////////////////////
|
||||
|
||||
#ifndef __DETECT_UTIL_H__
|
||||
#define __DETECT_UTIL_H__
|
||||
|
||||
#include <string>
|
||||
#include <stdint.h>
|
||||
|
||||
class DetectUtil
|
||||
{
|
||||
public:
|
||||
static bool connectServer(
|
||||
int& fd,
|
||||
const std::string& ip,
|
||||
const int port);
|
||||
|
||||
static int recieveMessage(
|
||||
const int fd,
|
||||
char* data,
|
||||
const int dataLen);
|
||||
|
||||
static int sendMessage(
|
||||
const int fd,
|
||||
char* data,
|
||||
const int dataLen);
|
||||
|
||||
static bool detectAgentInstance(
|
||||
const std::string& accessKey,
|
||||
const std::string& ipWithPort,
|
||||
const int timeout,
|
||||
bool& isAlive,
|
||||
int& errCode);
|
||||
|
||||
static bool detectDtcInstance(
|
||||
const std::string& ipWithPort,
|
||||
const int timeout,
|
||||
bool& isAlive,
|
||||
int& errCode);
|
||||
|
||||
static void translateByteOrder(uint64_t& value);
|
||||
};
|
||||
|
||||
#endif // __DETECT_UTIL_H__
|
File diff suppressed because it is too large
@@ -1,107 +0,0 @@
|
||||
///////////////////////////////////////////////////
|
||||
//
|
||||
// the dtc detector
|
||||
// create by qiuyu on Nov 27, 2018
|
||||
// Modify: chenyujie
|
||||
//////////////////////////////////////////////////
|
||||
|
||||
#ifndef __DTC_DETECT_HANDLER_H__
|
||||
#define __DTC_DETECT_HANDLER_H__
|
||||
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include "DetectHandlerBase.h"
|
||||
#include "config_center_parser.h"
|
||||
|
||||
#define DISTRIBUTE_LOCK_SOLE_IP "127.0.0.1"
|
||||
#define DISTRIBUTE_LOCK_SOLE_PORT "8888"
|
||||
|
||||
class DtcDetectHandler : public DetectHandlerBase
|
||||
{
|
||||
enum RetCode {
|
||||
RT_INIT_ERR = 10001,
|
||||
RT_PARSE_CONF_ERR,
|
||||
RT_PARSE_JSON_ERR,
|
||||
RT_PRE_RUN_ERR,
|
||||
RT_DB_ERR,
|
||||
RT_HTTP_ERR,
|
||||
};
|
||||
|
||||
private:
|
||||
static int64_t adminInfoIdx;
|
||||
|
||||
private:
|
||||
DtcClusterContextType mDtcList;
|
||||
SearchCoreClusterContextType mDtcMap;
|
||||
int mPrevDetectedIndex;
|
||||
DtcClusterContextType mCheckDtcList;
|
||||
ParserBase* m_pCurrentParser;
|
||||
bool m_bReportEnable;
|
||||
|
||||
public:
|
||||
DtcDetectHandler(CPollThread* poll);
|
||||
~DtcDetectHandler();
|
||||
|
||||
virtual void addTimerEvent();
|
||||
virtual void TimerNotify(void);
|
||||
|
||||
private:
|
||||
void initHandler();
|
||||
|
||||
bool batchDetect(
|
||||
const int startIndex,
|
||||
const int endIndex);
|
||||
|
||||
bool procSingleDetect(
|
||||
const std::string& addr,
|
||||
bool& isAlive,
|
||||
int& errCode);
|
||||
|
||||
bool broadCastConfirm(
|
||||
const std::string& addr,
|
||||
bool& needSwitch);
|
||||
|
||||
bool reportDtcAlarm(
|
||||
const DetectHandlerBase::ReportType type,
|
||||
const std::string& addr,
|
||||
const int errCode);
|
||||
|
||||
bool doDtcSwitch(
|
||||
const std::string& sIP,
|
||||
const std::string& addr,
|
||||
const int errCode);
|
||||
|
||||
void verifyAllExpired(const int line);
|
||||
|
||||
int verifySingleExpired(const std::string& sIP, const std::string& addr);
|
||||
|
||||
bool loadDtcClusterInfo();
|
||||
bool loadDtcClusterInfoNoOpr();
|
||||
bool loadSearchCoreClusterInfo();
|
||||
|
||||
int Connect(const std::string& ip, const uint32_t port);
|
||||
|
||||
char* MakeAdminPackage(const std::string& shardingName,
|
||||
const std::string& ip,
|
||||
const std::string& port,
|
||||
int& msgsize);
|
||||
|
||||
int NotifyRouteAdmin(const std::string& ip,
|
||||
const uint32_t port,
|
||||
const std::string& sharding,
|
||||
const std::string& localip,
|
||||
const std::string& localport);
|
||||
|
||||
int NotifyConfigCenter();
|
||||
|
||||
bool CheckIndexGenIsMaster(const std::string& sIP);
|
||||
|
||||
int getDistributedLockForConsistency(
|
||||
const std::string& ip,
|
||||
const std::string& port);
|
||||
|
||||
void releaseDistributedLock(
|
||||
const std::string& ip,
|
||||
const std::string& port);
|
||||
};
|
||||
#endif // __DTC_DETECT_HANDLER_H__
|
@@ -1,109 +0,0 @@
|
||||
/////////////////////////////////////////////////////////
|
||||
//
|
||||
// This class detects the DTC instances at a fixed interval
|
||||
// created by qiuyu on Nov 26, 2018
|
||||
////////////////////////////////////////////////////////
|
||||
|
||||
#include "DtcMonitor.h"
|
||||
#include "poll_thread.h"
|
||||
#include "MonitorVoteListener.h"
|
||||
#include "DtcDetectHandler.h"
|
||||
|
||||
// DtcMonitor* DtcMonitor::mSelf = NULL;
|
||||
// CMutex* DtcMonitor::mMutexLock = new CMutex();
|
||||
|
||||
DtcMonitor::DtcMonitor()
|
||||
:
|
||||
mDetectPoll(new CPollThread("DtcDetectPoll")),
|
||||
mListener(NULL),
|
||||
mDtcDetector(NULL)
|
||||
{
|
||||
}
|
||||
|
||||
DtcMonitor::~DtcMonitor()
|
||||
{
|
||||
// if (mMutexLock)
|
||||
// delete mMutexLock;
|
||||
if (mDetectPoll)
|
||||
delete mDetectPoll;
|
||||
if (mListener)
|
||||
delete mListener;
|
||||
if (mDtcDetector)
|
||||
delete mDtcDetector;
|
||||
}
|
||||
|
||||
bool DtcMonitor::initMonitor()
|
||||
{
|
||||
// add poller to poller thread
|
||||
if (!mDetectPoll)
|
||||
{
|
||||
monitor_log_error("create thread poll failed.");
|
||||
return false;
|
||||
}
|
||||
|
||||
int ret = mDetectPoll->InitializeThread();
|
||||
if (ret < 0)
|
||||
{
|
||||
monitor_log_error("initialize thread poll failed.");
|
||||
return false;
|
||||
}
|
||||
|
||||
// add listener to poll
|
||||
bool rslt = addListenerToPoll();
|
||||
if (!rslt) return false;
|
||||
|
||||
rslt = addTimerEventToPoll();
|
||||
if (!rslt) return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool DtcMonitor::startMonitor()
|
||||
{
|
||||
bool rslt = initMonitor();
|
||||
if (!rslt) return false;
|
||||
|
||||
mDetectPoll->RunningThread();
|
||||
monitor_log_info("start DtcMonitor successful.");
|
||||
return true;
|
||||
}
|
||||
|
||||
bool DtcMonitor::addListenerToPoll()
|
||||
{
|
||||
mListener = new MonitorVoteListener(mDetectPoll);
|
||||
if (!mListener)
|
||||
{
|
||||
monitor_log_error("create listener instance failed");
|
||||
return false;
|
||||
}
|
||||
|
||||
int ret = mListener->Bind();
|
||||
if (ret < 0)
|
||||
{
|
||||
monitor_log_error("bind address failed.");
|
||||
return false;
|
||||
}
|
||||
|
||||
ret = mListener->attachThread();
|
||||
if (ret < 0)
|
||||
{
|
||||
monitor_log_error("add listener to poll failed.");
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// add detect agent, detect dtc event to the poll
|
||||
bool DtcMonitor::addTimerEventToPoll()
|
||||
{
|
||||
mDtcDetector = new DtcDetectHandler(mDetectPoll);
|
||||
if (!mDtcDetector)
|
||||
{
|
||||
monitor_log_error("create dtc detector failed.");
|
||||
return false;
|
||||
}
|
||||
mDtcDetector->addTimerEvent();
|
||||
|
||||
return true;
|
||||
}
|
@ -1,48 +0,0 @@
|
||||
/////////////////////////////////////////////////////////
|
||||
//
|
||||
// This class detects DTC instances at a regular interval
|
||||
// created by qiuyu on Nov 26, 2018
|
||||
////////////////////////////////////////////////////////
|
||||
|
||||
#ifndef __DTC_MONITOR_H__
|
||||
#define __DTC_MONITOR_H__
|
||||
|
||||
#include "singleton.h"
|
||||
|
||||
class DetectHandlerBase;
|
||||
class CPollThread;
|
||||
class MonitorVoteListener;
|
||||
|
||||
class DtcMonitor
|
||||
{
|
||||
private:
|
||||
// static DtcMonitor* mSelf;
|
||||
// static CMutex* mMutexLock;
|
||||
CPollThread* mDetectPoll;
|
||||
MonitorVoteListener* mListener;
|
||||
DetectHandlerBase* mDtcDetector;
|
||||
|
||||
public:
|
||||
DtcMonitor();
|
||||
virtual ~DtcMonitor();
|
||||
|
||||
static DtcMonitor* getInstance()
|
||||
{
|
||||
return CSingleton<DtcMonitor>::Instance();
|
||||
}
|
||||
// private:
|
||||
// DtcMonitor();
|
||||
// DtcMonitor(const DtcMonitor&);
|
||||
// DtcMonitor& operator=(const DtcMonitor&);
|
||||
|
||||
public:
|
||||
bool startMonitor();
|
||||
void stopMonitor();
|
||||
|
||||
private:
|
||||
bool initMonitor();
|
||||
bool addListenerToPoll();
|
||||
bool addTimerEventToPoll();
|
||||
};
|
||||
|
||||
#endif // __DTC_MONITOR_H__
|
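An illustrative startup sketch, assuming the usual process entry point (not shown in this commit) wires the pieces together; the singleton accessors used here are the ones declared above:

// DtcMonitorConfigMgr::getInstance()->init(...) is assumed to have run first
if (!DtcMonitor::getInstance()->startMonitor())   // binds the vote listener and arms the detect timer
{
    monitor_log_error("start monitor failed.");
    return -1;
}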
@ -1,581 +0,0 @@
|
||||
/////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// Handle All human configurable config
|
||||
// created by qiuyu on Nov 26, 2018
|
||||
////////////////////////////////////////////////////////////////
|
||||
|
||||
#include "DtcMonitorConfigMgr.h"
|
||||
#include "log.h"
|
||||
#include <sstream>
|
||||
#include <unistd.h>
|
||||
#include <fstream>
|
||||
#include <algorithm>
|
||||
#include <errno.h>
|
||||
|
||||
|
||||
// variable for gdb debug
|
||||
int sgPhysicalId = 0;
|
||||
|
||||
bool DtcMonitorConfigMgr::init(const std::string& path)
|
||||
{
|
||||
if (mHasInit)
|
||||
{
|
||||
monitor_log_info("has init this config yet.");
|
||||
return true;
|
||||
}
|
||||
|
||||
bool rslt = false;
|
||||
Json::Reader reader;
|
||||
Json::Value jsonStyleValue;
|
||||
|
||||
std::ifstream conf(path.c_str());
|
||||
if (!conf)
|
||||
{
|
||||
monitor_log_error("open config file failed. fileName:%s", path.c_str());
|
||||
return false;
|
||||
}
|
||||
|
||||
rslt = reader.parse(conf, jsonStyleValue);
|
||||
if (!rslt)
|
||||
{
|
||||
monitor_log_error("parse config to json failed!");
|
||||
return false;
|
||||
}
|
||||
|
||||
rslt = parseLogLevel(jsonStyleValue);
|
||||
if (!rslt) return false;
|
||||
|
||||
rslt = parseLogFilePath(jsonStyleValue);
|
||||
if (!rslt) return false;
|
||||
|
||||
rslt = parseGlobalPhysicalId(jsonStyleValue);
|
||||
if (!rslt) return false;
|
||||
|
||||
rslt = parseInvokeTimeout(jsonStyleValue);
|
||||
if (!rslt) return false;
|
||||
|
||||
// parse listen addr
|
||||
rslt = parseListenAddr(jsonStyleValue);
|
||||
if (!rslt) return false;
|
||||
|
||||
// parse cluster host address
|
||||
rslt = parseClusterHostsInfo(jsonStyleValue);
|
||||
if (!rslt) return false;
|
||||
|
||||
rslt = parseAdminClusterInfo(jsonStyleValue);
|
||||
if (!rslt) return false;
|
||||
|
||||
rslt = parseDtcConf(jsonStyleValue);
|
||||
if (!rslt) return false;
|
||||
|
||||
rslt = parseReportAlarmUrl(jsonStyleValue);
|
||||
if (!rslt) return false;
|
||||
|
||||
rslt = parseAlarmReceivers(jsonStyleValue);
|
||||
if (!rslt) return false;
|
||||
|
||||
rslt = parsePhysicalInfoUrl(jsonStyleValue);
|
||||
if (!rslt) return false;
|
||||
|
||||
rslt = parseConfigCenterContext(jsonStyleValue);
|
||||
if (!rslt) return false;
|
||||
|
||||
mHasInit = true;
|
||||
monitor_log_info("load customer config successful.");
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool DtcMonitorConfigMgr::parseLogLevel(const Json::Value& jsonValue)
|
||||
{
|
||||
// Json::Value dtc_config;
|
||||
if (!jsonValue.isMember("logLevel") || !jsonValue["logLevel"].isInt())
|
||||
{
|
||||
monitor_log_error("incorrect field in config.");
|
||||
return false;
|
||||
}
|
||||
|
||||
mConfs.sLogLevel = jsonValue["logLevel"].asInt();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool DtcMonitorConfigMgr::parseLogFilePath(const Json::Value& jsonValue)
|
||||
{
|
||||
// Json::Value dtc_config;
|
||||
if (!jsonValue.isMember("logFilePath") || !jsonValue["logFilePath"].isString())
|
||||
{
|
||||
monitor_log_error("incorrect field in config.");
|
||||
return false;
|
||||
}
|
||||
|
||||
mConfs.sLogFilePath = jsonValue["logFilePath"].asString();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool DtcMonitorConfigMgr::parseGlobalPhysicalId(const Json::Value& jsonValue)
|
||||
{
|
||||
// 1.The cluster is brought up from the website, so user-configurable fields are not
// allowed here; this field has therefore been removed from the config.
// 2.Hard-coding the physical id does no harm: it is only used to tell which physical
// node created a sequenceId, so the kernel thread id is used in its place.
// (The parsing code below the early return is kept for reference but never runs.)
|
||||
sgPhysicalId = syscall(__NR_gettid);
|
||||
mConfs.sGlobalPhysicalId = sgPhysicalId % (0x7F); // one byte for physical id
|
||||
monitor_log_crit("Only print it out for debugging, physicalId:%d", sgPhysicalId);
|
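// worked example: a kernel tid of 34567 gives 34567 % 0x7F (127) = 23, so
// sGlobalPhysicalId becomes 23; per-process uniqueness is all that matters here.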
||||
return true;
|
||||
|
||||
if (!jsonValue.isMember("physicalId") || !jsonValue["physicalId"].isInt())
|
||||
{
|
||||
monitor_log_error("incorrect field in config.");
|
||||
return false;
|
||||
}
|
||||
|
||||
int phyId = jsonValue["physicalId"].asInt();
|
||||
if (phyId < 0)
|
||||
{
|
||||
monitor_log_error("physical id can not be negative, physicalId:%d", phyId);
|
||||
return false;
|
||||
}
|
||||
mConfs.sGlobalPhysicalId = phyId;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// The detect timeout differs by physical zone, so the configured invoke timeout is
// effectively useless; it is evaluated dynamically instead (hence the early return below).
|
||||
bool DtcMonitorConfigMgr::parseInvokeTimeout(const Json::Value& jsonValue)
|
||||
{
|
||||
return true;
|
||||
if (!jsonValue.isMember("invokeTimeout") || !jsonValue["invokeTimeout"].isInt())
|
||||
{
|
||||
monitor_log_error("incorrect field in config.");
|
||||
return false;
|
||||
}
|
||||
|
||||
int timeout = jsonValue["invokeTimeout"].asInt();
|
||||
if (timeout < 0)
|
||||
{
|
||||
monitor_log_error("invokeTimeout can not be negative, invokeTimeout:%d", timeout);
|
||||
return false;
|
||||
}
|
||||
mConfs.sInvokeTimeout = timeout;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool DtcMonitorConfigMgr::parseListenAddr(const Json::Value& jsonValue)
|
||||
{
|
||||
if (!jsonValue.isMember("listenAddr") || !jsonValue["listenAddr"].isObject())
|
||||
{
|
||||
monitor_log_error("incorrect field in config.");
|
||||
return false;
|
||||
}
|
||||
|
||||
const Json::Value& listenAddr = jsonValue["listenAddr"];
|
||||
if (!listenAddr.isMember("ip") || !listenAddr["ip"].isString())
|
||||
{
|
||||
monitor_log_error("parse listenAddr ip failed.");
|
||||
return false;
|
||||
}
|
||||
std::string ip = listenAddr["ip"].asString();
|
||||
if (ip.empty())
|
||||
{
|
||||
monitor_log_error("ip can not be empty.");
|
||||
return false;
|
||||
}
|
||||
trimBlank(ip);
|
||||
if (ip == "*")
|
||||
{
|
||||
std::vector<std::string> localIps;
|
||||
bool rslt = getLocalIps(localIps);
|
||||
if (!rslt) return false;
|
||||
ip = localIps[0];
|
||||
}
|
||||
|
||||
if (!listenAddr.isMember("port") || !listenAddr["port"].isInt())
|
||||
{
|
||||
monitor_log_error("parse listenAddr port failed.");
|
||||
return false;
|
||||
}
|
||||
int port = listenAddr["port"].asInt();
|
||||
if (port <= 0)
|
||||
{
|
||||
monitor_log_error("parse port failed. port:%d", port);
|
||||
return false;
|
||||
}
|
||||
|
||||
mConfs.sListenAddr = std::make_pair(ip, port);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool DtcMonitorConfigMgr::parseClusterHostsInfo(const Json::Value& jsonValue)
|
||||
{
|
||||
if (!jsonValue.isMember("clusterHosts") || !jsonValue["clusterHosts"].isArray())
|
||||
{
|
||||
monitor_log_error("incorrect field in config.");
|
||||
return false;
|
||||
}
|
||||
|
||||
std::vector<std::string> localIps;
|
||||
bool rslt = getLocalIps(localIps);
|
||||
if (!rslt) return false;
|
||||
|
||||
std::string ip;
|
||||
int port;
|
||||
const Json::Value& clusterInfo = jsonValue["clusterHosts"];
|
||||
for (int idx = 0; idx < (int)clusterInfo.size(); idx++)
|
||||
{
|
||||
ip = "";
|
||||
port = -1;
|
||||
const Json::Value& host = clusterInfo[idx];
|
||||
if (!host.isMember("ip") || !host["ip"].isString())
|
||||
{
|
||||
monitor_log_error("parse host ip failed.");
|
||||
return false;
|
||||
}
|
||||
ip = host["ip"].asString();
|
||||
if (ip.empty())
|
||||
{
|
||||
monitor_log_error("ip can not be empty.");
|
||||
return false;
|
||||
}
|
||||
|
||||
// filter the local ip from the cluster config
|
||||
if (std::find(localIps.begin(), localIps.end(), ip) != localIps.end())
|
||||
{
|
||||
monitor_log_info("filter local ip:%s", ip.c_str());
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!host.isMember("port") || !host["port"].isInt())
|
||||
{
|
||||
monitor_log_error("parse host port failed.");
|
||||
return false;
|
||||
}
|
||||
port = host["port"].asInt();
|
||||
if (port <= 0)
|
||||
{
|
||||
monitor_log_error("parse port failed. port:%d", port);
|
||||
return false;
|
||||
}
|
||||
|
||||
mConfs.sClusterInfo.push_back(std::make_pair(ip, port));
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool DtcMonitorConfigMgr::parseAdminClusterInfo(const Json::Value& jsonValue)
|
||||
{
|
||||
if (!jsonValue.isMember("adminHosts") || !jsonValue["adminHosts"].isArray())
|
||||
{
|
||||
monitor_log_error("incorrect field in config.");
|
||||
return false;
|
||||
}
|
||||
const Json::Value& adminHosts = jsonValue["adminHosts"];
|
||||
|
||||
for (int idx = 0; idx < (int)adminHosts.size(); idx++)
|
||||
{
|
||||
const Json::Value& host = adminHosts[idx];
|
||||
if (!host.isMember("ip") || !host["ip"].isString())
|
||||
{
|
||||
monitor_log_error("parse admin ip failed.");
|
||||
return false;
|
||||
}
|
||||
std::string ip = host["ip"].asString();
|
||||
|
||||
if (!host.isMember("port") || !host["port"].isInt())
|
||||
{
|
||||
monitor_log_error("parse admin port failed.");
|
||||
return false;
|
||||
}
|
||||
int port = host["port"].asInt();
|
||||
if (port <= 0)
|
||||
{
|
||||
monitor_log_error("parse admin port failed. port:%d", port);
|
||||
return false;
|
||||
}
|
||||
|
||||
mConfs.sAdminInfo.push_back(std::make_pair(ip, port));
|
||||
}
|
||||
|
||||
if (mConfs.sAdminInfo.size() == 0) {
|
||||
monitor_log_error("admin info empty.");
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool DtcMonitorConfigMgr::parseDtcConf(const Json::Value& jsonValue)
|
||||
{
|
||||
if (!jsonValue.isMember("dtcInfo") || !jsonValue["dtcInfo"].isObject())
|
||||
{
|
||||
monitor_log_error("incorrect field in config.");
|
||||
return false;
|
||||
}
|
||||
|
||||
const Json::Value& dtcInfo = jsonValue["dtcInfo"];
|
||||
const Json::Value& timeoutSet = dtcInfo["detectTimeoutSet"];
|
||||
if (!(timeoutSet.isMember("sameZoneTimeout") && timeoutSet["sameZoneTimeout"].isInt())
|
||||
|| !(timeoutSet.isMember("domesticZoneTimeout") && timeoutSet["domesticZoneTimeout"].isInt())
|
||||
|| !(timeoutSet.isMember("abroadZoneTimeout") && timeoutSet["abroadZoneTimeout"].isInt()))
|
||||
{
|
||||
monitor_log_error("timeoutSet configuration error, check it!");
|
||||
return false;
|
||||
}
|
||||
|
||||
mConfs.sDtcConf.sTimeoutSet.sSameZoneTimeout     = timeoutSet["sameZoneTimeout"].asInt();
mConfs.sDtcConf.sTimeoutSet.sDomesticZoneTimeout = timeoutSet["domesticZoneTimeout"].asInt();
mConfs.sDtcConf.sTimeoutSet.sAbroadZoneTimeout   = timeoutSet["abroadZoneTimeout"].asInt();
int& ss = mConfs.sDtcConf.sTimeoutSet.sSameZoneTimeout;
int& sd = mConfs.sDtcConf.sTimeoutSet.sDomesticZoneTimeout;
int& sa = mConfs.sDtcConf.sTimeoutSet.sAbroadZoneTimeout;
|
||||
if (!mConfs.sDtcConf.sTimeoutSet.isValid())
|
||||
{
|
||||
monitor_log_error("configuration error, check it!");
|
||||
return false;
|
||||
}
|
||||
// for risk controlling
|
||||
ss = ss >= eDtcDefaultTimeout ? ss : eDtcDefaultTimeout;
|
||||
sd = sd >= eDtcDefaultTimeout ? sd : eDtcDefaultTimeout;
|
||||
sa = sa >= eDtcDefaultTimeout ? sa : eDtcDefaultTimeout;
|
||||
|
||||
// event driver timeout
|
||||
if (!(dtcInfo.isMember("detectPeriod") && dtcInfo["detectPeriod"].isInt()))
|
||||
{
|
||||
monitor_log_error("configuration error, check it!");
|
||||
return false;
|
||||
}
|
||||
int timeout = dtcInfo["detectPeriod"].asInt();
|
||||
mConfs.sDtcConf.sEventDriverTimeout = timeout > 0 ? timeout : eDtcDefaultTimeout;
|
||||
|
||||
int step;
|
||||
if (!dtcInfo.isMember("detectStep") || !dtcInfo["detectStep"].isInt())
|
||||
{
|
||||
monitor_log_info("maybe the detectStep field is missing from dtcInfo.");
|
||||
// return false;
|
||||
step = -1;
|
||||
}
|
||||
else
|
||||
{
|
||||
step = dtcInfo["detectStep"].asInt();
|
||||
}
|
||||
step = step > 0 ? step : eDtcDefaultStep;
|
||||
mConfs.sDtcConf.sDetectStep = step;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool DtcMonitorConfigMgr::parseReportAlarmUrl(const Json::Value& jsonValue)
|
||||
{
|
||||
// Json::Value dtc_config;
|
||||
if (!jsonValue.isMember("reportAlarmUrl") || !jsonValue["reportAlarmUrl"].isString())
|
||||
{
|
||||
monitor_log_error("incorrect field in config.");
|
||||
return false;
|
||||
}
|
||||
|
||||
std::string reportAlarmUrl = jsonValue["reportAlarmUrl"].asString();
|
||||
if (reportAlarmUrl.empty())
|
||||
{
|
||||
monitor_log_error("reportAlarmUrl can not be empty.");
|
||||
return false;
|
||||
}
|
||||
|
||||
mConfs.sReportAlarmUrl= reportAlarmUrl;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool DtcMonitorConfigMgr::parseAlarmReceivers(const Json::Value& jsonValue)
|
||||
{
|
||||
if (!jsonValue.isMember("alarmReceivers") || !jsonValue["alarmReceivers"].isArray())
|
||||
{
|
||||
monitor_log_error("incorrect field in config.");
|
||||
return false;
|
||||
}
|
||||
|
||||
std::string reci;
|
||||
std::string recList = "";
|
||||
const Json::Value& recieves = jsonValue["alarmReceivers"];
|
||||
for (int idx = 0; idx < (int)recieves.size(); idx++)
|
||||
{
|
||||
if (!recieves[idx].isString())
|
||||
{
|
||||
monitor_log_error("parse alarmReceivers failed.");
|
||||
return false;
|
||||
}
|
||||
reci = recieves[idx].asString();
|
||||
if (reci.empty())
|
||||
{
|
||||
monitor_log_error("receiver can not be empty.");
|
||||
continue;
|
||||
}
|
||||
|
||||
recList.append(reci);
|
||||
if (idx != (int)recieves.size() - 1)
|
||||
recList.append(";");
|
||||
}
|
||||
|
||||
if (recList.empty())
|
||||
{
|
||||
monitor_log_error("receiver list can not be empty.");
|
||||
return false;
|
||||
}
|
||||
mConfs.sAlarmReceivers = recList;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool DtcMonitorConfigMgr::parsePhysicalInfoUrl(const Json::Value& jsonValue)
|
||||
{
|
||||
if (!jsonValue.isMember("getPhysicalInfoUrl") || !jsonValue["getPhysicalInfoUrl"].isString())
|
||||
{
|
||||
monitor_log_error("incorrect field in config.");
|
||||
return false;
|
||||
}
|
||||
|
||||
std::string physicalInfoUrl = jsonValue["getPhysicalInfoUrl"].asString();
|
||||
if (physicalInfoUrl.empty())
|
||||
{
|
||||
monitor_log_error("getPhysicalInfoUrl can not be empty.");
|
||||
return false;
|
||||
}
|
||||
|
||||
mConfs.sGetPhysicalInfoUrl = physicalInfoUrl;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool DtcMonitorConfigMgr::parseConfigCenterContext(const Json::Value& jsonValue)
|
||||
{
|
||||
if (!jsonValue.isMember("Config") || !jsonValue["Config"].isObject())
|
||||
{
|
||||
monitor_log_error("incorrect centerConfig info in config.");
|
||||
return false;
|
||||
}
|
||||
|
||||
const Json::Value& oConfigContext = jsonValue["Config"];
|
||||
if (!oConfigContext.isMember("CaDir") || !oConfigContext["CaDir"].isString())
|
||||
{
|
||||
monitor_log_error("parse Config CaDir failed.");
|
||||
return false;
|
||||
}
|
||||
std::string sCaDir = oConfigContext["CaDir"].asString();
|
||||
if (sCaDir.empty())
|
||||
{
|
||||
monitor_log_error("CaDir can not be empty.");
|
||||
return false;
|
||||
}
|
||||
mConfs.oConfigCenterContext.sCaDirPath = sCaDir;
|
||||
|
||||
if (!oConfigContext.isMember("CaPid") || !oConfigContext["CaPid"].isInt())
|
||||
{
|
||||
monitor_log_error("parse Config CaPid failed.");
|
||||
return false;
|
||||
}
|
||||
int iCaPid = oConfigContext["CaPid"].asInt();
|
||||
if (iCaPid <= 0)
|
||||
{
|
||||
monitor_log_error("parse Config CaPid failed, caPid:%d", iCaPid);
|
||||
return false;
|
||||
}
|
||||
mConfs.oConfigCenterContext.iCaPid = iCaPid;
|
||||
|
||||
std::stringstream sTemp;
|
||||
sTemp << iCaPid;
|
||||
mConfs.oConfigCenterContext.sValidDir = sCaDir + sTemp.str();
|
||||
|
||||
return true;
|
||||
}
|
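Putting the parse* methods above together, the config file this manager expects is assumed to look roughly as follows; the field names come from the parsing code, every value is a made-up placeholder, and physicalId/invokeTimeout are accepted but ignored because their parsers return early:

// {
//   "logLevel": 6,
//   "logFilePath": "../log/monitor.log",
//   "listenAddr":   { "ip": "*", "port": 10086 },
//   "clusterHosts": [ { "ip": "192.168.0.2", "port": 10086 },
//                     { "ip": "192.168.0.3", "port": 10086 } ],
//   "adminHosts":   [ { "ip": "192.168.0.4", "port": 10087 } ],
//   "dtcInfo": {
//     "detectTimeoutSet": { "sameZoneTimeout": 2000,
//                           "domesticZoneTimeout": 3000,
//                           "abroadZoneTimeout": 5000 },
//     "detectPeriod": 3000,
//     "detectStep": 70
//   },
//   "reportAlarmUrl":     "http://alarm.example.invalid/report",
//   "alarmReceivers":     [ "ops1", "ops2" ],
//   "getPhysicalInfoUrl": "http://cmdb.example.invalid/physicalInfo",
//   "Config": { "CaDir": "/export/ca/", "CaPid": 12345 }
// }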
||||
|
||||
bool DtcMonitorConfigMgr::getLocalIps(std::vector<std::string>& localIps)
|
||||
{
|
||||
#ifdef TEST_MONITOR
|
||||
return true;
|
||||
#else
|
||||
localIps.clear();
|
||||
|
||||
// this way of getting the host ip has issues if the hostname is "localdomain"
|
||||
// char hname[128];
|
||||
// struct hostent *hent;
|
||||
//
|
||||
// gethostname(hname, sizeof(hname));
|
||||
// monitor_log_info("local hostname:%s", hname);
|
||||
//
|
||||
// hent = gethostbyname(hname);
|
||||
// for(int idx = 0; hent->h_addr_list[idx]; idx++)
|
||||
// {
|
||||
// std::string ip(inet_ntoa(*(struct in_addr*)(hent->h_addr_list[idx])));
|
||||
// monitor_log_info("local host ip:%s", ip.c_str());
|
||||
//
|
||||
// if (ip.empty()) continue;
|
||||
// localIps.push_back(ip);
|
||||
// }
|
||||
//
|
||||
// if (localIps.size() <= 0)
|
||||
// {
|
||||
// monitor_log_error("get local host ip failed, need to check it.");
|
||||
// return false;
|
||||
// }
|
||||
|
||||
// get hostname from shell
|
||||
const char* shell = "ifconfig | grep inet | grep -v inet6 |\
|
||||
grep -v 127 | awk '{print $2}' | awk -F \":\" '{print $2}'";
|
||||
|
||||
FILE *fp;
|
||||
if ((fp = popen(shell, "r")) == NULL)
|
||||
{
|
||||
monitor_log_info("open the shell command failed.");
|
||||
return false;
|
||||
}
|
||||
|
||||
// maybe has multiple network card
|
||||
char buf[256];
|
||||
while (fgets(buf, 255, fp) != NULL)
|
||||
{
|
||||
monitor_log_info("local ip:%s", buf);
|
||||
|
||||
    // remove the trailing '\n' that fgets keeps at the end of the buffer
    std::string ip(buf);
    if (ip.empty() || ip[ip.length() - 1] != '\n')
    {
      monitor_log_error("syntax error for fgets, ip:%s", ip.c_str());
      pclose(fp);
      return false;
    }
    ip.erase(ip.begin() + (ip.length() - 1));
    monitor_log_info("local ip:%s", ip.c_str());
    localIps.push_back(ip);
  }

  // pclose must be called once, after the whole output has been read; calling it
  // inside the loop would close the stream while fgets is still iterating over it.
  // the main function has ignored SIGCHLD, so the child started by popen may already
  // have been reaped; in that case pclose returns -1 with errno set to ECHILD
  if (pclose(fp) == -1 && errno != ECHILD)
  {
    monitor_log_info("close the file descriptor failed. errno:%d", errno);
    return false;
  }
|
||||
|
||||
if (localIps.empty())
|
||||
{
|
||||
monitor_log_error("get local ips failed.");
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
#endif
|
||||
}
|
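As an illustrative aside: parsing ifconfig output through popen is fragile (the binary may be absent and its output format varies). A sketch of an alternative based on the standard getifaddrs(3) API is below; the helper name is hypothetical and error handling is reduced to the minimum.

#include <ifaddrs.h>
#include <arpa/inet.h>
#include <netinet/in.h>

static bool getLocalIpsViaIfaddrs(std::vector<std::string>& localIps) // hypothetical helper
{
  struct ifaddrs* ifList = NULL;
  if (getifaddrs(&ifList) != 0) return false;
  for (struct ifaddrs* ifa = ifList; ifa != NULL; ifa = ifa->ifa_next)
  {
    if (!ifa->ifa_addr || ifa->ifa_addr->sa_family != AF_INET) continue; // IPv4 only
    char buf[INET_ADDRSTRLEN] = {0};
    const struct sockaddr_in* sin = (const struct sockaddr_in*)ifa->ifa_addr;
    if (!inet_ntop(AF_INET, &sin->sin_addr, buf, sizeof(buf))) continue;
    std::string ip(buf);
    if (ip.compare(0, 4, "127.") != 0) localIps.push_back(ip); // skip loopback, like the shell filter
  }
  freeifaddrs(ifList);
  return !localIps.empty();
}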
||||
|
||||
void DtcMonitorConfigMgr::trimBlank(std::string& src)
|
||||
{
|
||||
for (std::string::iterator itr = src.begin(); itr != src.end();)
|
||||
{
|
||||
if (*itr == ' ' || *itr == '\t' || *itr == '\r' || *itr == '\n')
|
||||
itr = src.erase(itr);
|
||||
else
|
||||
itr++;
|
||||
}
|
||||
}
|
@ -1,164 +0,0 @@
|
||||
/////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// Handle All human configurable config
|
||||
// created by qiuyu on Nov 26, 2018
|
||||
////////////////////////////////////////////////////////////////
|
||||
#ifndef __DTC_MONITOR_CONFIG_MGR__
|
||||
#define __DTC_MONITOR_CONFIG_MGR__
|
||||
|
||||
#include "singleton.h"
|
||||
#include "json/json.h"
|
||||
|
||||
#include <vector>
|
||||
#include <utility>
|
||||
#include <string>
|
||||
#include <stdint.h>
|
||||
|
||||
// The TEST_MONITOR macro separates the test environment (defined) from the
// staging/production mysql settings (not defined)
|
||||
//#define TEST_MONITOR
|
||||
|
||||
#ifndef monitor_log_error
|
||||
#define monitor_log_error(format, args...) \
|
||||
log_error("<%ld>" format, pthread_self(),##args)
|
||||
#endif
|
||||
|
||||
#ifndef monitor_log_info
|
||||
#define monitor_log_info(format, args...) \
|
||||
log_info("<%ld>" format, pthread_self(),##args)
|
||||
#endif
|
||||
|
||||
#ifndef monitor_log_crit
|
||||
#define monitor_log_crit(format, args...) \
|
||||
log_crit("<%ld>" format, pthread_self(),##args)
|
||||
#endif
|
||||
|
||||
#ifndef monitor_log_warning
|
||||
#define monitor_log_warning(format, args...) \
|
||||
log_warning("<%ld>" format, pthread_self(),##args)
|
||||
#endif
|
||||
|
||||
#ifndef monitor_log_debug
|
||||
#define monitor_log_debug(format, args...) \
|
||||
log_debug("<%ld>" format, pthread_self(),##args)
|
||||
#endif
|
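A quick illustrative expansion of these wrappers: the "<%ld>" literal is concatenated with the caller's format string at compile time and the thread id is passed first, e.g.

// monitor_log_error("bind address failed, port:%d", port);
// becomes
// log_error("<%ld>" "bind address failed, port:%d", pthread_self(), port);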
||||
|
||||
class DtcMonitorConfigMgr
|
||||
{
|
||||
public:
|
||||
typedef std::pair<std::string, int> Pair_t;
|
||||
typedef std::vector<std::pair<std::string, int> > PairVector_t;
|
||||
|
||||
enum
|
||||
{
|
||||
eAgentDefaultTimeout = 2000, //ms
|
||||
eAgentDefaultStep = 50,
|
||||
eDtcDefaultTimeout = 2000, // ms
|
||||
eDtcDefaultStep = 70
|
||||
};
|
||||
|
||||
typedef struct TimeoutSet
|
||||
{
|
||||
int sSameZoneTimeout; // ms
|
||||
int sDomesticZoneTimeout;
|
||||
int sAbroadZoneTimeout;
|
||||
|
||||
bool isValid()
|
||||
{
|
||||
return sSameZoneTimeout > 0 && sDomesticZoneTimeout > 0 && sAbroadZoneTimeout > 0;
|
||||
}
|
||||
}TimeoutSet_t;
|
||||
|
||||
private:
|
||||
typedef struct DtcConfig
|
||||
{
|
||||
TimeoutSet_t sTimeoutSet; // ms
|
||||
int sEventDriverTimeout; // ms
|
||||
int sDetectStep;
|
||||
}DtcConf_t;
|
||||
|
||||
typedef struct ConfigCenter
|
||||
{
|
||||
std::string sCaDirPath;
|
||||
int iCaPid;
|
||||
std::string sValidDir;
|
||||
}ConfigCenterContext;
|
||||
|
||||
typedef struct ConfigList
|
||||
{
|
||||
int sLogLevel;
|
||||
std::string sLogFilePath;
|
||||
int sGlobalPhysicalId; // for creating global sequence id
|
||||
int sInvokeTimeout;
|
||||
std::pair<std::string, int> sListenAddr;
|
||||
PairVector_t sAdminInfo;
|
||||
PairVector_t sClusterInfo;
|
||||
DtcConf_t sDtcConf;
|
||||
std::string sReportAlarmUrl;
|
||||
std::string sAlarmReceivers;
|
||||
std::string sGetPhysicalInfoUrl;
|
||||
ConfigCenterContext oConfigCenterContext;
|
||||
}ConfList_t;
|
||||
|
||||
bool mHasInit;
|
||||
ConfList_t mConfs;
|
||||
|
||||
public:
|
||||
DtcMonitorConfigMgr() { mHasInit = false;}
|
||||
|
||||
static DtcMonitorConfigMgr* getInstance()
|
||||
{
|
||||
return CSingleton<DtcMonitorConfigMgr>::Instance();
|
||||
}
|
||||
|
||||
static void Destroy()
|
||||
{
|
||||
CSingleton<DtcMonitorConfigMgr>::Destroy();
|
||||
}
|
||||
|
||||
bool init(const std::string& path);
|
||||
|
||||
inline int getLogLevel() { return mConfs.sLogLevel; }
|
||||
inline const std::string& getLogFilePath() { return mConfs.sLogFilePath; }
|
||||
|
||||
inline const std::pair<std::string, int>& getListenAddr() { return mConfs.sListenAddr; }
|
||||
|
||||
inline int getDtcDriverTimeout() { return mConfs.sDtcConf.sEventDriverTimeout; }
|
||||
inline int getDtcDetectStep() { return mConfs.sDtcConf.sDetectStep; }
|
||||
inline const TimeoutSet_t getDtcTimeoutSet() { return mConfs.sDtcConf.sTimeoutSet; }
|
||||
|
||||
const PairVector_t& getAdminInfo() { return mConfs.sAdminInfo; }
|
||||
const PairVector_t& getClusterInfo() { return mConfs.sClusterInfo; }
|
||||
|
||||
inline const std::string& getReportAlarmUrl() {return mConfs.sReportAlarmUrl; }
|
||||
|
||||
inline const std::string& getReceiverList() { return mConfs.sAlarmReceivers; }
|
||||
|
||||
inline int getInvokeTimeout() { return mConfs.sInvokeTimeout; }
|
||||
inline int getPhysicalId() { return mConfs.sGlobalPhysicalId; }
|
||||
|
||||
inline std::string getPhysicalInfoUrl() { return mConfs.sGetPhysicalInfoUrl; }
|
||||
|
||||
inline const std::string& getCaDirPath() const { return mConfs.oConfigCenterContext.sCaDirPath;}
|
||||
inline int getCaPid() { return mConfs.oConfigCenterContext.iCaPid;}
|
||||
inline const std::string& getValidDir() const { return mConfs.oConfigCenterContext.sValidDir;}
|
||||
|
||||
private:
|
||||
bool parseLogLevel(const Json::Value& jsonValue);
|
||||
bool parseLogFilePath(const Json::Value& jsonValue);
|
||||
bool parseGlobalPhysicalId(const Json::Value& jsonValue);
|
||||
bool parseInvokeTimeout(const Json::Value& jsonValue);
|
||||
bool parseListenAddr(const Json::Value& jsonValue);
|
||||
bool parseClusterHostsInfo(const Json::Value& jsonValue);
|
||||
bool parseAdminClusterInfo(const Json::Value& jsonValue);
|
||||
bool parseAgentConf(const Json::Value& jsonValue);
|
||||
bool parseDtcConf(const Json::Value& jsonValue);
|
||||
bool parseReportAlarmUrl(const Json::Value& jsonValue);
|
||||
bool parseAlarmReceivers(const Json::Value& jsonValue);
|
||||
bool parsePhysicalInfoUrl(const Json::Value& jsonValue);
|
||||
bool parseConfigCenterContext(const Json::Value& jsonValue);
|
||||
bool getLocalIps(std::vector<std::string>& localIps);
|
||||
void trimBlank(std::string& src);
|
||||
|
||||
};
|
||||
|
||||
#endif // __DTC_MONITOR_CONFIG_MGR__
|
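An illustrative access pattern, using only the members declared above (the config path is a placeholder):

DtcMonitorConfigMgr* conf = DtcMonitorConfigMgr::getInstance();
if (!conf->init("../conf/dtcmonitor.conf"))   // placeholder path
    return false;
const std::pair<std::string, int>& listenAddr = conf->getListenAddr();
const DtcMonitorConfigMgr::PairVector_t& peers = conf->getClusterInfo();
int driverTimeout = conf->getDtcDriverTimeout();   // ms, drives the detect timer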
@ -1,233 +0,0 @@
|
||||
////////////////////////////////////////////////////////////
|
||||
//
|
||||
// invoke client for invoking the peer monitor node
|
||||
// created by qiuyu on Nov 27, 2018
|
||||
//
|
||||
///////////////////////////////////////////////////////////
|
||||
|
||||
#include "InvokeHandler.h"
|
||||
#include "log.h"
|
||||
#include "DetectUtil.h"
|
||||
#include "MonitorVoteHandler.h"
|
||||
#include "InvokeMgr.h"
|
||||
|
||||
#define __STDC_FORMAT_MACROS
|
||||
#include <inttypes.h>
|
||||
|
||||
InvokeHandler::InvokeHandler(
|
||||
CPollThread* poll,
|
||||
const std::string& ip,
|
||||
const int port)
|
||||
:
|
||||
CPollerObject(poll, 0),
|
||||
mIp(ip),
|
||||
mPort(port)
|
||||
{
|
||||
}
|
||||
|
||||
InvokeHandler::~InvokeHandler()
|
||||
{
|
||||
}
|
||||
|
||||
bool InvokeHandler::initHandler(const bool isInit)
|
||||
{
|
||||
connectToServer();
|
||||
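// a failed connect is tolerated during initial setup (isInit == true): the peer may
// simply not be up yet and will be reconnected on the next vote; on the reconnect
// path (isInit == false) a dead socket is a hard failure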
if (isInit && netfd <= 0) return true;
|
||||
if (!isInit && netfd <= 0) return false;
|
||||
|
||||
|
||||
return attachThread();
|
||||
}
|
||||
|
||||
bool InvokeHandler::connectToServer()
|
||||
{
|
||||
bool rslt = DetectUtil::connectServer(netfd, mIp, mPort);
|
||||
if (!rslt)
|
||||
{
|
||||
// return false;
|
||||
// if the connect failed the server may not have started up yet; reconnect
// the next time a vote is sent
|
||||
netfd = -1;
|
||||
monitor_log_error("connect to the server failed.");
|
||||
}
|
||||
else
|
||||
{
|
||||
monitor_log_error("connect to server successful, ip:%s, port:%d, netfd:%d", mIp.c_str(), mPort, netfd);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool InvokeHandler::attachThread()
|
||||
{
|
||||
if (netfd <= 0)
|
||||
{
|
||||
monitor_log_error("invalid socket fd.");
|
||||
return false;
|
||||
}
|
||||
|
||||
EnableInput();
|
||||
int ret = CPollerObject::AttachPoller();
|
||||
if (ret < 0)
|
||||
{
|
||||
monitor_log_error("add event to poll failed.");
|
||||
return false;
|
||||
}
|
||||
monitor_log_error("add invoke handler event to the poll successful. netfd:%d", netfd);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Notice:
|
||||
* 1.for now, TCP sticky packets (partial or merged reads) are not handled!
* 2.timeout is returned as the larger of the local timeout and the peer timeout
|
||||
*/
|
||||
bool InvokeHandler::invokeVote(
|
||||
const DetectHandlerBase::DetectType& type,
|
||||
const std::string& detectedAddr,
|
||||
const uint64_t sequenceId,
|
||||
const std::string& data,
|
||||
int& timeout)
|
||||
{
|
||||
if (data.length() <= 0)
|
||||
{
|
||||
monitor_log_error("invoke failed. %s:%d, sequenceId:%" PRIu64, mIp.c_str(), mPort, sequenceId);
|
||||
return false;
|
||||
}
|
||||
|
||||
monitor_log_error("invoke to %s:%d, timeout:%d, sequenceId:%" PRIu64, mIp.c_str(), mPort, timeout, sequenceId);
|
||||
|
||||
if (netfd <= 0)
|
||||
{
|
||||
// reconnect the socket
|
||||
bool rslt = initHandler(false);
|
||||
if (!rslt)
|
||||
{
|
||||
monitor_log_error("invoke vote failed. ip:%s, port:%d", mIp.c_str(), mPort);
|
||||
return false;
|
||||
}
|
||||
// bool rslt = DetectUtil::connectServer(netfd, mIp, mPort);
|
||||
// if (!rslt || netfd <= 0)
|
||||
// {
|
||||
// monitor_log_error("invoke vote failed. netfd:%d", netfd);
|
||||
// return false;
|
||||
// }
|
||||
}
|
||||
|
||||
// the socket should be set to blocking mode, otherwise this call path may misbehave
|
||||
char* request = (char*)malloc(sizeof(MonitorVoteHandler::VoteRequest_t) + data.length());
|
||||
MonitorVoteHandler::VoteRequest_t* pData = (MonitorVoteHandler::VoteRequest_t*)request;
|
||||
pData->sMagicNum = htons(MonitorVoteHandler::sgMagicNum);
|
||||
pData->sSequenceId = sequenceId;
|
||||
DetectUtil::translateByteOrder(pData->sSequenceId);
|
||||
|
||||
// calculate the peer timeout value and pick the bigger one
|
||||
static std::string peerAddr = mIp + DetectHandlerBase::toString<long long int>(mPort); // only c++11 support
|
||||
int peerTimeout = DetectHandlerBase::getInstanceTimeout(type, peerAddr, detectedAddr);
|
||||
timeout = peerTimeout > timeout ? peerTimeout : timeout;
|
||||
pData->sTimeout = htonl(timeout);
|
||||
|
||||
pData->sDetectType = (DetectHandlerBase::DetectType)htonl((uint32_t)type);
|
||||
pData->sDataLen = htons(data.length());
|
||||
memcpy(pData->sDataBuff, data.c_str(), data.length());
|
||||
|
||||
int dataLen = sizeof(MonitorVoteHandler::VoteRequest_t) + data.length();
|
||||
int ret = DetectUtil::sendMessage(netfd, request, dataLen);
|
||||
free(request); // request was allocated with malloc, so it must be released with free
|
||||
if (ret != dataLen)
|
||||
{
|
||||
monitor_log_error("invoke vote failed. netfd:%d", netfd);
|
||||
|
||||
CPollerObject::DetachPoller();
|
||||
close(netfd);
|
||||
netfd = -1;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
monitor_log_error("send data successful, fd:%d, sequenceId:%" PRIu64, netfd, sequenceId);
|
||||
|
||||
return true;
|
||||
}
|
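An illustrative note on the request put on the wire above: the exact VoteRequest_t layout lives in MonitorVoteHandler.h (not shown in this hunk), but from the assignments the packet is assumed to carry, in some order:

// sMagicNum    : 16 bits, htons         -- protocol magic
// sSequenceId  : 64 bits, byte-swapped  -- global sequence id of this vote
// sTimeout     : 32 bits, htonl         -- larger of local and peer detect timeout
// sDetectType  : 32 bits, htonl         -- DetectHandlerBase::DetectType
// sDataLen     : 16 bits, htons         -- length of the detect payload
// sDataBuff    : sDataLen bytes         -- the detect payload itself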
||||
|
||||
// handle voted response from peer
|
||||
void InvokeHandler::InputNotify()
|
||||
{
|
||||
#if 0
|
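// the block below is dead code kept for reference; the live path is the #else
// branch, which reads the whole VoteResponse_t in a single recv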
||||
uint16_t magic;
|
||||
int len = DetectUtil::recieveMessage(netfd, (char*)&magic, sizeof(uint16_t));
|
||||
if (0 == len)
|
||||
{
|
||||
monitor_log_error("client close the fd:%d.", netfd);
|
||||
CPollerObject::DetachPoller();
|
||||
// MonitorVoteHandlerMgr::getInstance()->removeHandler(this);
|
||||
netfd = -1;
|
||||
callBack(0, false);
|
||||
return;
|
||||
}
|
||||
|
||||
if (magic != MonitorVoteHandler::sgMagicNum)
|
||||
{
|
||||
monitor_log_error("receive message failed.");
|
||||
callBack(0, false);
|
||||
return;
|
||||
}
|
||||
|
||||
uint64_t sequenceId;
|
||||
len = DetectUtil::recieveMessage(netfd, (char*)&sequenceId, sizeof(uint64_t));
|
||||
if (len != sizeof(uint64_t))
|
||||
{
|
||||
monitor_log_error("revieve message failed, fieldLen:%d", len);
|
||||
callBack(0, false);
|
||||
return;
|
||||
}
|
||||
|
||||
// result
|
||||
bool isVote = false;
|
||||
len = DetectUtil::recieveMessage(netfd, (char*)&isVote, sizeof(bool));
|
||||
if (len != sizeof(bool))
|
||||
{
|
||||
monitor_log_info("recieve message failed. sequenceId:%" PRIu64 , sequenceId);
|
||||
callBack(0, false);
|
||||
return;
|
||||
}
|
||||
|
||||
monitor_log_info("call back to caller. sequenceId:%" PRIu64 , sequenceId);
|
||||
callBack(sequenceId, isVote);
|
||||
#else
|
||||
MonitorVoteHandler::VoteResponse_t response;
|
||||
int len = DetectUtil::recieveMessage(netfd, (char*)&response, sizeof(response));
|
||||
if (len != sizeof(response))
|
||||
{
|
||||
monitor_log_error("client close or read socket failed, fd:%d, len:%d.", netfd, len);
|
||||
|
||||
CPollerObject::DetachPoller();
|
||||
close(netfd);
|
||||
netfd = -1;
|
||||
|
||||
// callBack(0, false);
|
||||
return;
|
||||
}
|
||||
|
||||
// check magic num
|
||||
if (ntohs(response.sMagicNum) != MonitorVoteHandler::sgMagicNum)
|
||||
{
|
||||
monitor_log_error("receive message failed.");
|
||||
// callBack(0, false);
|
||||
return;
|
||||
}
|
||||
|
||||
DetectUtil::translateByteOrder(response.sSequenceId);
|
||||
monitor_log_error("call back to caller. sequenceId:%" PRIu64 , response.sSequenceId);
|
||||
callBack(response.sSequenceId, response.sIsVote);
|
||||
#endif
|
||||
return;
|
||||
}
|
||||
|
||||
void InvokeHandler::callBack(
|
||||
const uint64_t sequenceId,
|
||||
const bool isVote)
|
||||
{
|
||||
InvokeMgr::getInstance()->callBack(sequenceId, isVote);
|
||||
return;
|
||||
}
|
@ -1,52 +0,0 @@
|
||||
////////////////////////////////////////////////////////////
|
||||
//
|
||||
// invoke client for invoking the peer monitor node
|
||||
// created by qiuyu on Nov 27, 2018
|
||||
//
|
||||
///////////////////////////////////////////////////////////
|
||||
|
||||
#ifndef __INVOKE_HANDLER_H__
|
||||
#define __INVOKE_HANDLER_H__
|
||||
|
||||
#include "poller.h"
|
||||
#include "DetectHandlerBase.h"
|
||||
|
||||
#include <string>
|
||||
|
||||
class CPollThread;
|
||||
|
||||
class InvokeHandler : public CPollerObject
|
||||
{
|
||||
private:
|
||||
std::string mIp;
|
||||
int mPort;
|
||||
|
||||
public:
|
||||
InvokeHandler(
|
||||
CPollThread* poll,
|
||||
const std::string& ip,
|
||||
const int port);
|
||||
|
||||
virtual ~InvokeHandler();
|
||||
|
||||
virtual void InputNotify(void);
|
||||
|
||||
bool initHandler(const bool isInit);
|
||||
|
||||
bool invokeVote(
|
||||
const DetectHandlerBase::DetectType& type,
|
||||
const std::string& detectedAddr,
|
||||
const uint64_t sequenceId,
|
||||
const std::string& data,
|
||||
int& timeout);
|
||||
|
||||
private:
|
||||
bool attachThread();
|
||||
bool connectToServer();
|
||||
|
||||
void callBack(
|
||||
const uint64_t sequenceId,
|
||||
const bool isVote);
|
||||
};
|
||||
|
||||
#endif // __INVOKE_HANDLER_H__
|
@ -1,257 +0,0 @@
|
||||
//////////////////////////////////////////////////
|
||||
//
|
||||
// invoke API for cluster
|
||||
// created by qiuyu on Nov 27,2018
|
||||
//
|
||||
//////////////////////////////////////////////////
|
||||
|
||||
#include "InvokeMgr.h"
|
||||
#include "InvokeHandler.h"
|
||||
|
||||
#include <sys/select.h>
|
||||
#include <sys/time.h>
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#define __STDC_FORMAT_MACROS
|
||||
#include <inttypes.h>
|
||||
|
||||
int InvokeMgr::smClusterNodeSize = -1;
|
||||
// global sequence id for this process; a cluster-wide id would be even better
|
||||
uint64_t InvokeMgr::sgSequenceId = 0;
|
||||
|
||||
InvokeMgr::InvokeMgr()
|
||||
:
|
||||
mInvokePoll(new CPollThread("dtcInvokePoll")),
|
||||
mLock(new CMutex())
|
||||
{
|
||||
// unit must be ms
|
||||
mInvokeTimeout = DtcMonitorConfigMgr::getInstance()->getInvokeTimeout();
|
||||
monitor_log_error("invokeTimeout:%d", mInvokeTimeout);
|
||||
|
||||
mNeedStop = false;
|
||||
mHasInit = false;
|
||||
}
|
||||
|
||||
InvokeMgr::~InvokeMgr()
|
||||
{
|
||||
if (mInvokePoll) delete mInvokePoll;
|
||||
if (mLock) delete mLock;
|
||||
|
||||
for (size_t idx = 0; idx < mInvokeHandlers.size(); idx++)
|
||||
{
|
||||
if (mInvokeHandlers[idx]) delete mInvokeHandlers[idx];
|
||||
}
|
||||
|
||||
CallBackDataItr_t itr = mRemaingRequests.begin();
|
||||
while (itr != mRemaingRequests.end())
|
||||
{
|
||||
if (itr->second) delete itr->second;
|
||||
itr++;
|
||||
}
|
||||
mRemaingRequests.clear();
|
||||
}
|
||||
|
||||
bool InvokeMgr::startInvokeMgr()
|
||||
{
|
||||
if (mHasInit) return true;
|
||||
|
||||
// the highest one byte for physical id
|
||||
int physicalId = DtcMonitorConfigMgr::getInstance()->getPhysicalId();
|
||||
sgSequenceId = ((sgSequenceId + physicalId) << GLOBAL_PHYSICAL_ID_SHIFT) + 1;
|
||||
monitor_log_info("start invokeMgr....");
|
||||
|
||||
// start epoll thread
|
||||
int ret = mInvokePoll->InitializeThread();
|
||||
if (ret < 0)
|
||||
{
|
||||
monitor_log_error("initialize thread poll failed.");
|
||||
return false;
|
||||
}
|
||||
|
||||
bool rslt = initInvokeHandlers();
|
||||
if (!rslt) return false;
|
||||
mInvokePoll->RunningThread();
|
||||
|
||||
monitor_log_info("start invokeMgr successful.... begin sequenceId:%" PRIu64, sgSequenceId);
|
||||
mHasInit = true;
|
||||
return true;
|
||||
}
|
||||
|
||||
void InvokeMgr::stopInvokeMgr()
|
||||
{
|
||||
mNeedStop = true;
|
||||
}
|
||||
|
||||
// for now, this function is not reentrant
|
||||
bool InvokeMgr::invokeVoteSync(
|
||||
const DetectHandlerBase::DetectType type,
|
||||
const std::string& detectedAddr,
|
||||
const std::string& invokeData,
|
||||
const int timeout,
|
||||
bool& needSwitch)
|
||||
{
|
||||
// return true;
|
||||
bool rslt;
|
||||
SempData_t* sema = new SempData_t();
|
||||
uint64_t sequenceId;
|
||||
for (size_t idx = 0; idx < mInvokeHandlers.size(); idx++)
|
||||
{
|
||||
sequenceId = getSequenceId();
|
||||
|
||||
// use a semaphore for synchronization for now; in the future this
// should be replaced with an eventfd
|
||||
int waitTime = timeout;
|
||||
mLock->lock();
|
||||
rslt = mInvokeHandlers[idx]->invokeVote(type, detectedAddr, sequenceId, invokeData, waitTime);
|
||||
if (!rslt)
|
||||
{
|
||||
mLock->unlock();
|
||||
continue;
|
||||
}
|
||||
|
||||
// vote for timeout with semaphore, delete it in the callback if needed
|
||||
sema->sTotalVotes++;
|
||||
mRemaingRequests[sequenceId] = sema;
|
||||
mLock->unlock();
|
||||
|
||||
monitor_log_info("wait for response. sequenceId:%" PRIu64, sequenceId);
|
||||
// the invoke timeout must be greater than the detect timeout so that we wait for the
// peer's objective response instead of relying on our own subjective timeout
|
||||
waitTime = (mInvokeTimeout > waitTime + 10000/*offset*/) ? mInvokeTimeout : (waitTime + 10000);
|
||||
sema->semTimeWait(waitTime);
|
||||
|
||||
mLock->lock();
|
||||
CallBackDataItr_t itr = mRemaingRequests.find(sequenceId);
|
||||
if (itr != mRemaingRequests.end())
|
||||
{
|
||||
if (itr->second->sTotalVotes > ((smClusterNodeSize + 1) >> 1))
|
||||
{
|
||||
// gather majority of peer votes
|
||||
needSwitch = true;
|
||||
monitor_log_info("erase request, sequenceId:%" PRIu64, sequenceId);
|
||||
mRemaingRequests.erase(sequenceId);
|
||||
assert(mRemaingRequests.find(sequenceId) == mRemaingRequests.end());
|
||||
mLock->unlock();
|
||||
|
||||
delete sema;
|
||||
return true;
|
||||
}
|
||||
|
||||
// remove this invoke request from the remaining map
|
||||
monitor_log_info("erase request, sequenceId:%" PRIu64, sequenceId);
|
||||
mRemaingRequests.erase(sequenceId);
|
||||
assert(mRemaingRequests.find(sequenceId) == mRemaingRequests.end());
|
||||
}
|
||||
else
|
||||
{
|
||||
monitor_log_error("should never come here, sequenceId:%" PRIu64, sequenceId);
|
||||
}
|
||||
|
||||
mLock->unlock();
|
||||
}
|
||||
|
||||
needSwitch = false;
|
||||
delete sema;
|
||||
|
||||
return true;
|
||||
}
|
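A worked example of the majority check above: in a three-node cluster this process sees smClusterNodeSize == 2 peers, so the condition sTotalVotes > ((2 + 1) >> 1) reduces to sTotalVotes > 1; because the counter starts at 1 for the local vote, at least one peer must also vote yes before needSwitch is set.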
||||
|
||||
bool InvokeMgr::callBack(
|
||||
const uint64_t sequenceId,
|
||||
const bool isVote)
|
||||
{
|
||||
SempData_t* sem = NULL;
|
||||
|
||||
mLock->lock();
|
||||
|
||||
// no need to handle sequenceId 0: it indicates a network issue, and taking the lock
// for that meaningless case would only cost extra time
|
||||
|
||||
// if (0 == sequenceId)
|
||||
// {
|
||||
// monitor_log_error("has network issue, notice!!!!!!");
|
||||
// mLock->unlock();
|
||||
|
||||
// return true;
|
||||
// }
|
||||
|
||||
CallBackDataItr_t itr = mRemaingRequests.find(sequenceId);
|
||||
if (itr != mRemaingRequests.end())
|
||||
{
|
||||
monitor_log_error("async call call back, isVote:%d, sequenceId:%" PRIu64, isVote, sequenceId);
|
||||
|
||||
sem = itr->second;
|
||||
if (!isVote)
|
||||
{
|
||||
// remove the vote which was set in invoke in order to handle request timeout
|
||||
sem->sTotalVotes--;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
mLock->unlock();
|
||||
monitor_log_error("this request must be timeout, vote it.sequenceId:%" PRIu64, sequenceId);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
if (sem) sem->wakeUp();
|
||||
|
||||
mLock->unlock();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
uint64_t InvokeMgr::getSequenceId()
|
||||
{
|
||||
// the last bit in sequence id indicate the vote result, 0 means refuse
|
||||
// int temp = sgSequenceId;
|
||||
return __sync_fetch_and_add(&sgSequenceId, 1);
|
||||
}
|
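An illustrative note on the id layout: GLOBAL_PHYSICAL_ID_SHIFT is 7 * 8 = 56, and startInvokeMgr() seeds the counter as ((0 + physicalId) << 56) + 1, so the top byte of every sequence id carries the physical id while the low 56 bits count up. For example, physicalId 23 (0x17) gives a first id of 0x1700000000000001.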
||||
|
||||
// create poll event and add it to the poll
|
||||
bool InvokeMgr::initInvokeHandlers()
|
||||
{
|
||||
if (mInvokeHandlers.size() != 0)
|
||||
{
|
||||
// this means user create this object more than one time,
|
||||
// this can not be permitted
|
||||
monitor_log_error("can not create singleton class for more than one time.");
|
||||
return false;
|
||||
}
|
||||
monitor_log_info("begin to init invoke handlers.");
|
||||
|
||||
// get cluster info
|
||||
const DtcMonitorConfigMgr::PairVector_t& clusterInfo = DtcMonitorConfigMgr::getInstance()->getClusterInfo();
|
||||
if (clusterInfo.size() < 2 || clusterInfo.size() > 0x7FFFFFFF) // clusterHosts excludes this node
{
monitor_log_error("the cluster (including this node) can not have fewer than 3 or more than 0x7FFFFFFF nodes, peer count:%d", (int)clusterInfo.size());
|
||||
return false;
|
||||
}
|
||||
smClusterNodeSize = clusterInfo.size();
|
||||
|
||||
bool rslt;
|
||||
InvokeHandler* handler;
|
||||
for (size_t idx = 0; idx < clusterInfo.size(); idx++)
|
||||
{
|
||||
handler = NULL;
|
||||
handler = new InvokeHandler(mInvokePoll, clusterInfo[idx].first, clusterInfo[idx].second);
|
||||
if (!handler)
|
||||
{
|
||||
monitor_log_error("create invokeHandler failed.");
|
||||
return false;
|
||||
}
|
||||
|
||||
rslt = handler->initHandler(true);
|
||||
if (!rslt)
|
||||
{
|
||||
monitor_log_error("create invoke handler failed.");
|
||||
return false;
|
||||
}
|
||||
mInvokeHandlers.push_back(handler);
|
||||
}
|
||||
monitor_log_info("create invoke handler successful.");
|
||||
|
||||
return true;
|
||||
}
|
@ -1,104 +0,0 @@
|
||||
//////////////////////////////////////////////////
|
||||
//
|
||||
// invoke API for cluster
|
||||
// created by qiuyu on Nov 27,2018
|
||||
//
|
||||
//////////////////////////////////////////////////
|
||||
|
||||
#ifndef __INVOKE_MGR_H__
|
||||
#define __INVOKE_MGR_H__
|
||||
|
||||
#include "singleton.h"
|
||||
#include "DetectHandlerBase.h"
|
||||
#include "Sem.h"
|
||||
|
||||
#define GLOBAL_PHYSICAL_ID_SHIFT (7 * 8)
|
||||
|
||||
class InvokeHandler;
|
||||
class CPollThread;
|
||||
class CMutex;
|
||||
|
||||
class InvokeMgr
|
||||
{
|
||||
typedef struct SemaphoreData
|
||||
{
|
||||
Sem* sSem;
|
||||
int sTotalVotes;
|
||||
|
||||
SemaphoreData()
|
||||
:
|
||||
sSem(new Sem),
|
||||
sTotalVotes(1) // vote for itself
|
||||
{
|
||||
}
|
||||
|
||||
~SemaphoreData()
|
||||
{
|
||||
if (sSem) delete sSem;
|
||||
}
|
||||
|
||||
void semWait()
|
||||
{
|
||||
sSem->semWait();
|
||||
}
|
||||
|
||||
void semTimeWait(const int miSecExpriedTime)
|
||||
{
|
||||
sSem->semTimeWait(miSecExpriedTime);
|
||||
}
|
||||
|
||||
void wakeUp()
|
||||
{
|
||||
sSem->semPost();
|
||||
}
|
||||
}SempData_t;
|
||||
|
||||
friend struct SemaphoreData;
|
||||
|
||||
private:
|
||||
static int smClusterNodeSize; // numbers of cluster peer node
|
||||
static uint64_t sgSequenceId;
|
||||
|
||||
typedef std::map<uint64_t, SempData_t*> CallBackData_t;
|
||||
typedef std::map<uint64_t, SempData_t*>::iterator CallBackDataItr_t;
|
||||
|
||||
private:
|
||||
CPollThread* mInvokePoll;
|
||||
CMutex* mLock;
|
||||
std::vector<InvokeHandler*> mInvokeHandlers;
|
||||
// std::map<uint64_t, SempData_t*> mRemaingRequests;
|
||||
CallBackData_t mRemaingRequests;
|
||||
int mInvokeTimeout;
|
||||
bool mNeedStop;
|
||||
bool mHasInit;
|
||||
|
||||
public:
|
||||
InvokeMgr();
|
||||
~InvokeMgr();
|
||||
|
||||
public:
|
||||
static InvokeMgr* getInstance()
|
||||
{
|
||||
return CSingleton<InvokeMgr>::Instance();
|
||||
}
|
||||
|
||||
bool startInvokeMgr();
|
||||
void stopInvokeMgr();
|
||||
|
||||
bool invokeVoteSync(
|
||||
const DetectHandlerBase::DetectType type,
|
||||
const std::string& detectedAddr,
|
||||
const std::string& invokeData,
|
||||
const int timeout,
|
||||
bool& needSwitch);
|
||||
|
||||
bool callBack(
|
||||
const uint64_t sequenceId,
|
||||
const bool isVote);
|
||||
|
||||
private:
|
||||
uint64_t getSequenceId();
|
||||
bool initInvokeHandlers();
|
||||
};
|
||||
|
||||
#endif // __INVOKE_MGR_H__
|
@ -1,138 +0,0 @@
|
||||
//////////////////////////////////////////////////
|
||||
//
|
||||
// invoke API for cluster
|
||||
// created by qiuyu on Nov 27,2018
|
||||
//
|
||||
//////////////////////////////////////////////////
|
||||
|
||||
#ifndef __INVOKE_MGR_H__
|
||||
#define __INVOKE_MGR_H__
|
||||
|
||||
#include "common/singleton.h"
|
||||
#include "DetectHandlerBase.h"
|
||||
#include "Sem.h"
|
||||
|
||||
#define GLOBAL_PHYSICAL_ID_SHIFT (7 * 8)
|
||||
|
||||
class InvokeHandler;
|
||||
class CPollThread;
|
||||
class CMutex;
|
||||
//class Sem;
|
||||
|
||||
class InvokeMgr : public CThread
|
||||
{
|
||||
enum
|
||||
{
|
||||
eMaxExpiredTimeSec = 10
|
||||
};
|
||||
|
||||
// async call timer controler
|
||||
class InvokeTimer : public CTimerObject
|
||||
{
|
||||
private:
|
||||
uint64_t mSequenceId;
|
||||
InvokeMgr* mInvokeMgr;
|
||||
public:
|
||||
InvokeTimer(
|
||||
const uint64_t seq,
|
||||
InvokeMgr* inMgr)
|
||||
{
|
||||
mSequenceId = seq;
|
||||
mInvokeMgr = inMgr;
|
||||
}
|
||||
|
||||
virtual ~InvokeTimer() {}
|
||||
|
||||
virtual void AttachTimer(class CTimerList *container)
|
||||
{
|
||||
CTimerObject::AttachTimer(container);
|
||||
}
|
||||
|
||||
virtual void TimerNotify(void)
|
||||
{
|
||||
mInvokeMgr->callBack(mSequenceId, true, false);
|
||||
}
|
||||
};
|
||||
|
||||
typedef struct SemaphoreData
|
||||
{
|
||||
Sem* sSem;
|
||||
int sTotalVotes;
|
||||
bool sIsTimeout;
|
||||
|
||||
SemaphoreData()
|
||||
:
|
||||
sSem(new Sem),
|
||||
sTotalVotes(1), // vote for itself
|
||||
sIsTimeout(false)
|
||||
{
|
||||
}
|
||||
|
||||
~SemaphoreData()
|
||||
{
|
||||
if (sSem) delete sSem;
|
||||
}
|
||||
|
||||
void semWait()
|
||||
{
|
||||
sSem->semWait();
|
||||
}
|
||||
|
||||
void wakeUp()
|
||||
{
|
||||
sSem->semPost();
|
||||
}
|
||||
}SempData_t;
|
||||
|
||||
private:
|
||||
static int smClusterNodeSize; // numbers of cluster peer node
|
||||
static uint64_t sgSequenceId;
|
||||
|
||||
typedef std::map<uint64_t, SempData_t*> CallBackData_t;
|
||||
typedef std::map<uint64_t, SempData_t*>::iterator CallBackDataItr_t;
|
||||
|
||||
private:
|
||||
CPollThread* mInvokePoll;
|
||||
CMutex* mLock;
|
||||
std::vector<InvokeHandler*> mInvokeHandlers;
|
||||
// std::map<uint64_t, SempData_t*> mRemaingRequests;
|
||||
CallBackData_t mRemaingRequests;
|
||||
CTimerList* mTimerMgr; // control all timers
|
||||
int mTimeout;
|
||||
bool mNeedStop;
|
||||
int mTotalVotes;
|
||||
bool mHasInit;
|
||||
|
||||
public:
|
||||
InvokeMgr();
|
||||
~InvokeMgr();
|
||||
|
||||
protected:
|
||||
// override thread function
|
||||
virtual void* Process(void);
|
||||
|
||||
public:
|
||||
static InvokeMgr* getInstance()
|
||||
{
|
||||
return CSingleton<InvokeMgr>::Instance();
|
||||
}
|
||||
|
||||
bool startInvokeMgr();
|
||||
void stopInvokeMgr();
|
||||
|
||||
bool invokeVoteSync(
|
||||
const DetectHandlerBase::DetectType type,
|
||||
const std::string& invokeData,
|
||||
bool& needSwitch);
|
||||
|
||||
bool callBack(
|
||||
const uint64_t sequenceId,
|
||||
const bool isTimeout,
|
||||
const bool isVote);
|
||||
|
||||
private:
|
||||
uint64_t getSequenceId();
|
||||
bool initInvokeHandlers();
|
||||
};
|
||||
|
||||
#endif // __INVOKE_MGR_H__
|
@ -1,765 +0,0 @@
|
||||
/////////////////////////////////////////////////////////////
|
||||
//
|
||||
// Implementation of the LIRS cache.
|
||||
// Author:qiuyu
|
||||
// Date:Apr 22th,2019
|
||||
//
|
||||
////////////////////////////////////////////////////////////
|
||||
|
||||
#include "LirsCache.h"
|
||||
#include "log.h"
|
||||
|
||||
#include <assert.h>
|
||||
|
||||
////////////////////////////////////////////////////////////
|
||||
// stack relevant
|
||||
////////////////////////////////////////////////////////////
|
||||
LirsCache::LirsStack::LirsStack(
|
||||
const int maxLirNum,
|
||||
const int maxStackSize)
|
||||
:
|
||||
mMaxLirEntryNum(maxLirNum),
|
||||
mMaxStackSize(maxStackSize),
|
||||
mCurrLirEntryNum(0),
|
||||
mCurrStackSize(0),
|
||||
mStackBottom(NULL),
|
||||
mStackTop(NULL)
|
||||
{
|
||||
}
|
||||
|
||||
LirsCache::LirsStack::~LirsStack()
|
||||
{
|
||||
clear();
|
||||
}
|
||||
|
||||
void
|
||||
LirsCache::LirsStack::clear()
|
||||
{
|
||||
LirsEntry_t *prev, *curr = mStackBottom;
|
||||
while (curr)
|
||||
{
|
||||
prev = curr;
|
||||
curr = curr->sStackNext;
|
||||
|
||||
if (prev->sEntryState & HIR_BLOCK_SHARED)
|
||||
prev->sEntryState &= ~HIR_BLOCK_SHARED;
|
||||
else
|
||||
delete prev;
|
||||
}
|
||||
|
||||
mCurrLirEntryNum = 0;
|
||||
mCurrStackSize = 0;
|
||||
mStackBottom = mStackTop = NULL;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
void
|
||||
LirsCache::LirsStack::removeEntry(
|
||||
LirsEntry_t *entry,
|
||||
std::map<std::string, LirsEntry_t*> &entryMap,
|
||||
const bool releaseEntry)
|
||||
{
|
||||
if (!entry || !(entry->sEntryState & HIR_BLOCK_ONSTACK))
|
||||
{
|
||||
log_error("internal error, entryEmpty:%d.", !entry);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!entry->sStackPrev)
|
||||
{
|
||||
assert(entry == mStackBottom);
|
||||
|
||||
mStackBottom = entry->sStackNext;
|
||||
if (!mStackBottom) mStackTop = NULL;
|
||||
else mStackBottom->sStackPrev = NULL;
|
||||
}
|
||||
else if (!entry->sStackNext)
|
||||
{
|
||||
assert(entry == mStackTop);
|
||||
|
||||
mStackTop = entry->sStackPrev;
|
||||
if (!mStackTop) mStackBottom = NULL;
|
||||
else mStackTop->sStackNext = NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
assert(entry != mStackBottom && entry != mStackTop);
|
||||
|
||||
entry->sStackPrev->sStackNext = entry->sStackNext;
|
||||
entry->sStackNext->sStackPrev = entry->sStackPrev;
|
||||
}
|
||||
|
||||
char &state = entry->sEntryState;
|
||||
bool canRelease = (releaseEntry && !(state & HIR_BLOCK_SHARED));
|
||||
if (state & LIR_BLOCK) mCurrLirEntryNum--;
|
||||
state &= (~HIR_BLOCK_ONSTACK & ~HIR_BLOCK_SHARED & ~LIR_BLOCK);
|
||||
mCurrStackSize--;
|
||||
|
||||
if (canRelease)
|
||||
{
|
||||
log_info("remove entry, key:%s", entry->sKey.c_str());
|
||||
entryMap.erase(entry->sKey);
|
||||
delete entry;
|
||||
entry = NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
entry->sStackPrev = entry->sStackNext = NULL;
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// 1.the caller must ensure there is enough space before appending the entry
// 2.the entry must not already exist in the stack
|
||||
void
|
||||
LirsCache::LirsStack::appendEntry(LirsEntry_t *entry)
|
||||
{
|
||||
if (!entry)
|
||||
{
|
||||
log_error("append empty entry.");
|
||||
assert(false);
|
||||
return;
|
||||
}
|
||||
|
||||
char &state = entry->sEntryState;
|
||||
if (state < 0 || (mCurrLirEntryNum >= mMaxLirEntryNum && (state & LIR_BLOCK)))
|
||||
{
|
||||
log_error("no enough space for the Lir entry");
|
||||
assert(false);
|
||||
return;
|
||||
}
|
||||
|
||||
if (mCurrStackSize >= mMaxStackSize)
|
||||
{
|
||||
log_error("no enough space for the Hir entry");
|
||||
assert(false);
|
||||
return;
|
||||
}
|
||||
|
||||
// has enough space, append it
|
||||
if (!mStackTop)
|
||||
{
|
||||
// the first one
|
||||
mStackTop = mStackBottom = entry;
|
||||
entry->sStackPrev = NULL;
|
||||
entry->sStackNext = NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
// append to the behind of the top entry
|
||||
mStackTop->sStackNext = entry;
|
||||
entry->sStackPrev = mStackTop;
|
||||
mStackTop = entry;
|
||||
mStackTop->sStackNext = NULL;
|
||||
}
|
||||
|
||||
if (state & LIR_BLOCK) mCurrLirEntryNum++;
|
||||
mCurrStackSize++;
|
||||
|
||||
state |= HIR_BLOCK_ONSTACK;
|
||||
if (state & (HIR_BLOCK_ONQUEUE | HIR_RESIDENT_BLOCK)) state |= HIR_BLOCK_SHARED;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// evict all HIR blocks located at the bottom of the stack
|
||||
void
|
||||
LirsCache::LirsStack::stackPrune(std::map<std::string, LirsEntry_t*> &entryMap)
|
||||
{
|
||||
if (!mStackBottom) return;
|
||||
|
||||
while (mStackBottom)
|
||||
{
|
||||
if (mStackBottom->sEntryState & LIR_BLOCK) break;
|
||||
removeEntry(mStackBottom, entryMap);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// release one hir block from the bottom of the stack
|
||||
void
|
||||
LirsCache::LirsStack::releaseOneHirEntry(std::map<std::string, LirsEntry_t*> &entryMap)
|
||||
{
|
||||
if (!mStackBottom) return;
|
||||
|
||||
LirsEntry_t *curr = mStackBottom->sStackNext;
|
||||
while (curr)
|
||||
{
|
||||
if (curr->sEntryState & LIR_BLOCK)
{
  curr = curr->sStackNext;
  continue;
}

// remove the first resident HIR entry found
removeEntry(curr, entryMap, true);
break;
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////
|
||||
// Lirs queue relevant
|
||||
///////////////////////////////////////////////////////////
|
||||
LirsCache::LirsQueue::LirsQueue(const int maxQueueSize)
|
||||
:
|
||||
mMaxQueueSize(maxQueueSize),
|
||||
mCurrQueueSize(0),
|
||||
mQueueHead(NULL),
|
||||
mQueueTail(NULL)
|
||||
{
|
||||
}
|
||||
|
||||
LirsCache::LirsQueue::~LirsQueue()
|
||||
{
|
||||
clear();
|
||||
}
|
||||
|
||||
void
|
||||
LirsCache::LirsQueue::clear()
|
||||
{
|
||||
LirsEntry_t *prev, *curr = mQueueHead;
|
||||
while (curr)
|
||||
{
|
||||
prev = curr;
|
||||
curr = curr->sQueueNext;
|
||||
|
||||
if (prev->sEntryState & HIR_BLOCK_SHARED)
|
||||
prev->sEntryState &= ~HIR_BLOCK_SHARED;
|
||||
else
|
||||
delete prev;
|
||||
}
|
||||
|
||||
mCurrQueueSize = 0;
|
||||
mQueueHead = mQueueTail = NULL;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// evict a resident HIR block from the queue
// the 'release' flag lets the caller forbid freeing the entry
// while something else is still holding it
|
||||
void
|
||||
LirsCache::LirsQueue::removeEntry(
|
||||
LirsEntry_t *entry,
|
||||
std::map<std::string, LirsEntry_t*> &entryMap,
|
||||
const bool release)
|
||||
{
|
||||
if (!entry)
|
||||
{
|
||||
log_error("can not remove an empty entry.");
|
||||
return;
|
||||
}
|
||||
|
||||
char &state = entry->sEntryState;
|
||||
if (!(state & HIR_RESIDENT_BLOCK))
|
||||
{
|
||||
assert(false);
|
||||
log_error("incorrect entry state.");
|
||||
return;
|
||||
}
|
||||
|
||||
if (!entry->sQueuePrev)
|
||||
{
|
||||
mQueueHead = entry->sQueueNext;
|
||||
if (!mQueueHead) mQueueTail = NULL;
|
||||
else mQueueHead->sQueuePrev = NULL;
|
||||
}
|
||||
else if (!entry->sQueueNext)
|
||||
{
|
||||
mQueueTail = entry->sQueuePrev;
|
||||
if (!mQueueTail) mQueueHead = NULL;
|
||||
else mQueueTail->sQueueNext = NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
entry->sQueuePrev->sQueueNext = entry->sQueueNext;
|
||||
entry->sQueueNext->sQueuePrev = entry->sQueuePrev;
|
||||
}
|
||||
|
||||
// double check
|
||||
if (release && !(state & HIR_BLOCK_ONSTACK) && !(state & HIR_BLOCK_SHARED))
|
||||
{
|
||||
log_info("remove entry, key:%s", entry->sKey.c_str());
|
||||
entryMap.erase(entry->sKey);
|
||||
delete entry;
|
||||
entry = NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
// clear flag
|
||||
entry->sQueuePrev = entry->sQueueNext = NULL;
|
||||
state &= (~HIR_BLOCK_ONQUEUE & ~HIR_BLOCK_SHARED & ~HIR_RESIDENT_BLOCK);
|
||||
}
|
||||
mCurrQueueSize--;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// when call this function, queue should has enough remaining space for appending
|
||||
void
|
||||
LirsCache::LirsQueue::appendEntry(LirsEntry_t *entry)
|
||||
{
|
||||
if (!entry || (entry->sEntryState & LIR_BLOCK))
|
||||
{
|
||||
log_error("empty entry or LIR-block entry, empty:%d.", entry == NULL);
|
||||
assert(false);
|
||||
return;
|
||||
}
|
||||
|
||||
char &state = entry->sEntryState;
|
||||
if (state < 0 || mCurrQueueSize >= mMaxQueueSize)
|
||||
{
|
||||
log_error("incorrect queue data.");
|
||||
return;
|
||||
}
|
||||
|
||||
// just append to the tail directly
|
||||
if (!mQueueTail)
|
||||
{
|
||||
mQueueHead = mQueueTail = entry;
|
||||
mQueueHead->sQueuePrev = NULL;
|
||||
mQueueTail->sQueueNext = NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
mQueueTail->sQueueNext = entry;
|
||||
entry->sQueuePrev = mQueueTail;
|
||||
mQueueTail = entry;
|
||||
mQueueTail->sQueueNext = NULL;
|
||||
}
|
||||
mCurrQueueSize++;
|
||||
|
||||
state |= (HIR_BLOCK_ONQUEUE | HIR_RESIDENT_BLOCK);
|
||||
state &= ~LIR_BLOCK;
|
||||
if (state & HIR_BLOCK_ONSTACK) state |= HIR_BLOCK_SHARED;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////
|
||||
// LIRS cache relevant
|
||||
///////////////////////////////////////////////////////////
|
||||
LirsCache::LirsCache(const int cacheSize)
|
||||
:
|
||||
mCacheEntrySize(cacheSize)
|
||||
{
|
||||
if (mCacheEntrySize < eMinCacheEntrySize || mCacheEntrySize > eMaxCacheEntrySize)
|
||||
mCacheEntrySize = mCacheEntrySize < eMinCacheEntrySize ? eMinCacheEntrySize : eMaxCacheEntrySize;
|
||||
|
||||
int queueSize = mCacheEntrySize * eQueueSizeRate / 100;
|
||||
int maxLirEntryNum = mCacheEntrySize - queueSize;
|
||||
int maxStackSize = mCacheEntrySize + queueSize; // the extra queue size for holding non-resident HIR blocks
|
||||
|
||||
mBlockStack = new LirsStack(maxLirEntryNum, maxStackSize);
|
||||
mBlockQueue = new LirsQueue(queueSize);
|
||||
}
|
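To make the sizing above concrete, a small worked example (illustrative numbers only, not part of the original source): with eQueueSizeRate = 1 (1%) and a cache of 10000 entries, the resident-HIR queue gets 100 slots, the stack may hold up to 9900 LIR blocks, and the stack capacity is 10100 so it can additionally track non-resident HIR blocks.

#include <cassert>

// worked example of the sizing arithmetic in LirsCache::LirsCache (assumes eQueueSizeRate == 1)
int main()
{
    const int cacheSize      = 10000;                 // illustrative value
    const int queueSize      = cacheSize * 1 / 100;   // 100 resident HIR slots
    const int maxLirEntryNum = cacheSize - queueSize; // 9900 LIR blocks
    const int maxStackSize   = cacheSize + queueSize; // 10100, extra room for non-resident HIR blocks

    assert(queueSize == 100 && maxLirEntryNum == 9900 && maxStackSize == 10100);
    return 0;
}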
||||
|
||||
LirsCache::~LirsCache()
|
||||
{
|
||||
if (mBlockStack) delete mBlockStack;
|
||||
if (mBlockQueue) delete mBlockQueue;
|
||||
}
|
||||
|
||||
// find the key and adjust the lirs cache
|
||||
std::string
|
||||
LirsCache::findEntry(const std::string &key)
|
||||
{
|
||||
MapItr_t itr = mEntryMap.find(key);
|
||||
if (itr == mEntryMap.end()) return "";
|
||||
|
||||
LirsEntry_t *entry = itr->second;
|
||||
assert(entry != NULL);
|
||||
if (!entry || !(entry->sEntryState & HIR_RESIDENT_BLOCK)) return "";
|
||||
|
||||
// hit Lir or Resident Hir block, adjust the cache
|
||||
adjustLirsCache(entry);
|
||||
syntaxCheck();
|
||||
return entry->sValue;
|
||||
}
|
||||
|
||||
// 1. if the key already exists, update the value
|
||||
// 2. otherwise append a new entry
|
||||
bool
|
||||
LirsCache::appendEntry(
|
||||
const std::string &key,
|
||||
const std::string &value)
|
||||
{
|
||||
// find in the stack first
|
||||
LirsEntry_t *entry = NULL;
|
||||
MapItr_t itr = mEntryMap.find(key);
|
||||
if (itr != mEntryMap.end())
|
||||
{
|
||||
entry = itr->second;
|
||||
#if (__cplusplus >= 201103L)
|
||||
// c++11: note that 'value' is a const reference here, so std::move still results in a copy
|
||||
entry->sValue = std::move(value);
|
||||
#else
|
||||
entry->sValue = value;
|
||||
#endif // __cplusplus >= 201103L
|
||||
adjustLirsCache(entry);
|
||||
syntaxCheck();
|
||||
|
||||
log_info("update entry, key:%s, value:%s, state:%d", key.c_str(),\
|
||||
value.c_str(), entry->sEntryState);
|
||||
return true;
|
||||
}
|
||||
|
||||
// append a new entry
|
||||
entry = new LirsEntry_t();
|
||||
if (!entry)
|
||||
{
|
||||
log_error("allocate memory failed.");
|
||||
return false;
|
||||
}
|
||||
entry->initEntry(0, NULL, NULL, NULL, NULL, key, value);
|
||||
char &state = entry->sEntryState;
|
||||
|
||||
// add into the map
|
||||
mEntryMap[key] = entry;
|
||||
|
||||
// make sure have enough space for appending
|
||||
bool isLirFull = mBlockStack->isLirEntryFull();
|
||||
bool isStackFull = mBlockStack->isStackFull();
|
||||
if (!isLirFull)
|
||||
{
|
||||
if (isStackFull) mBlockStack->releaseOneHirEntry(mEntryMap);
|
||||
|
||||
// add as a lir entry
|
||||
state |= LIR_BLOCK;
|
||||
mBlockStack->appendEntry(entry);
|
||||
syntaxCheck();
|
||||
|
||||
log_info("append entry, key:%s, value:%s, state:%d",\
|
||||
key.c_str(), value.c_str(), entry->sEntryState);
|
||||
return true;
|
||||
}
|
||||
|
||||
// add as a resident HIR block
|
||||
bool isQueueFull = mBlockQueue->isHirEntryFull();
|
||||
if (isQueueFull || isStackFull)
|
||||
{
|
||||
if (isQueueFull)
|
||||
{
|
||||
// remove resident HIR block from queue
|
||||
LirsEntry_t *head = mBlockQueue->getHeadOfQueue();
|
||||
mBlockQueue->removeEntry(head, mEntryMap);
|
||||
}
|
||||
|
||||
// check whether the stack is full or not
|
||||
if (isStackFull)
|
||||
{
|
||||
// remove the lir block in the bottom
|
||||
LirsEntry_t *bottom = mBlockStack->getBottomOfStack();
|
||||
mBlockStack->removeEntry(bottom, mEntryMap, false);
|
||||
mBlockQueue->appendEntry(bottom);
|
||||
mBlockStack->stackPrune(mEntryMap);
|
||||
|
||||
// append entry as a lir block
|
||||
state |= LIR_BLOCK;
|
||||
mBlockStack->appendEntry(entry);
|
||||
syntaxCheck();
|
||||
|
||||
log_info("append entry, key:%s, value:%s, state:%d",\
|
||||
key.c_str(), value.c_str(), entry->sEntryState);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
// append to both the stack and the queue as a resident HIR block
|
||||
// state |= (HIR_RESIDENT_BLOCK | HIR_BLOCK_SHARED);
|
||||
mBlockStack->appendEntry(entry);
|
||||
mBlockQueue->appendEntry(entry);
|
||||
assert(state == 30); // HIR_RESIDENT_BLOCK | HIR_BLOCK_ONSTACK | HIR_BLOCK_ONQUEUE | HIR_BLOCK_SHARED
|
||||
syntaxCheck();
|
||||
|
||||
log_info("append entry, key:%s, value:%s, state:%d",\
|
||||
key.c_str(), value.c_str(), entry->sEntryState);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
LirsCache::removeEntry(const std::string &key)
|
||||
{
|
||||
MapItr_t itr = mEntryMap.find(key);
|
||||
if (itr == mEntryMap.end()) return true;
|
||||
|
||||
LirsEntry_t *entry = itr->second;
|
||||
char state = entry->sEntryState;
|
||||
|
||||
// remove from the stack
|
||||
if (state & HIR_BLOCK_ONSTACK)
|
||||
{
|
||||
mBlockStack->removeEntry(entry, mEntryMap);
|
||||
|
||||
// try to conduct a pruning
|
||||
mBlockStack->stackPrune(mEntryMap);
|
||||
}
|
||||
|
||||
// remove from the queue
|
||||
if (state & HIR_BLOCK_ONQUEUE)
|
||||
{
|
||||
mBlockQueue->removeEntry(entry, mEntryMap);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void
|
||||
LirsCache::clear()
|
||||
{
|
||||
if (mBlockStack) mBlockStack->clear();
|
||||
if (mBlockQueue) mBlockQueue->clear();
|
||||
mEntryMap.clear();
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// the entry must already exist in the cache, even if it is a non-resident block
|
||||
void
|
||||
LirsCache::adjustLirsCache(LirsEntry_t *entry)
|
||||
{
|
||||
char &state = entry->sEntryState;
|
||||
if (state & LIR_BLOCK)
|
||||
{
|
||||
// lir block
|
||||
// bool inStackBottom = (entry->sStackPrev == NULL);
|
||||
mBlockStack->removeEntry(entry, mEntryMap, false);
|
||||
|
||||
// maybe the removed entry is bottom, try to conduct a stack pruning
|
||||
mBlockStack->stackPrune(mEntryMap);
|
||||
|
||||
state |= LIR_BLOCK;
|
||||
}
|
||||
else
|
||||
{
|
||||
// hir block
|
||||
if (state & HIR_RESIDENT_BLOCK)
|
||||
{
|
||||
// resident hir block
|
||||
if (state & HIR_BLOCK_ONSTACK)
|
||||
{
|
||||
// evict it from the queue
|
||||
mBlockQueue->removeEntry(entry, mEntryMap, false);
|
||||
|
||||
// move the bottom entry in the stack to the end of the queue
|
||||
LirsEntry_t *bottom = mBlockStack->getBottomOfStack();
|
||||
mBlockStack->removeEntry(bottom, mEntryMap, false);
|
||||
mBlockQueue->appendEntry(bottom);
|
||||
|
||||
// evict this entry from the stack
|
||||
mBlockStack->removeEntry(entry, mEntryMap, false);
|
||||
mBlockStack->stackPrune(mEntryMap);
|
||||
|
||||
state |= LIR_BLOCK;
|
||||
}
|
||||
else
|
||||
{
|
||||
// 1.leave its status in HIR and move this block to the end of the queue
|
||||
mBlockQueue->removeEntry(entry, mEntryMap, false);
|
||||
mBlockQueue->appendEntry(entry);
|
||||
|
||||
// 2.append to the stack
|
||||
bool isStackFull = mBlockStack->isStackFull();
|
||||
if (isStackFull)
|
||||
{
|
||||
// remove the first HIR entry from stack
|
||||
mBlockStack->releaseOneHirEntry(mEntryMap);
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// non-resident HIR block: it must already be on the stack; if it were not,
|
||||
// it would be a new entry and appendEntry should have been called to add it
|
||||
if (!(state & HIR_BLOCK_ONSTACK) || (state & HIR_BLOCK_ONQUEUE))
|
||||
{
|
||||
log_error("internal error.");
|
||||
assert(false);
|
||||
return;
|
||||
}
|
||||
|
||||
// remove the resident HIR block from the head of queue first
|
||||
LirsEntry_t *head = mBlockQueue->getHeadOfQueue();
|
||||
mBlockQueue->removeEntry(head, mEntryMap, true);
|
||||
|
||||
// move the entry in the bottom of the stack into the tail of the queue
|
||||
LirsEntry_t *bottom = mBlockStack->getBottomOfStack();
|
||||
mBlockStack->removeEntry(bottom, mEntryMap, false);
|
||||
mBlockQueue->appendEntry(bottom);
|
||||
|
||||
// remove the entry from the stack first, then conduct stack prune
|
||||
mBlockStack->removeEntry(entry, mEntryMap, false);
|
||||
|
||||
mBlockStack->stackPrune(mEntryMap);
|
||||
|
||||
state |= LIR_BLOCK;
|
||||
}
|
||||
}
|
||||
|
||||
// append this entry to the top of the stack
|
||||
mBlockStack->appendEntry(entry);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// check LIRS cache
|
||||
bool LirsCache::syntaxCheck()
|
||||
{
|
||||
int stackBlockNum = 0;
|
||||
int stackLirBlockNum = 0;
|
||||
int stackRHirBlockNum = 0;
|
||||
int stackNRHirBlockNum = 0;
|
||||
int queueSharedBlockNum = 0;
|
||||
|
||||
// check stack
|
||||
if (mBlockStack)
|
||||
{
|
||||
LirsEntry_t *bottom = mBlockStack->getBottomOfStack();
|
||||
LirsEntry_t *top = mBlockStack->getTopOfStack();
|
||||
|
||||
char state;
|
||||
LirsEntry_t *prev = NULL, *curr = bottom;
|
||||
while (curr)
|
||||
{
|
||||
state = curr->sEntryState;
|
||||
if (state <= 0 ||
|
||||
state > (HIR_BLOCK_SHARED + HIR_BLOCK_ONQUEUE + HIR_BLOCK_ONSTACK + HIR_RESIDENT_BLOCK))
|
||||
{
|
||||
log_error("incorrect entry state.");
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!(state & HIR_BLOCK_ONSTACK))
|
||||
{
|
||||
log_error("incorrect LIR block state. state:%d", state);
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
|
||||
stackBlockNum++;
|
||||
|
||||
if (state & LIR_BLOCK)
|
||||
{
|
||||
if ((state & HIR_RESIDENT_BLOCK)
|
||||
|| (state & HIR_BLOCK_ONQUEUE)
|
||||
|| (state & HIR_BLOCK_SHARED))
|
||||
{
|
||||
log_error("incorrect LIR block. state:%d", state);
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
stackLirBlockNum++;
|
||||
}
|
||||
else if (state & HIR_RESIDENT_BLOCK)
|
||||
{
|
||||
if (!(state & HIR_BLOCK_ONQUEUE)
|
||||
|| !(state & HIR_BLOCK_SHARED))
|
||||
{
|
||||
log_error("incorrect LIR block. state:%d", state);
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
stackRHirBlockNum++;
|
||||
}
|
||||
else
|
||||
{
|
||||
if ((state & HIR_BLOCK_ONQUEUE)
|
||||
|| (state & HIR_BLOCK_SHARED))
|
||||
{
|
||||
log_error("incorrect LIR block. state:%d", state);
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
stackNRHirBlockNum++;
|
||||
}
|
||||
|
||||
prev = curr;
|
||||
curr = curr->sStackNext;
|
||||
if (curr && prev != curr->sStackPrev)
|
||||
{
|
||||
log_error("incorrect double link.");
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
assert(prev == top);
|
||||
}
|
||||
|
||||
// check cache size
|
||||
if (stackRHirBlockNum > mBlockQueue->getCurrQueueSize())
|
||||
{
|
||||
log_error("check RHir block failed.");
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
|
||||
// check queue
|
||||
if (mBlockQueue)
|
||||
{
|
||||
LirsEntry_t *head = mBlockQueue->getHeadOfQueue();
|
||||
LirsEntry_t *tail = mBlockQueue->getTailOfQueue();
|
||||
|
||||
char state;
|
||||
LirsEntry_t *prev = NULL, *curr = head;
|
||||
while (curr)
|
||||
{
|
||||
state = curr->sEntryState;
|
||||
if (state <= 0 ||
|
||||
state > (HIR_BLOCK_SHARED + HIR_BLOCK_ONQUEUE + HIR_BLOCK_ONSTACK + HIR_RESIDENT_BLOCK))
|
||||
{
|
||||
log_error("incorrect entry state.");
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!(state & HIR_BLOCK_ONQUEUE) || !(state & HIR_RESIDENT_BLOCK))
|
||||
{
|
||||
log_error("incorrect Resident HIR block state. state:%d", state);
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (state & LIR_BLOCK)
|
||||
{
|
||||
log_error("incorrect Resident HIR block state. state:%d", state);
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (state & HIR_BLOCK_ONSTACK)
|
||||
{
|
||||
if (!(state & HIR_BLOCK_SHARED))
|
||||
{
|
||||
log_error("incorrect Resident HIR block state. state:%d", state);
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
|
||||
queueSharedBlockNum++;
|
||||
}
|
||||
|
||||
prev = curr;
|
||||
curr = curr->sQueueNext;
|
||||
if (curr && prev != curr->sQueuePrev)
|
||||
{
|
||||
log_error("incorrect double link.");
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
assert(prev == tail);
|
||||
}
|
||||
|
||||
if (stackRHirBlockNum != queueSharedBlockNum)
|
||||
{
|
||||
log_error("shared pointer occur error.");
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
@ -1,167 +0,0 @@
|
||||
/////////////////////////////////////////////////////////////
|
||||
//
|
||||
// Implementation of the LIRS cache.
|
||||
// Author:qiuyu
|
||||
// Date:Apr 22th,2019
|
||||
//
|
||||
////////////////////////////////////////////////////////////
|
||||
|
||||
#ifndef LIRS_CACHE_H__
|
||||
#define LIRS_CACHE_H__
|
||||
|
||||
#include <stdint.h>
|
||||
#include <map>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
|
||||
// LIRS uses two data structures to hold all cache data: the LIRS stack and the queue.
|
||||
// Data is classified as hot or cold; hot data is called LIR
|
||||
// and cold data is HIR. All LIR data lives in the LIRS stack, and
|
||||
// the other blocks live either in the stack or in the queue. HIR data is further
|
||||
// divided into resident and non-resident; all resident HIR data is linked
|
||||
// into a small queue
|
||||
|
||||
#define LIR_BLOCK 1 // Hot data
|
||||
#define HIR_RESIDENT_BLOCK 2 // HIR is cold data
|
||||
#define HIR_BLOCK_ONSTACK 4
|
||||
#define HIR_BLOCK_ONQUEUE 8
|
||||
#define HIR_BLOCK_SHARED 16 // shared Resident HIR entry reference between Stack and Queue
|
||||
// 1. unfixed data types (for either the key or the value):
|
||||
// unsigned long long, float, double, string
|
||||
// 2. apart from 1, the others are fixed, such as the following:
|
||||
// char, short, int, the unsigned types whose size is smaller than 8, and so on
|
||||
// #define HIR_BLOCK_FIXED 32
|
||||
|
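The flags above combine bitwise on a single entry. A quick illustration (a sketch assuming the five defines above are in scope; not part of the original source): a resident HIR block that is linked on both the stack and the queue carries exactly the state value 30 that LirsCache::appendEntry asserts.

#include <cassert>

int main()
{
    char state = 0;
    state |= (HIR_RESIDENT_BLOCK | HIR_BLOCK_ONSTACK); // resident HIR block sitting on the stack
    state |= (HIR_BLOCK_ONQUEUE  | HIR_BLOCK_SHARED);  // ...also linked on the queue, hence shared
    assert(state == 30 && !(state & LIR_BLOCK));       // 2 + 4 + 8 + 16; never combined with LIR_BLOCK
    return 0;
}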
||||
typedef struct LirsEntry
|
||||
{
|
||||
char sEntryState;
|
||||
// 1. we assume the value is big enough that the space cost of the shared-entry
|
||||
// mechanism (two doubly-linked pointers) is cheaper than cloning the same entry
|
||||
// 2. the shared entry lets us implement the LIRS cache with only one hash map
|
||||
struct LirsEntry *sStackPrev;
|
||||
struct LirsEntry *sStackNext;
|
||||
struct LirsEntry *sQueuePrev;
|
||||
struct LirsEntry *sQueueNext;
|
||||
std::string sKey;
|
||||
std::string sValue;
|
||||
|
||||
void initEntry(
|
||||
const char state,
|
||||
struct LirsEntry *sPrev,
|
||||
struct LirsEntry *sNext,
|
||||
struct LirsEntry *qPrev,
|
||||
struct LirsEntry *qNext,
|
||||
const std::string &key,
|
||||
const std::string &value)
|
||||
{
|
||||
sEntryState = state;
|
||||
sStackPrev = sPrev;
|
||||
sStackNext = sNext;
|
||||
sQueuePrev = qPrev;
|
||||
sQueueNext = qNext;
|
||||
#if (__cplusplus >= 201103L)
|
||||
sKey = std::move(key);
|
||||
sValue = std::move(value);
|
||||
#else
|
||||
sKey = key;
|
||||
sValue = value;
|
||||
#endif
|
||||
}
|
||||
}LirsEntry_t;
|
||||
|
||||
|
||||
class LirsCache
|
||||
{
|
||||
private:
|
||||
enum CacheRelevant
|
||||
{
|
||||
eQueueSizeRate = 1, // 1%
|
||||
eMinCacheEntrySize = 100,
|
||||
eMaxCacheEntrySize = 500000
|
||||
};
|
||||
|
||||
private:
|
||||
typedef std::map<std::string, LirsEntry_t*>::iterator MapItr_t;
|
||||
|
||||
class LirsStack
|
||||
{
|
||||
private:
|
||||
int mMaxLirEntryNum; // Maximum LIR entry number
|
||||
int mMaxStackSize; // maximum real stack capacity, contain LIR + resident HIR + non-resident blocks
|
||||
int mCurrLirEntryNum;
|
||||
int mCurrStackSize;
|
||||
LirsEntry_t* mStackBottom;
|
||||
LirsEntry_t* mStackTop;
|
||||
|
||||
public:
|
||||
LirsStack(const int maxLir, const int maxStackSize);
|
||||
virtual ~LirsStack();
|
||||
|
||||
inline LirsEntry_t* getBottomOfStack() { return mStackBottom; }
|
||||
inline LirsEntry_t* getTopOfStack() { return mStackTop; }
|
||||
inline bool isLirEntryFull() { return mCurrLirEntryNum >= mMaxLirEntryNum; }
|
||||
inline bool isStackFull() { return mCurrStackSize >= mMaxStackSize; }
|
||||
void stackPrune(std::map<std::string, LirsEntry_t*> &entryMap);
|
||||
void releaseOneHirEntry(std::map<std::string, LirsEntry_t*> &entryMap);
|
||||
|
||||
void appendEntry(LirsEntry_t *entry);
|
||||
void removeEntry(
|
||||
LirsEntry_t *entry,
|
||||
std::map<std::string, LirsEntry_t*> &entryMap,
|
||||
const bool releaseEntry = true);
|
||||
|
||||
void clear();
|
||||
};
|
||||
|
||||
class LirsQueue
|
||||
{
|
||||
private:
|
||||
int mMaxQueueSize; // Maximum resident HIR entry number
|
||||
int mCurrQueueSize;
|
||||
LirsEntry_t *mQueueHead;
|
||||
LirsEntry_t *mQueueTail;
|
||||
|
||||
public:
|
||||
LirsQueue(const int maxQueueSize);
|
||||
virtual ~LirsQueue();
|
||||
|
||||
inline LirsEntry_t* getHeadOfQueue() { return mQueueHead; }
|
||||
inline LirsEntry_t* getTailOfQueue() { return mQueueTail; }
|
||||
inline bool isHirEntryFull() { return mCurrQueueSize >= mMaxQueueSize; }
|
||||
inline int getCurrQueueSize() { return mCurrQueueSize; }
|
||||
|
||||
void appendEntry(LirsEntry_t *entry);
|
||||
void removeEntry(
|
||||
LirsEntry_t *entry,
|
||||
std::map<std::string, LirsEntry_t*> &entryMap,
|
||||
const bool releaseEntry = true);
|
||||
|
||||
void clear();
|
||||
};
|
||||
|
||||
public:
|
||||
explicit LirsCache(const int cacheSize = eMaxCacheEntrySize);
|
||||
virtual ~LirsCache();
|
||||
|
||||
std::string findEntry(const std::string &key);
|
||||
|
||||
// the user converts all basic data types to string
|
||||
bool appendEntry(const std::string &key, const std::string &value);
|
||||
bool removeEntry(const std::string &key);
|
||||
void clear();
|
||||
|
||||
private:
|
||||
void adjustLirsCache(LirsEntry_t * entry);
|
||||
bool syntaxCheck();
|
||||
|
||||
private:
|
||||
int mCacheEntrySize; // LIR and resident HIR block nums
|
||||
LirsStack* mBlockStack; // store all LIR blocks and some HIR blocks
|
||||
LirsQueue* mBlockQueue; // store all resident HIR blocks
|
||||
std::map<std::string, LirsEntry_t*> mEntryMap; // store all cache data for efficient search
|
||||
|
||||
friend class LirsStack;
|
||||
friend class LirsQueue;
|
||||
};
|
||||
|
||||
#endif // LIRS_CACHE_H__
|
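For reference, a minimal usage sketch of the public interface declared above (findEntry / appendEntry / removeEntry / clear). The header file name and the keys are illustrative assumptions; all keys and values are plain std::string, and findEntry returns an empty string on a miss or when the key is only tracked as a non-resident HIR block.

#include <iostream>
#include <string>

#include "LirsCache.h"   // assumed file name for the header above

int main()
{
    // the constructor clamps the size into [eMinCacheEntrySize, eMaxCacheEntrySize]
    LirsCache cache(1000);

    cache.appendEntry("user:1", "alice");   // insert a new entry
    cache.appendEntry("user:1", "amy");     // same key: the value is updated and the cache is adjusted

    std::string value = cache.findEntry("user:1");
    if (!value.empty())
        std::cout << "hit: " << value << std::endl;

    cache.removeEntry("user:1");
    cache.clear();
    return 0;
}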
@ -1,23 +0,0 @@
|
||||
CC := g++
|
||||
AR := ar rc
|
||||
LIBS := -lrt -lpthread -lcommon -ljsoncpp -lcurl64 -libmysqlclient.a
|
||||
|
||||
INCLUDE := -I../ -I../../ -I../../common -I../../3rdparty/jsoncpp/include/ -I../../3rdparty/dtc/include/ -I../../3rdparty/mysql64/include/mysql/ -I../../3rdparty/curl/include64/
|
||||
|
||||
LIBPATH := -L../../common -L../../3rdparty/jsoncpp/lib/ -L../../3rdparty/mysql64/lib/mysql/ -L../../3rdparty/curl/lib
|
||||
|
||||
SRCS := $(wildcard *.cpp)
|
||||
|
||||
OBJECTS := $(patsubst %.cpp,%.o,$(SRCS))
|
||||
|
||||
CFLAGS := -g -fPIC -Wall -O3 -c $(INCLUDE) $(LIBPATH)
|
||||
|
||||
TARGETLIB := libDtcMonitor.a
|
||||
|
||||
all:
|
||||
$(CC) $(CFLAGS) $(SRCS) $(LIBS)
|
||||
$(AR) $(TARGETLIB) $(OBJECTS)
|
||||
|
||||
clean:
|
||||
rm -f *.o
|
||||
rm -f $(TARGETLIB)
|
@ -1,204 +0,0 @@
|
||||
////////////////////////////////////////////////////////
|
||||
//
|
||||
// Handle detector cluster vote request
|
||||
// create by qiuyu on Nov 26, 2018
|
||||
//
|
||||
///////////////////////////////////////////////////////
|
||||
|
||||
#include "MonitorVoteHandler.h"
|
||||
#include "DetectUtil.h"
|
||||
#include "MonitorVoteHandlerMgr.h"
|
||||
|
||||
#define __STDC_FORMAT_MACROS
|
||||
#include <inttypes.h>
|
||||
|
||||
MonitorVoteHandler::MonitorVoteHandler(
|
||||
CPollThread* poll,
|
||||
const int fd)
|
||||
:
|
||||
CPollerObject(poll, fd)
|
||||
{
|
||||
}
|
||||
|
||||
MonitorVoteHandler::~MonitorVoteHandler()
|
||||
{
|
||||
}
|
||||
|
||||
int MonitorVoteHandler::addVoteEventToPoll()
|
||||
{
|
||||
EnableInput();
|
||||
int ret = CPollerObject::AttachPoller();
|
||||
if (ret < 0)
|
||||
{
|
||||
monitor_log_error("add event to poll failed.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
monitor_log_error("add handler to event poll successful, fd:%d", netfd);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int MonitorVoteHandler::removeFromEventPoll()
|
||||
{
|
||||
CPollerObject::DetachPoller();
|
||||
|
||||
monitor_log_error("delete vote handler event from poll successful, fd:%d", netfd);
|
||||
// close will be done on super class's destructor
|
||||
// close(netfd);
|
||||
// netfd = -1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// handle vote request and send response
|
||||
// since splitting or delay of the data packet is not handled, if an
|
||||
// unexpected message is received, just close the socket; the client will
|
||||
// reconnect on the next invocation
|
||||
void MonitorVoteHandler::InputNotify()
|
||||
{
|
||||
uint16_t magic = 0;
|
||||
int len = DetectUtil::recieveMessage(netfd, (char*)&magic, sizeof(uint16_t));
|
||||
magic = ntohs(magic);
|
||||
if (len != sizeof(uint16_t) || magic != sgMagicNum)
|
||||
{
|
||||
monitor_log_error("recieve message failed. fd:%d, len:%d, magic:%d", netfd, len, magic);
|
||||
MonitorVoteHandlerMgr::getInstance()->removeHandler(this);
|
||||
return;
|
||||
}
|
||||
|
||||
// sequenceId
|
||||
uint64_t sequenceId;
|
||||
len = DetectUtil::recieveMessage(netfd, (char*)&sequenceId, sizeof(uint64_t));
|
||||
if (len != sizeof(uint64_t))
|
||||
{
|
||||
monitor_log_error("recieve message failed. fd:%d, len:%d, sequenceId:%" PRIu64, netfd, len, sequenceId);
|
||||
MonitorVoteHandlerMgr::getInstance()->removeHandler(this);
|
||||
return;
|
||||
}
|
||||
DetectUtil::translateByteOrder(sequenceId);
|
||||
|
||||
int timeout = 0;
|
||||
len = DetectUtil::recieveMessage(netfd, (char*)&timeout, sizeof(int));
|
||||
if (len != sizeof(int))
|
||||
{
|
||||
monitor_log_error("recieve message failed. fd:%d, len:%d, sequenceId:%" PRIu64, netfd, len, sequenceId);
|
||||
MonitorVoteHandlerMgr::getInstance()->removeHandler(this);
|
||||
return;
|
||||
}
|
||||
timeout = ntohl(timeout);
|
||||
|
||||
DetectHandlerBase::DetectType detectType;
|
||||
len = DetectUtil::recieveMessage(netfd, (char*)&detectType, sizeof(detectType));
|
||||
if (len != sizeof(detectType))
|
||||
{
|
||||
monitor_log_error("recieve message failed. fd:%d, len:%d, sequenceId:%" PRIu64, netfd, len, sequenceId);
|
||||
MonitorVoteHandlerMgr::getInstance()->removeHandler(this);
|
||||
return;
|
||||
}
|
||||
detectType = (DetectHandlerBase::DetectType)(ntohl(detectType));
|
||||
|
||||
uint16_t dataLen;
|
||||
len = DetectUtil::recieveMessage(netfd, (char*)&dataLen, sizeof(uint16_t));
|
||||
if (len != sizeof(uint16_t))
|
||||
{
|
||||
monitor_log_error("recieve message failed. fd:%d, len:%d, sequenceId:%" PRIu64, netfd, len, sequenceId);
|
||||
MonitorVoteHandlerMgr::getInstance()->removeHandler(this);
|
||||
return;
|
||||
}
|
||||
dataLen = ntohs(dataLen);
|
||||
|
||||
char dataBuff[dataLen + 1];
|
||||
len = DetectUtil::recieveMessage(netfd, (char*)&dataBuff, dataLen);
|
||||
if (len != dataLen)
|
||||
{
|
||||
monitor_log_error("recieve message failed. fd:%d, len:%d, sequenceId:%" PRIu64, netfd, len, sequenceId);
|
||||
MonitorVoteHandlerMgr::getInstance()->removeHandler(this);
|
||||
return;
|
||||
}
|
||||
dataBuff[dataLen] = 0;
|
||||
|
||||
monitor_log_error("recieve vote request, timeout:%d, sequenceId:%" PRIu64, timeout, sequenceId);
|
||||
|
||||
bool isAlive = true;
|
||||
procVoteRequest(detectType, dataBuff, timeout, isAlive);
|
||||
|
||||
if (isAlive)
|
||||
{
|
||||
monitor_log_error("deny the request, sequenceId:%" PRIu64, sequenceId);
|
||||
}
|
||||
else
|
||||
{
|
||||
monitor_log_error("vote the request, sequenceId:%" PRIu64, sequenceId);
|
||||
}
|
||||
sendResponse(sequenceId, !isAlive);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
bool MonitorVoteHandler::procVoteRequest(
|
||||
const DetectHandlerBase::DetectType requestType,
|
||||
const char* dataBuff,
|
||||
const int timeout,
|
||||
bool& isAlive)
|
||||
{
|
||||
int errCode;
|
||||
|
||||
switch (requestType)
|
||||
{
|
||||
case DetectHandlerBase::eAgentDetect:
|
||||
{
|
||||
std::string accessKey, ipWithPort;
|
||||
int accessKeyLen = *(uint8_t*)dataBuff;
|
||||
accessKey.assign(dataBuff + sizeof(uint8_t), accessKeyLen);
|
||||
int ipWithPortLen = *(uint8_t*)(dataBuff + sizeof(uint8_t) + accessKeyLen);
|
||||
ipWithPort.assign(dataBuff + sizeof(uint8_t)*2 + accessKeyLen, ipWithPortLen);
|
||||
int tout = timeout > 0 ? timeout : DtcMonitorConfigMgr::eAgentDefaultTimeout;
|
||||
|
||||
monitor_log_error("start to detect agent. accessKey:%s, ipWithPort:%s", accessKey.c_str(), ipWithPort.c_str());
|
||||
|
||||
return DetectUtil::detectAgentInstance(accessKey, ipWithPort, tout, isAlive, errCode);
|
||||
}
|
||||
case DetectHandlerBase::eDtcDetect:
|
||||
{
|
||||
std::string ipWithPort(dataBuff);
|
||||
int tout = timeout > 0 ? timeout : DtcMonitorConfigMgr::eDtcDefaultTimeout;
|
||||
monitor_log_error("start to detect dtc. ipWithPort:%s", ipWithPort.c_str());
|
||||
|
||||
return DetectUtil::detectDtcInstance(ipWithPort, tout, isAlive, errCode);
|
||||
}
|
||||
default:
|
||||
// if processing failed, do not vote
|
||||
isAlive = true;
|
||||
monitor_log_error("Error: serious internal error.......");
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void MonitorVoteHandler::sendResponse(
|
||||
const uint64_t sequenceId,
|
||||
const bool isVote)
|
||||
{
|
||||
VoteResponse_t response;
|
||||
response.sMagicNum = htons(sgMagicNum);
|
||||
response.sSequenceId = sequenceId;
|
||||
DetectUtil::translateByteOrder(response.sSequenceId);
|
||||
response.sIsVote = isVote;
|
||||
int ret = DetectUtil::sendMessage(netfd, (char*)&response, sizeof(VoteResponse_t));
|
||||
if (ret != sizeof(VoteResponse_t))
|
||||
{
|
||||
monitor_log_error("send response failed. netfd:%d, sequenceId:%" PRIu64, netfd, sequenceId);
|
||||
}
|
||||
|
||||
monitor_log_error("send response successful. netfd:%d, sequenceId:%" PRIu64, netfd, sequenceId);
|
||||
|
||||
if (ret < 0)
|
||||
{
|
||||
// close the connection
|
||||
monitor_log_error("close the connection, netfd:%d", netfd);
|
||||
MonitorVoteHandlerMgr::getInstance()->removeHandler(this);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
@ -1,63 +0,0 @@
|
||||
////////////////////////////////////////////////////////
|
||||
//
|
||||
// connections between cluster using to send vote request
|
||||
// and response
|
||||
// create by qiuyu on Nov 26, 2018
|
||||
//
|
||||
///////////////////////////////////////////////////////
|
||||
|
||||
#ifndef __MONITOR_VOTE_HANDLER_H__
|
||||
#define __MONITOR_VOTE_HANDLER_H__
|
||||
|
||||
#include "DetectHandlerBase.h"
|
||||
|
||||
class MonitorVoteHandler : public CPollerObject
|
||||
{
|
||||
public:
|
||||
static const uint16_t sgMagicNum = 12345; // global magic number
|
||||
|
||||
// private:
|
||||
// CPollThread* mThreadPoll,
|
||||
#pragma pack(push, 1)
|
||||
typedef struct VoteRequestTrans
|
||||
{
|
||||
uint16_t sMagicNum;
|
||||
uint64_t sSequenceId; // for sync call
|
||||
int sTimeout;
|
||||
DetectHandlerBase::DetectType sDetectType;
|
||||
uint16_t sDataLen;
|
||||
char sDataBuff[];
|
||||
}VoteRequest_t;
|
||||
|
||||
typedef struct VoteResponseTrans
|
||||
{
|
||||
uint16_t sMagicNum;
|
||||
uint64_t sSequenceId; // for sync call
|
||||
bool sIsVote;
|
||||
}VoteResponse_t;
|
||||
#pragma pack(pop)
|
||||
|
||||
public:
|
||||
MonitorVoteHandler(
|
||||
CPollThread* poll,
|
||||
const int fd);
|
||||
|
||||
virtual ~MonitorVoteHandler();
|
||||
|
||||
int addVoteEventToPoll();
|
||||
int removeFromEventPoll();
|
||||
virtual void InputNotify(void);
|
||||
|
||||
private:
|
||||
bool procVoteRequest(
|
||||
const DetectHandlerBase::DetectType requestType,
|
||||
const char* dataBuff,
|
||||
const int timeout,
|
||||
bool& isAlive);
|
||||
|
||||
void sendResponse(
|
||||
const uint64_t sequenceId,
|
||||
const bool isVote);
|
||||
};
|
||||
|
||||
#endif // __MONITOR_VOTE_HANDLER_H__
|
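To make the wire format concrete, below is a client-side sketch that serializes one vote request in the same field order and byte-order conventions that MonitorVoteHandler::InputNotify reads it back (magic and data length as network shorts, timeout and detect type as network longs, and the 64-bit sequence id run through DetectUtil::translateByteOrder). buildVoteRequest is a hypothetical helper written for illustration; it is not part of the original code.

#include <arpa/inet.h>    // htons, htonl
#include <stdint.h>
#include <string>
#include <vector>

#include "MonitorVoteHandler.h"   // sgMagicNum and the VoteRequest_t field order
#include "DetectUtil.h"           // DetectUtil::translateByteOrder / sendMessage

// hypothetical helper: build the byte stream for one vote request
std::vector<char> buildVoteRequest(uint64_t sequenceId,
                                   int timeoutMs,
                                   DetectHandlerBase::DetectType type,
                                   const std::string &payload)
{
    uint16_t magic   = htons(MonitorVoteHandler::sgMagicNum);
    uint64_t seq     = sequenceId;
    DetectUtil::translateByteOrder(seq);            // same 64-bit swap the server undoes
    int      timeout = htonl(timeoutMs);
    int      dtype   = htonl((int)type);
    uint16_t dataLen = htons((uint16_t)payload.size());

    std::vector<char> buf;
    buf.insert(buf.end(), (char*)&magic,   (char*)&magic   + sizeof(magic));
    buf.insert(buf.end(), (char*)&seq,     (char*)&seq     + sizeof(seq));
    buf.insert(buf.end(), (char*)&timeout, (char*)&timeout + sizeof(timeout));
    buf.insert(buf.end(), (char*)&dtype,   (char*)&dtype   + sizeof(dtype));
    buf.insert(buf.end(), (char*)&dataLen, (char*)&dataLen + sizeof(dataLen));
    buf.insert(buf.end(), payload.begin(), payload.end());
    return buf;   // ready to be written with DetectUtil::sendMessage
}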
@ -1,80 +0,0 @@
|
||||
//////////////////////////////////////////////////
|
||||
//
|
||||
// epoll driver that handle client vote request event,
|
||||
// created by qiuyu on Nov 27,2018
|
||||
//
|
||||
//////////////////////////////////////////////////
|
||||
|
||||
#include "MonitorVoteHandlerMgr.h"
|
||||
#include "MonitorVoteHandler.h"
|
||||
#include "poll_thread.h"
|
||||
#include "lock.h"
|
||||
|
||||
MonitorVoteHandlerMgr::MonitorVoteHandlerMgr()
|
||||
:
|
||||
mVoteEventPoll(new CPollThread("handleVoteThreadPoll")),
|
||||
mMutexLock(new CMutex())
|
||||
{
|
||||
}
|
||||
|
||||
MonitorVoteHandlerMgr::~MonitorVoteHandlerMgr()
|
||||
{
|
||||
if (mVoteEventPoll) delete mVoteEventPoll;
|
||||
if (mMutexLock) delete mMutexLock;
|
||||
|
||||
for (size_t idx = 0; idx < mVoteHandlers.size(); idx++)
|
||||
{
|
||||
if (mVoteHandlers[idx]) delete mVoteHandlers[idx];
|
||||
}
|
||||
}
|
||||
|
||||
bool MonitorVoteHandlerMgr::startVoteHandlerMgr()
|
||||
{
|
||||
int ret = mVoteEventPoll->InitializeThread();
|
||||
if (ret < 0)
|
||||
{
|
||||
monitor_log_error("initialize thread poll failed.");
|
||||
return false;
|
||||
}
|
||||
|
||||
mVoteEventPoll->RunningThread();
|
||||
monitor_log_info("start handleVoteThreadPoll successful.");
|
||||
return true;
|
||||
}
|
||||
|
||||
// all vote handlers must be added into the epoll through this function to
|
||||
// ensure the poll thread's safety
|
||||
void MonitorVoteHandlerMgr::addHandler(MonitorVoteHandler* handler)
|
||||
{
|
||||
mMutexLock->lock();
|
||||
// poll should be protected by lock
|
||||
handler->addVoteEventToPoll();
|
||||
mVoteHandlers.push_back(handler);
|
||||
mMutexLock->unlock();
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// because the number of handlers should be small,
|
||||
// removing one directly from the vector is not a big problem
|
||||
void MonitorVoteHandlerMgr::removeHandler(MonitorVoteHandler* handler)
|
||||
{
|
||||
mMutexLock->lock();
|
||||
for (size_t idx = 0; idx < mVoteHandlers.size(); idx++)
|
||||
{
|
||||
if (mVoteHandlers[idx] == handler)
|
||||
{
|
||||
// need to detach it from the poll
|
||||
handler->removeFromEventPoll();
|
||||
|
||||
delete mVoteHandlers[idx];
|
||||
mVoteHandlers.erase(mVoteHandlers.begin() + idx);
|
||||
mMutexLock->unlock();
|
||||
return;
|
||||
}
|
||||
}
|
||||
mMutexLock->unlock();
|
||||
|
||||
monitor_log_error("handler must be in the container, serious issue.");
|
||||
return;
|
||||
}
|
@ -1,44 +0,0 @@
|
||||
//////////////////////////////////////////////////
|
||||
//
|
||||
// epoll driver that handle client vote request event
|
||||
// created by qiuyu on Nov 27,2018
|
||||
//
|
||||
//////////////////////////////////////////////////
|
||||
|
||||
#ifndef __MONITOR_VOTE_HANDLER_MGR_H__
|
||||
#define __MONITOR_VOTE_HANDLER_MGR_H__
|
||||
|
||||
#include "singleton.h"
|
||||
|
||||
#include <vector>
|
||||
|
||||
class CPollThread;
|
||||
class CMutex;
|
||||
class MonitorVoteHandler;
|
||||
|
||||
class MonitorVoteHandlerMgr
|
||||
{
|
||||
private:
|
||||
CPollThread* mVoteEventPoll;
|
||||
CMutex* mMutexLock;
|
||||
std::vector<MonitorVoteHandler*> mVoteHandlers;
|
||||
|
||||
public:
|
||||
MonitorVoteHandlerMgr();
|
||||
~MonitorVoteHandlerMgr();
|
||||
|
||||
static MonitorVoteHandlerMgr* getInstance()
|
||||
{
|
||||
return CSingleton<MonitorVoteHandlerMgr>::Instance();
|
||||
}
|
||||
|
||||
bool startVoteHandlerMgr();
|
||||
|
||||
CPollThread* getThreadPoll() { return mVoteEventPoll; }
|
||||
void addHandler(MonitorVoteHandler* handler);
|
||||
void removeHandler(MonitorVoteHandler* handler);
|
||||
|
||||
private:
|
||||
};
|
||||
|
||||
#endif // __MONITOR_VOTE_HANDLER_MGR_H__
|
@ -1,105 +0,0 @@
|
||||
////////////////////////////////////////////////////////
|
||||
//
|
||||
// Handle detector cluster vote request
|
||||
// create by qiuyu on Nov 26, 2018
|
||||
//
|
||||
///////////////////////////////////////////////////////
|
||||
|
||||
#include "MonitorVoteListener.h"
|
||||
#include "MonitorVoteHandlerMgr.h"
|
||||
#include "MonitorVoteHandler.h"
|
||||
#include "DtcMonitorConfigMgr.h"
|
||||
|
||||
#include <errno.h>
|
||||
#include <sstream>
|
||||
|
||||
MonitorVoteListener::MonitorVoteListener(CPollThread* poll)
|
||||
:
|
||||
CPollerObject(poll, 0)
|
||||
{
|
||||
init();
|
||||
}
|
||||
|
||||
MonitorVoteListener::~MonitorVoteListener()
|
||||
{
|
||||
}
|
||||
|
||||
int MonitorVoteListener::Bind(int blog)
|
||||
{
|
||||
if ((netfd = SockBind(&mListenAddr, blog, 0, 0, 1/*reuse*/, 1/*nodelay*/, 1/*defer_accept*/)) == -1)
|
||||
{
|
||||
monitor_log_error("bind address failed.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int MonitorVoteListener::attachThread()
|
||||
{
|
||||
EnableInput();
|
||||
int ret = CPollerObject::AttachPoller();
|
||||
if (ret < 0)
|
||||
{
|
||||
monitor_log_error("add event to poll failed.");
|
||||
return -1;
|
||||
}
|
||||
monitor_log_info("add listen event to poll successful, fd:%d", netfd);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// handle client connection
|
||||
void MonitorVoteListener::InputNotify()
|
||||
{
|
||||
int newFd;
|
||||
struct sockaddr peer;
|
||||
socklen_t peerSize = sizeof(peer);
|
||||
|
||||
// extract all established connections from the pending queue until accept returns
|
||||
// EAGAIN
|
||||
while (true)
|
||||
{
|
||||
newFd = accept(netfd, (struct sockaddr *)&peer, &peerSize);
|
||||
if (-1 == newFd)
|
||||
{
|
||||
if (errno == EINTR)
|
||||
{
|
||||
// system call "accept" was interrupted by signal before a valid connection
|
||||
// arrived, go on accept
|
||||
continue;
|
||||
}
|
||||
|
||||
if(errno == EAGAIN || errno == EWOULDBLOCK)
|
||||
{
|
||||
// no remaining connection on the pending queue, break out
|
||||
// log_notice("accept new client error: %m, %d", errno);
|
||||
return;
|
||||
}
|
||||
|
||||
// accept error
|
||||
monitor_log_error("accept new client failed, netfd:%d, errno:%d", netfd, errno);
|
||||
return;
|
||||
}
|
||||
|
||||
monitor_log_error("accept new client, newFd:%d", newFd);
|
||||
|
||||
// add the handler vote event to another poll driver
|
||||
CPollThread* poll = MonitorVoteHandlerMgr::getInstance()->getThreadPoll();
|
||||
MonitorVoteHandler * handler = new MonitorVoteHandler(poll, newFd);
|
||||
MonitorVoteHandlerMgr::getInstance()->addHandler(handler);
|
||||
monitor_log_info("create new vote handler successful, fd:%d", newFd);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
void MonitorVoteListener::init()
|
||||
{
|
||||
std::stringstream bindAddr;
|
||||
const std::pair<std::string, int>& addr = DtcMonitorConfigMgr::getInstance()->getListenAddr();
|
||||
bindAddr << addr.first << ":" << addr.second << "/tcp";
|
||||
mListenAddr.SetAddress(bindAddr.str().c_str(), (const char*)NULL);
|
||||
|
||||
return;
|
||||
}
|
@ -1,34 +0,0 @@
|
||||
////////////////////////////////////////////////////////
|
||||
//
|
||||
// Handle detector cluster vote request
|
||||
// create by qiuyu on Nov 26, 2018
|
||||
//
|
||||
///////////////////////////////////////////////////////
|
||||
|
||||
#ifndef __MONITOR_VOTE_LISTENER_H__
|
||||
#define __MONITOR_VOTE_LISTENER_H__
|
||||
|
||||
#include "sockaddr.h"
|
||||
#include "poller.h"
|
||||
|
||||
class CPollThread;
|
||||
|
||||
class MonitorVoteListener : public CPollerObject
|
||||
{
|
||||
private:
|
||||
CSocketAddress mListenAddr;
|
||||
|
||||
public:
|
||||
MonitorVoteListener(CPollThread* poll);
|
||||
|
||||
virtual ~MonitorVoteListener();
|
||||
|
||||
int Bind(const int blog = 256);
|
||||
int attachThread();
|
||||
virtual void InputNotify (void);
|
||||
|
||||
private:
|
||||
void init();
|
||||
};
|
||||
|
||||
#endif // __MONITOR_VOTE_LISTENER_H__
|
@ -1,276 +0,0 @@
|
||||
#ifndef __H_TTC_MYSQL_ERROR_H__
|
||||
#define __H_TTC_MYSQL_ERROR_H__
|
||||
enum {
|
||||
ER_HASHCHK=1000,
|
||||
ER_NISAMCHK=1001,
|
||||
ER_NO=1002,
|
||||
ER_YES=1003,
|
||||
ER_CANT_CREATE_FILE=1004,
|
||||
ER_CANT_CREATE_TABLE=1005,
|
||||
ER_CANT_CREATE_DB=1006,
|
||||
ER_DB_CREATE_EXISTS=1007,
|
||||
ER_DB_DROP_EXISTS=1008,
|
||||
ER_DB_DROP_DELETE=1009,
|
||||
ER_DB_DROP_RMDIR=1010,
|
||||
ER_CANT_DELETE_FILE=1011,
|
||||
ER_CANT_FIND_SYSTEM_REC=1012,
|
||||
ER_CANT_GET_STAT=1013,
|
||||
ER_CANT_GET_WD=1014,
|
||||
ER_CANT_LOCK=1015,
|
||||
ER_CANT_OPEN_FILE=1016,
|
||||
ER_FILE_NOT_FOUND=1017,
|
||||
ER_CANT_READ_DIR=1018,
|
||||
ER_CANT_SET_WD=1019,
|
||||
ER_CHECKREAD=1020,
|
||||
ER_DISK_FULL=1021,
|
||||
ER_DUP_KEY=1022,
|
||||
ER_ERROR_ON_CLOSE=1023,
|
||||
ER_ERROR_ON_READ=1024,
|
||||
ER_ERROR_ON_RENAME=1025,
|
||||
ER_ERROR_ON_WRITE=1026,
|
||||
ER_FILE_USED=1027,
|
||||
ER_FILSORT_ABORT=1028,
|
||||
ER_FORM_NOT_FOUND=1029,
|
||||
ER_GET_ERRNO=1030,
|
||||
ER_ILLEGAL_HA=1031,
|
||||
ER_KEY_NOT_FOUND=1032,
|
||||
ER_NOT_FORM_FILE=1033,
|
||||
ER_NOT_KEYFILE=1034,
|
||||
ER_OLD_KEYFILE=1035,
|
||||
ER_OPEN_AS_READONLY=1036,
|
||||
ER_OUTOFMEMORY=1037,
|
||||
ER_OUT_OF_SORTMEMORY=1038,
|
||||
ER_UNEXPECTED_EOF=1039,
|
||||
ER_CON_COUNT_ERROR=1040,
|
||||
ER_OUT_OF_RESOURCES=1041,
|
||||
ER_BAD_HOST_ERROR=1042,
|
||||
ER_HANDSHAKE_ERROR=1043,
|
||||
ER_DBACCESS_DENIED_ERROR=1044,
|
||||
ER_ACCESS_DENIED_ERROR=1045,
|
||||
ER_NO_DB_ERROR=1046,
|
||||
ER_UNKNOWN_COM_ERROR=1047,
|
||||
ER_BAD_NULL_ERROR=1048,
|
||||
ER_BAD_DB_ERROR=1049,
|
||||
ER_TABLE_EXISTS_ERROR=1050,
|
||||
ER_BAD_TABLE_ERROR=1051,
|
||||
ER_NON_UNIQ_ERROR=1052,
|
||||
ER_SERVER_SHUTDOWN=1053,
|
||||
ER_BAD_FIELD_ERROR=1054,
|
||||
ER_WRONG_FIELD_WITH_GROUP=1055,
|
||||
ER_WRONG_GROUP_FIELD=1056,
|
||||
ER_WRONG_SUM_SELECT=1057,
|
||||
ER_WRONG_VALUE_COUNT=1058,
|
||||
ER_TOO_LONG_IDENT=1059,
|
||||
ER_DUP_FIELDNAME=1060,
|
||||
ER_DUP_KEYNAME=1061,
|
||||
ER_DUP_ENTRY=1062,
|
||||
ER_WRONG_FIELD_SPEC=1063,
|
||||
ER_PARSE_ERROR=1064,
|
||||
ER_EMPTY_QUERY=1065,
|
||||
ER_NONUNIQ_TABLE=1066,
|
||||
ER_INVALID_DEFAULT=1067,
|
||||
ER_MULTIPLE_PRI_KEY=1068,
|
||||
ER_TOO_MANY_KEYS=1069,
|
||||
ER_TOO_MANY_KEY_PARTS=1070,
|
||||
ER_TOO_LONG_KEY=1071,
|
||||
ER_KEY_COLUMN_DOES_NOT_EXITS=1072,
|
||||
ER_BLOB_USED_AS_KEY=1073,
|
||||
ER_TOO_BIG_FIELDLENGTH=1074,
|
||||
ER_WRONG_AUTO_KEY=1075,
|
||||
ER_READY=1076,
|
||||
ER_NORMAL_SHUTDOWN=1077,
|
||||
ER_GOT_SIGNAL=1078,
|
||||
ER_SHUTDOWN_COMPLETE=1079,
|
||||
ER_FORCING_CLOSE=1080,
|
||||
ER_IPSOCK_ERROR=1081,
|
||||
ER_NO_SUCH_INDEX=1082,
|
||||
ER_WRONG_FIELD_TERMINATORS=1083,
|
||||
ER_BLOBS_AND_NO_TERMINATED=1084,
|
||||
ER_TEXTFILE_NOT_READABLE=1085,
|
||||
ER_FILE_EXISTS_ERROR=1086,
|
||||
ER_LOAD_INFO=1087,
|
||||
ER_ALTER_INFO=1088,
|
||||
ER_WRONG_SUB_KEY=1089,
|
||||
ER_CANT_REMOVE_ALL_FIELDS=1090,
|
||||
ER_CANT_DROP_FIELD_OR_KEY=1091,
|
||||
ER_INSERT_INFO=1092,
|
||||
ER_INSERT_TABLE_USED=1093,
|
||||
ER_NO_SUCH_THREAD=1094,
|
||||
ER_KILL_DENIED_ERROR=1095,
|
||||
ER_NO_TABLES_USED=1096,
|
||||
ER_TOO_BIG_SET=1097,
|
||||
ER_NO_UNIQUE_LOGFILE=1098,
|
||||
ER_TABLE_NOT_LOCKED_FOR_WRITE=1099,
|
||||
ER_TABLE_NOT_LOCKED=1100,
|
||||
ER_BLOB_CANT_HAVE_DEFAULT=1101,
|
||||
ER_WRONG_DB_NAME=1102,
|
||||
ER_WRONG_TABLE_NAME=1103,
|
||||
ER_TOO_BIG_SELECT=1104,
|
||||
ER_UNKNOWN_ERROR=1105,
|
||||
ER_UNKNOWN_PROCEDURE=1106,
|
||||
ER_WRONG_PARAMCOUNT_TO_PROCEDURE=1107,
|
||||
ER_WRONG_PARAMETERS_TO_PROCEDURE=1108,
|
||||
ER_UNKNOWN_TABLE=1109,
|
||||
ER_FIELD_SPECIFIED_TWICE=1110,
|
||||
ER_INVALID_GROUP_FUNC_USE=1111,
|
||||
ER_UNSUPPORTED_EXTENSION=1112,
|
||||
ER_TABLE_MUST_HAVE_COLUMNS=1113,
|
||||
ER_RECORD_FILE_FULL=1114,
|
||||
ER_UNKNOWN_CHARACTER_SET=1115,
|
||||
ER_TOO_MANY_TABLES=1116,
|
||||
ER_TOO_MANY_FIELDS=1117,
|
||||
ER_TOO_BIG_ROWSIZE=1118,
|
||||
ER_STACK_OVERRUN=1119,
|
||||
ER_WRONG_OUTER_JOIN=1120,
|
||||
ER_NULL_COLUMN_IN_INDEX=1121,
|
||||
ER_CANT_FIND_UDF=1122,
|
||||
ER_CANT_INITIALIZE_UDF=1123,
|
||||
ER_UDF_NO_PATHS=1124,
|
||||
ER_UDF_EXISTS=1125,
|
||||
ER_CANT_OPEN_LIBRARY=1126,
|
||||
ER_CANT_FIND_DL_ENTRY=1127,
|
||||
ER_FUNCTION_NOT_DEFINED=1128,
|
||||
ER_HOST_IS_BLOCKED=1129,
|
||||
ER_HOST_NOT_PRIVILEGED=1130,
|
||||
ER_PASSWORD_ANONYMOUS_USER=1131,
|
||||
ER_PASSWORD_NOT_ALLOWED=1132,
|
||||
ER_PASSWORD_NO_MATCH=1133,
|
||||
ER_UPDATE_INFO=1134,
|
||||
ER_CANT_CREATE_THREAD=1135,
|
||||
ER_WRONG_VALUE_COUNT_ON_ROW=1136,
|
||||
ER_CANT_REOPEN_TABLE=1137,
|
||||
ER_INVALID_USE_OF_NULL=1138,
|
||||
ER_REGEXP_ERROR=1139,
|
||||
ER_MIX_OF_GROUP_FUNC_AND_FIELDS=1140,
|
||||
ER_NONEXISTING_GRANT=1141,
|
||||
ER_TABLEACCESS_DENIED_ERROR=1142,
|
||||
ER_COLUMNACCESS_DENIED_ERROR=1143,
|
||||
ER_ILLEGAL_GRANT_FOR_TABLE=1144,
|
||||
ER_GRANT_WRONG_HOST_OR_USER=1145,
|
||||
ER_NO_SUCH_TABLE=1146,
|
||||
ER_NONEXISTING_TABLE_GRANT=1147,
|
||||
ER_NOT_ALLOWED_COMMAND=1148,
|
||||
ER_SYNTAX_ERROR=1149,
|
||||
ER_DELAYED_CANT_CHANGE_LOCK=1150,
|
||||
ER_TOO_MANY_DELAYED_THREADS=1151,
|
||||
ER_ABORTING_CONNECTION=1152,
|
||||
ER_NET_PACKET_TOO_LARGE=1153,
|
||||
ER_NET_READ_ERROR_FROM_PIPE=1154,
|
||||
ER_NET_FCNTL_ERROR=1155,
|
||||
ER_NET_PACKETS_OUT_OF_ORDER=1156,
|
||||
ER_NET_UNCOMPRESS_ERROR=1157,
|
||||
ER_NET_READ_ERROR=1158,
|
||||
ER_NET_READ_INTERRUPTED=1159,
|
||||
ER_NET_ERROR_ON_WRITE=1160,
|
||||
ER_NET_WRITE_INTERRUPTED=1161,
|
||||
ER_TOO_LONG_STRING=1162,
|
||||
ER_TABLE_CANT_HANDLE_BLOB=1163,
|
||||
ER_TABLE_CANT_HANDLE_AUTO_INCREMENT=1164,
|
||||
ER_DELAYED_INSERT_TABLE_LOCKED=1165,
|
||||
ER_WRONG_COLUMN_NAME=1166,
|
||||
ER_WRONG_KEY_COLUMN=1167,
|
||||
ER_WRONG_MRG_TABLE=1168,
|
||||
ER_DUP_UNIQUE=1169,
|
||||
ER_BLOB_KEY_WITHOUT_LENGTH=1170,
|
||||
ER_PRIMARY_CANT_HAVE_NULL=1171,
|
||||
ER_TOO_MANY_ROWS=1172,
|
||||
ER_REQUIRES_PRIMARY_KEY=1173,
|
||||
ER_NO_RAID_COMPILED=1174,
|
||||
ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE=1175,
|
||||
ER_KEY_DOES_NOT_EXITS=1176,
|
||||
ER_CHECK_NO_SUCH_TABLE=1177,
|
||||
ER_CHECK_NOT_IMPLEMENTED=1178,
|
||||
ER_CANT_DO_THIS_DURING_AN_TRANSACTION=1179,
|
||||
ER_ERROR_DURING_COMMIT=1180,
|
||||
ER_ERROR_DURING_ROLLBACK=1181,
|
||||
ER_ERROR_DURING_FLUSH_LOGS=1182,
|
||||
ER_ERROR_DURING_CHECKPOINT=1183,
|
||||
ER_NEW_ABORTING_CONNECTION=1184,
|
||||
ER_DUMP_NOT_IMPLEMENTED= 1185,
|
||||
ER_FLUSH_MASTER_BINLOG_CLOSED=1186,
|
||||
ER_INDEX_REBUILD= 1187,
|
||||
ER_MASTER=1188,
|
||||
ER_MASTER_NET_READ=1189,
|
||||
ER_MASTER_NET_WRITE=1190,
|
||||
ER_FT_MATCHING_KEY_NOT_FOUND=1191,
|
||||
ER_LOCK_OR_ACTIVE_TRANSACTION=1192,
|
||||
ER_UNKNOWN_SYSTEM_VARIABLE=1193,
|
||||
ER_CRASHED_ON_USAGE=1194,
|
||||
ER_CRASHED_ON_REPAIR=1195,
|
||||
ER_WARNING_NOT_COMPLETE_ROLLBACK=1196,
|
||||
ER_TRANS_CACHE_FULL=1197,
|
||||
ER_SLAVE_MUST_STOP=1198,
|
||||
ER_SLAVE_NOT_RUNNING=1199,
|
||||
ER_BAD_SLAVE=1200,
|
||||
ER_MASTER_INFO=1201,
|
||||
ER_SLAVE_THREAD=1202,
|
||||
ER_TOO_MANY_USER_CONNECTIONS=1203,
|
||||
ER_SET_CONSTANTS_ONLY=1204,
|
||||
ER_LOCK_WAIT_TIMEOUT=1205,
|
||||
ER_LOCK_TABLE_FULL=1206,
|
||||
ER_READ_ONLY_TRANSACTION=1207,
|
||||
ER_DROP_DB_WITH_READ_LOCK=1208,
|
||||
ER_CREATE_DB_WITH_READ_LOCK=1209,
|
||||
ER_WRONG_ARGUMENTS=1210,
|
||||
ER_NO_PERMISSION_TO_CREATE_USER=1211,
|
||||
ER_UNION_TABLES_IN_DIFFERENT_DIR=1212,
|
||||
ER_LOCK_DEADLOCK=1213,
|
||||
ER_TABLE_CANT_HANDLE_FULLTEXT=1214,
|
||||
ER_CANNOT_ADD_FOREIGN=1215,
|
||||
ER_NO_REFERENCED_ROW=1216,
|
||||
ER_ROW_IS_REFERENCED=1217,
|
||||
ER_CONNECT_TO_MASTER=1218,
|
||||
ER_QUERY_ON_MASTER=1219,
|
||||
ER_ERROR_WHEN_EXECUTING_COMMAND=1220,
|
||||
ER_WRONG_USAGE=1221,
|
||||
ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT=1222,
|
||||
ER_CANT_UPDATE_WITH_READLOCK=1223,
|
||||
ER_MIXING_NOT_ALLOWED=1224,
|
||||
ER_DUP_ARGUMENT=1225,
|
||||
ER_USER_LIMIT_REACHED=1226,
|
||||
ER_SPECIFIC_ACCESS_DENIED_ERROR=1227,
|
||||
ER_LOCAL_VARIABLE=1228,
|
||||
ER_GLOBAL_VARIABLE=1229,
|
||||
ER_NO_DEFAULT=1230,
|
||||
ER_WRONG_VALUE_FOR_VAR=1231,
|
||||
ER_WRONG_TYPE_FOR_VAR=1232,
|
||||
ER_VAR_CANT_BE_READ=1233,
|
||||
ER_CANT_USE_OPTION_HERE=1234,
|
||||
ER_NOT_SUPPORTED_YET=1235,
|
||||
ER_MASTER_FATAL_ERROR_READING_BINLOG=1236,
|
||||
ER_SLAVE_IGNORED_TABLE=1237,
|
||||
ER_INCORRECT_GLOBAL_LOCAL_VAR=1238,
|
||||
CR_UNKNOWN_ERROR=1900,
|
||||
CR_SOCKET_CREATE_ERROR=1901,
|
||||
CR_CONNECTION_ERROR=1902,
|
||||
CR_CONN_HOST_ERROR=1903,
|
||||
CR_IPSOCK_ERROR =1904,
|
||||
CR_UNKNOWN_HOST =1905,
|
||||
CR_VERSION_ERROR=1907,
|
||||
CR_OUT_OF_MEMORY=1908,
|
||||
CR_WRONG_HOST_INFO=1909,
|
||||
CR_LOCALHOST_CONNECTION=1910,
|
||||
CR_TCP_CONNECTION=1911,
|
||||
CR_SERVER_HANDSHAKE_ERR=1912,
|
||||
CR_SERVER_LOST=1913,
|
||||
CR_COMMANDS_OUT_OF_SYNC=1914,
|
||||
CR_NAMEDPIPE_CONNECTION=1915,
|
||||
CR_NAMEDPIPEWAIT_ERROR=1916,
|
||||
CR_NAMEDPIPEOPEN_ERROR=1917,
|
||||
CR_NAMEDPIPESETSTATE_ERROR=1918,
|
||||
CR_CANT_READ_CHARSET=1919,
|
||||
CR_NET_PACKET_TOO_LARGE=1920,
|
||||
CR_EMBEDDED_CONNECTION=1921,
|
||||
CR_PROBE_SLAVE_STATUS=1922,
|
||||
CR_PROBE_SLAVE_HOSTS=1923,
|
||||
CR_PROBE_SLAVE_CONNECT=1924,
|
||||
CR_PROBE_MASTER_CONNECT=1925,
|
||||
CR_SSL_CONNECTION_ERROR=1926,
|
||||
CR_MALFORMED_PACKET=1927,
|
||||
CR_WRONG_LICENSE=1928,
|
||||
|
||||
// mysql client errno
|
||||
// reference: https://dev.mysql.com/doc/refman/5.7/en/client-error-reference.html
|
||||
CR_SERVER_GONE_ERROR=2006,
|
||||
};
|
||||
#endif
|
@ -1,55 +0,0 @@
|
||||
///////////////////////////////////////////////////////////
|
||||
// //
|
||||
// package the binary posix semaphore shared between   //
|
||||
// threads; process communication is not supported     //
|
||||
///////////////////////////////////////////////////////////
|
||||
|
||||
#ifndef __SEM_H__
|
||||
#define __SEM_H__
|
||||
|
||||
#include <time.h>
|
||||
#include <semaphore.h>
|
||||
|
||||
//#include <stdint.h>
|
||||
|
||||
class Sem
|
||||
{
|
||||
private:
|
||||
sem_t mOriSem;
|
||||
|
||||
public:
|
||||
Sem()
|
||||
{
|
||||
sem_init(&mOriSem, 0, 0);
|
||||
}
|
||||
|
||||
inline int semWait()
|
||||
{
|
||||
return sem_wait(&mOriSem);
|
||||
}
|
||||
|
||||
inline int semTryWait()
|
||||
{
|
||||
return sem_trywait(&mOriSem);
|
||||
}
|
||||
|
||||
inline int semTimeWait(const uint64_t& micSeconds)
|
||||
{
|
||||
struct timespec timer;
|
||||
clock_gettime(CLOCK_REALTIME, &timer);
|
||||
|
||||
uint64_t expiredSec = (micSeconds / 1000) + (timer.tv_nsec + (micSeconds % 1000) * 1000 * 1000) / (1000 * 1000 * 1000);
|
||||
timer.tv_sec += expiredSec;
|
||||
timer.tv_nsec = (timer.tv_nsec + (micSeconds % 1000) * 1000 * 1000) % (1000 * 1000 * 1000);
|
||||
|
||||
return sem_timedwait(&mOriSem, &timer);
|
||||
}
|
||||
|
||||
inline int semPost()
|
||||
{
|
||||
return sem_post(&mOriSem);
|
||||
}
|
||||
};
|
||||
|
||||
#endif //__SEM_H__
|
||||
|
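A minimal usage sketch for the wrapper above (the thread body and the header file name are illustrative assumptions). Note that, despite the parameter name micSeconds, the conversion inside semTimeWait treats the argument as milliseconds: whole seconds come from value/1000 and the remainder is scaled by 1,000,000 ns.

#include <pthread.h>
#include <cstdio>

#include "Sem.h"        // assumed file name for the header above

static Sem gSem;        // binary semaphore shared between threads
static int gValue = 0;

static void* producer(void*)
{
    gValue = 42;        // publish some work
    gSem.semPost();     // wake the waiter
    return NULL;
}

int main()
{
    pthread_t tid;
    pthread_create(&tid, NULL, producer, NULL);

    // per the conversion in semTimeWait this waits up to 500 ms
    if (gSem.semTimeWait(500) == 0)
        printf("got value %d\n", gValue);
    else
        printf("timed out\n");

    pthread_join(tid, NULL);
    return 0;
}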
@ -1,131 +0,0 @@
|
||||
////////////////////////////////////////////////////////
|
||||
//
|
||||
// Handle detector cluster vote request
|
||||
// create by qiuyu on Nov 26, 2018
|
||||
//
|
||||
///////////////////////////////////////////////////////
|
||||
|
||||
#include "MonitorConnHandler.h"
|
||||
|
||||
MonitorConnHandler::MonitorConnHandler(CPollThread* poll)
|
||||
:
|
||||
CPollerObject(poll, 0),
|
||||
mThreadPoll(poll)
|
||||
{
|
||||
}
|
||||
|
||||
MonitorConnHandler::~MonitorConnHandler()
|
||||
{
|
||||
}
|
||||
|
||||
int MonitorConnHandler::AttachThread()
|
||||
{
|
||||
EnableInput();
|
||||
int ret = CPollerObject::AttachPoller();
|
||||
if (ret < 0)
|
||||
{
|
||||
monitor_log_error("add event to poll failed.");
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// handle vote request and send response
|
||||
void MonitorConnHandler::InputNotify()
|
||||
{
|
||||
int recv_len = readSocket((void*)m_RecvBuf,4);
|
||||
if (recv_len == 0)
|
||||
{
|
||||
monitor_log_error("client close the fd.");
|
||||
CPollerObject::DetachPoller();
|
||||
delete this;
|
||||
return;
|
||||
}
|
||||
if (recv_len != 4)
|
||||
{
|
||||
monitor_log_error("revieve package header[cmdtype] error revcd:%d",recv_len);
|
||||
SendResponse(RCV_HEADER_ERR);
|
||||
return;
|
||||
}
|
||||
|
||||
m_CmdType = (MIGRATE_CMD_TYPE) *(int32_t *)m_RecvBuf;
|
||||
|
||||
recv_len = readSocket((void*)m_RecvBuf,4);
|
||||
if (recv_len != 4)
|
||||
{
|
||||
monitor_log_error("revieve package header[xml length] error revcd:%d",recv_len);
|
||||
SendResponse(RCV_HEADER_ERR);
|
||||
return;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
int CMigrateTask::readSocket(void* szBuf, int nLen)
|
||||
{
|
||||
int nRead = 0;
|
||||
int nRet = 0;
|
||||
do {
|
||||
nRet = read(netfd, (char*)szBuf + nRead, nLen - nRead);
|
||||
if (nRet > 0)
|
||||
{
|
||||
nRead += nRet;
|
||||
if(nRead == nLen)
|
||||
return nRead;
|
||||
continue;
|
||||
}
|
||||
else if (nRet == 0)
|
||||
{
|
||||
return nRead;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (errno == EAGAIN || errno == EINTR)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
else
|
||||
{
|
||||
return nRead;
|
||||
}
|
||||
}
|
||||
}while(nRead < nLen);
|
||||
return nLen;
|
||||
}
|
||||
|
||||
// for broadcast to get votes
|
||||
int CMigrateTask::writeSocket(void* szBuf, int nLen)
|
||||
{
|
||||
int nWrite = 0;
|
||||
int nRet = 0;
|
||||
do {
|
||||
nRet = write(netfd, (char*)szBuf + nWrite, nLen - nWrite);
|
||||
if (nRet > 0)
|
||||
{
|
||||
nWrite += nRet;
|
||||
if(nLen == nWrite)
|
||||
{
|
||||
return nWrite;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (errno == EINTR || errno == EAGAIN)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
else
|
||||
{
|
||||
return nWrite;
|
||||
}
|
||||
}
|
||||
}
|
||||
while(nWrite < nLen);
|
||||
return nLen;
|
||||
}
|
||||
|
||||
void CMigrateTask::SendResponse(E_MIGRATERSP e_Result)
|
||||
{
|
||||
writeSocket((void*)(&e_Result), sizeof(e_Result) );
|
||||
}
|
@ -1,26 +0,0 @@
|
||||
////////////////////////////////////////////////////////
|
||||
//
|
||||
// connections between cluster using to send vote request
|
||||
// and response
|
||||
// create by qiuyu on Nov 26, 2018
|
||||
//
|
||||
///////////////////////////////////////////////////////
|
||||
|
||||
#ifndef __MONITOR_CONN_HANDLER_H__
|
||||
#define __MONITOR_CONN_HANDLER_H__
|
||||
#include "poller.h"  // CPollerObject base class (same include used by MonitorVoteListener.h)
|
||||
class CPollThread;
|
||||
|
||||
class MonitorConnHandler : public CPollerObject
|
||||
{
|
||||
private:
|
||||
CPollThread* mThreadPoll;
|
||||
|
||||
public:
|
||||
MonitorConnHandler(CPollThread* poll);
|
||||
|
||||
virtual ~MonitorConnHandler();
|
||||
|
||||
int AttachThread();
|
||||
virtual void InputNotify (void);
|
||||
};
|
||||
|
||||
#endif // __MONITOR_CONN_HANDLER_H__
|
@ -1,141 +0,0 @@
|
||||
/**
|
||||
* Atomic counter class.
|
||||
* Jul 16, 2019
|
||||
* By qiuyu
|
||||
*/
|
||||
|
||||
#ifndef __ATOMIC_H_
|
||||
#define __ATOMIC_H_
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
__BEGIN_DECLS // extern "C"{
|
||||
|
||||
#define ATOMIC_LOCK "lock ; "
|
||||
|
||||
// the volatile keyword prevents the compiler from optimizing accesses to this variable
|
||||
typedef struct { volatile int counter; } my_atomic_t;
|
||||
|
||||
#define atomic_read(v) ((v)->counter)
|
||||
|
||||
#define atomic_set(v,i) (((v)->counter) = (i))
|
||||
|
||||
__END_DECLS // }
|
||||
|
||||
/**
|
||||
* Atomic operation class: performs atomic operations on an int
|
||||
*/
|
||||
class Atomic
|
||||
{
|
||||
public:
|
||||
// atomic value type
|
||||
typedef int atomic_type;
|
||||
|
||||
// constructor, initialized to 0
|
||||
Atomic(atomic_type at = 0)
|
||||
{
|
||||
set(at);
|
||||
}
|
||||
|
||||
Atomic& operator++()
|
||||
{
|
||||
inc();
|
||||
return *this;
|
||||
}
|
||||
|
||||
Atomic& operator--()
|
||||
{
|
||||
dec();
|
||||
return *this;
|
||||
}
|
||||
|
||||
operator atomic_type() const
|
||||
{
|
||||
return get();
|
||||
}
|
||||
|
||||
Atomic& operator+=(atomic_type n)
|
||||
{
|
||||
add(n);
|
||||
return *this;
|
||||
}
|
||||
|
||||
Atomic& operator-=(atomic_type n)
|
||||
{
|
||||
sub(n);
|
||||
return *this;
|
||||
}
|
||||
|
||||
Atomic& operator=(atomic_type n)
|
||||
{
|
||||
set(n);
|
||||
return *this;
|
||||
}
|
||||
|
||||
atomic_type get() const { return mCounterValue.counter; }
|
||||
|
||||
// add
|
||||
atomic_type add(atomic_type i) { return add_and_return(i); }
|
||||
|
||||
// subtract
|
||||
atomic_type sub(atomic_type i) { return add_and_return(-i); }
|
||||
|
||||
// increment by 1
|
||||
atomic_type inc() { return add(1); }
|
||||
|
||||
// decrement by 1
|
||||
atomic_type dec() { return sub(1); }
|
||||
|
||||
// increment by 1
|
||||
void inc_fast()
|
||||
{
|
||||
__asm__ __volatile__(
|
||||
ATOMIC_LOCK "incl %0"
|
||||
:"=m" (mCounterValue.counter)
|
||||
:"m" (mCounterValue.counter));
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief decrement by 1.
|
||||
* Atomically decrements @mCounterValue by 1 and returns true if the
|
||||
* result is 0, or false otherwise
|
||||
*/
|
||||
bool dec_and_test()
|
||||
{
|
||||
unsigned char c;
|
||||
|
||||
__asm__ __volatile__(
|
||||
ATOMIC_LOCK "decl %0; sete %1"
|
||||
:"=m" (mCounterValue.counter), "=qm" (c)
|
||||
:"m" (mCounterValue.counter) : "memory");
|
||||
|
||||
return c != 0;
|
||||
}
|
||||
|
||||
// set the value
|
||||
atomic_type set(atomic_type i)
|
||||
{
|
||||
mCounterValue.counter = i;
|
||||
|
||||
return i;
|
||||
}
|
||||
|
||||
protected:
|
||||
// add and return the new value
|
||||
int add_and_return(int i)
|
||||
{
|
||||
/* Modern 486+ processor */
|
||||
int __i = i;
|
||||
__asm__ __volatile__(
|
||||
ATOMIC_LOCK "xaddl %0, %1;"
|
||||
:"=r"(i)
|
||||
:"m"(mCounterValue.counter), "0"(i));
|
||||
return i + __i;
|
||||
}
|
||||
|
||||
protected:
|
||||
// the counter value
|
||||
my_atomic_t mCounterValue;
|
||||
};
|
||||
|
||||
#endif // __ATOMIC_H_
|
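A short usage sketch for the Atomic counter above (the header file name is an assumption; the inline assembly targets x86/x86-64, as the "Modern 486+ processor" comment implies):

#include <cstdio>

#include "Atomic.h"     // assumed file name for the header above

int main()
{
    Atomic counter(10);

    ++counter;          // lock-prefixed increment, now 11
    counter += 5;       // now 16
    counter -= 6;       // now 10
    --counter;          // now 9

    // dec_and_test() returns true only when the counter hits zero
    Atomic refs(1);
    if (refs.dec_and_test())
        printf("last reference dropped\n");

    printf("counter = %d\n", (int)counter);
    return 0;
}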
@ -1,438 +0,0 @@
|
||||
/**
|
||||
* Smart pointer class (smart pointers must not reference each other cyclically, otherwise memory leaks).
|
||||
*
|
||||
* Every class that needs smart pointer support must inherit from this object.
|
||||
*
|
||||
* Internally implemented with an Atomic reference count; objects can be stored in containers.
|
||||
*
|
||||
* AutoPtrBase is the counter, AutoPtr is the smart pointer template class.
|
||||
*
|
||||
* Jul 15, 2019
|
||||
* by qiuyu
|
||||
*/
|
||||
|
||||
#ifndef __AUTO_PTR_H_
|
||||
#define __AUTO_PTR_H_
|
||||
|
||||
#include "Atomic.h"
|
||||
#include "log.h"
|
||||
|
||||
#include <algorithm>
|
||||
|
||||
/*
|
||||
* Smart pointer base class.
|
||||
* */
|
||||
template<class T>
|
||||
class AutoPtrBase
|
||||
{
|
||||
public:
|
||||
|
||||
/** atomic counter type */
|
||||
typedef T atomic_type;
|
||||
|
||||
/**
|
||||
* @brief copy assignment.
|
||||
*
|
||||
* @return HandleBase&
|
||||
*/
|
||||
AutoPtrBase& operator=(const AutoPtrBase&)
|
||||
{
|
||||
return *this;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief increase the reference count
|
||||
*/
|
||||
void incRef() { mAtomic.inc_fast(); }
|
||||
|
||||
/**
|
||||
* @brief decrease the reference count; when it reaches 0 and deletion is allowed, release the object
|
||||
*/
|
||||
void decRef()
|
||||
{
|
||||
if(mAtomic.dec_and_test() && !mNoDelete)
|
||||
{
|
||||
mNoDelete = true;
|
||||
delete this;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief get the reference count.
|
||||
*
|
||||
* @return int the count value
|
||||
*/
|
||||
int getRef() const { return mAtomic.get(); }
|
||||
|
||||
/**
|
||||
* @brief set whether the object is released automatically.
|
||||
*
|
||||
* @param b true to disable automatic deletion, false to allow it
|
||||
*/
|
||||
void setNoDelete(bool b) { mNoDelete = b; }
|
||||
|
||||
protected:
|
||||
|
||||
/**
|
||||
* @brief constructor
|
||||
*/
|
||||
AutoPtrBase() : mAtomic(0), mNoDelete(false)
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief copy constructor
|
||||
*/
|
||||
AutoPtrBase(const AutoPtrBase&) : mAtomic(0), mNoDelete(false)
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief destructor
|
||||
*/
|
||||
virtual ~AutoPtrBase()
|
||||
{
|
||||
}
|
||||
|
||||
protected:
|
||||
|
||||
/**
|
||||
* reference count
|
||||
*/
|
||||
atomic_type mAtomic;
|
||||
|
||||
/**
|
||||
* whether automatic deletion is disabled
|
||||
*/
|
||||
bool mNoDelete;
|
||||
};
|
||||
|
||||
template<>
|
||||
inline void AutoPtrBase<int>::incRef()
|
||||
{
|
||||
//__sync_fetch_and_add(&mAtomic,1);
|
||||
++mAtomic;
|
||||
}
|
||||
|
||||
template<>
|
||||
inline void AutoPtrBase<int>::decRef()
|
||||
{
|
||||
//int c = __sync_fetch_and_sub(&mAtomic, 1);
|
||||
//if(c == 1 && !mNoDelete)
|
||||
if(--mAtomic == 0 && !mNoDelete)
|
||||
{
|
||||
mNoDelete = true;
|
||||
delete this;
|
||||
}
|
||||
}
|
||||
|
||||
template<>
|
||||
inline int AutoPtrBase<int>::getRef() const
|
||||
{
|
||||
//return __sync_fetch_and_sub(const_cast<volatile int*>(&mAtomic), 0);
|
||||
return mAtomic;
|
||||
}
|
||||
|
||||
typedef AutoPtrBase<Atomic> HandleBase;
|
||||
|
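A brief usage sketch (Widget, WidgetPtr, and the file name AutoPtr.h are illustrative assumptions): a class gains the intrusive counter by inheriting from HandleBase, and the AutoPtr<T> template defined below manages its lifetime by reference count.

#include <cstdio>

#include "AutoPtr.h"    // assumed file name for this header

// any class managed by AutoPtr must inherit from HandleBase (illustrative type)
class Widget : public HandleBase
{
public:
    void hello() { printf("refs = %d\n", getRef()); }
};

typedef AutoPtr<Widget> WidgetPtr;

int main()
{
    WidgetPtr p = new Widget();   // count becomes 1
    {
        WidgetPtr q = p;          // count becomes 2
        q->hello();
    }                             // q destroyed, count back to 1
    p->hello();
    return 0;                     // p destroyed, count hits 0, Widget deleted
}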
||||
/**
|
||||
* @brief smart pointer template class.
|
||||
*
|
||||
* A thread-safe smart pointer that can be stored in containers.
|
||||
*
|
||||
* Smart pointers are defined through it; the pointer is implemented via reference counting
|
||||
*
|
||||
* and can be passed around inside containers.
|
||||
*
|
||||
* template<typename T>: T must inherit from HandleBase
|
||||
*/
|
||||
template<typename T>
|
||||
class AutoPtr
|
||||
{
|
||||
public:
|
||||
|
||||
/**
|
||||
* element type
|
||||
*/
|
||||
typedef T element_type;
|
||||
|
||||
/**
|
||||
* @brief initialize from a raw pointer, count +1.
|
||||
*
|
||||
* @param p
|
||||
*/
|
||||
AutoPtr(T* p = 0)
|
||||
{
|
||||
mRawPointer = p;
|
||||
|
||||
if(mRawPointer)
|
||||
{
|
||||
mRawPointer->incRef();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief initialize from the raw pointer held by another smart pointer r, count +1.
|
||||
*
|
||||
* @param Y
|
||||
* @param r
|
||||
*/
|
||||
/*用Y类型的智能指针对象r来构造当前对象
|
||||
* eg:
|
||||
* AutoPtr<Y> yPtr;
|
||||
* AutoPtr<T> tPtr(yPtr)*/
|
||||
template<typename Y>
|
||||
AutoPtr(const AutoPtr<Y>& r)
|
||||
{
|
||||
mRawPointer = r.mRawPointer;
|
||||
|
||||
if(mRawPointer)
|
||||
{
|
||||
mRawPointer->incRef();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief 拷贝构造, 计数+1.
|
||||
*
|
||||
* @param r
|
||||
*/
|
||||
AutoPtr(const AutoPtr& r)
|
||||
{
|
||||
mRawPointer = r.mRawPointer;
|
||||
|
||||
if(mRawPointer)
|
||||
{
|
||||
mRawPointer->incRef();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief 析构
|
||||
*/
|
||||
~AutoPtr()
|
||||
{
|
||||
if(mRawPointer)
|
||||
{
|
||||
mRawPointer->decRef();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief 赋值, 普通指针.
|
||||
*
|
||||
* @param p
|
||||
* @return AutoPtr&
|
||||
*/
|
||||
AutoPtr& operator=(T* p)
|
||||
{
|
||||
if(mRawPointer != p)
|
||||
{
|
||||
if(p)
|
||||
{
|
||||
p->incRef();
|
||||
}
|
||||
|
||||
T* ptr = mRawPointer;
|
||||
mRawPointer = p;
|
||||
|
||||
if(ptr)
|
||||
{
|
||||
ptr->decRef();
|
||||
}
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief 赋值, 其他类型智能指针.
|
||||
*
|
||||
* @param Y
|
||||
* @param r
|
||||
* @return AutoPtr&
|
||||
*/
|
||||
template<typename Y>
|
||||
AutoPtr& operator=(const AutoPtr<Y>& r)
|
||||
{
|
||||
if(mRawPointer != r.mRawPointer)
|
||||
{
|
||||
if(r.mRawPointer)
|
||||
{
|
||||
r.mRawPointer->incRef();
|
||||
}
|
||||
|
||||
T* ptr = mRawPointer;
|
||||
mRawPointer = r.mRawPointer;
|
||||
|
||||
if(ptr)
|
||||
{
|
||||
ptr->decRef();
|
||||
}
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Assignment from another smart pointer of the same type.
|
||||
*
|
||||
* @param r
|
||||
* @return AutoPtr&
|
||||
*/
|
||||
AutoPtr& operator=(const AutoPtr& r)
|
||||
{
|
||||
if(mRawPointer != r.mRawPointer)
|
||||
{
|
||||
if(r.mRawPointer)
|
||||
{
|
||||
r.mRawPointer->incRef();
|
||||
}
|
||||
|
||||
T* ptr = mRawPointer;
|
||||
mRawPointer = r.mRawPointer;
|
||||
|
||||
if(ptr)
|
||||
{
|
||||
ptr->decRef();
|
||||
}
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief 将其他类型的智能指针换成当前类型的智能指针.
|
||||
*
|
||||
* @param Y
|
||||
* @param r
|
||||
* @return AutoPtr
|
||||
*/
|
||||
template<class Y>
|
||||
static AutoPtr dynamicCast(const AutoPtr<Y>& r)
|
||||
{
|
||||
return AutoPtr(dynamic_cast<T*>(r.mRawPointer));
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief 将其他原生类型的指针转换成当前类型的智能指针.
|
||||
*
|
||||
* @param Y
|
||||
* @param p
|
||||
* @return AutoPtr
|
||||
*/
|
||||
template<class Y>
|
||||
static AutoPtr dynamicCast(Y* p)
|
||||
{
|
||||
return AutoPtr(dynamic_cast<T*>(p));
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief 获取原生指针.
|
||||
*
|
||||
* @return T*
|
||||
*/
|
||||
T* get() const
|
||||
{
|
||||
return mRawPointer;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief 调用.
|
||||
*
|
||||
* @return T*
|
||||
*/
|
||||
T* operator->() const
|
||||
{
|
||||
if(!mRawPointer)
|
||||
{
|
||||
log_error("internal error, raw pointer is null!");
|
||||
}
|
||||
|
||||
return mRawPointer;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief 引用.
|
||||
*
|
||||
* @return T&
|
||||
*/
|
||||
T& operator*() const
|
||||
{
|
||||
if(!mRawPointer)
|
||||
{
|
||||
log_error("internal error, raw pointer is null!");
|
||||
}
|
||||
|
||||
return *mRawPointer;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief 是否有效.
|
||||
*
|
||||
* @return bool
|
||||
*/
|
||||
operator bool() const
|
||||
{
|
||||
return mRawPointer ? true : false;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief 交换指针.
|
||||
*
|
||||
* @param other
|
||||
*/
|
||||
void swap(AutoPtr& other)
|
||||
{
|
||||
std::swap(mRawPointer, other.mRawPointer);
|
||||
}
|
||||
|
||||
public:
|
||||
T* mRawPointer;
|
||||
};
|
||||
|
||||
template<typename T, typename U>
|
||||
inline bool operator==(const AutoPtr<T>& lhs, const AutoPtr<U>& rhs)
|
||||
{
|
||||
T* l = lhs.get();
|
||||
U* r = rhs.get();
|
||||
if(l && r)
|
||||
{
|
||||
return *l == *r;
|
||||
}
|
||||
else
|
||||
{
|
||||
return !l && !r;
|
||||
}
|
||||
}
|
||||
|
||||
template<typename T, typename U>
|
||||
inline bool operator!=(const AutoPtr<T>& lhs, const AutoPtr<U>& rhs)
|
||||
{
|
||||
T* l = lhs.get();
|
||||
U* r = rhs.get();
|
||||
if(l && r)
|
||||
{
|
||||
return *l != *r;
|
||||
}
|
||||
else
|
||||
{
|
||||
return l || r;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 小于判断, 用于放在map等容器中.
|
||||
*/
|
||||
template<typename T, typename U>
|
||||
inline bool operator<(const AutoPtr<T>& lhs, const AutoPtr<U>& rhs)
|
||||
{
|
||||
T* l = lhs.get();
|
||||
U* r = rhs.get();
|
||||
if(l && r)
|
||||
{
|
||||
return *l < *r;
|
||||
}
|
||||
else
|
||||
{
|
||||
return !l && r;
|
||||
}
|
||||
}
|
||||
#endif // __AUTO_PTR_H_
|
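For orientation, a minimal usage sketch (the Foo type below is hypothetical; it only has to inherit from HandleBase so it carries the reference count):

#include "AutoPtr.h"

// hypothetical reference-counted type
class Foo : public HandleBase
{
public:
    int mValue;
    Foo(int v) : mValue(v) {}
};
typedef AutoPtr<Foo> FooPtr;

void demo()
{
    FooPtr p = new Foo(42);   // refcount becomes 1
    FooPtr q = p;             // refcount becomes 2
    p = NULL;                 // refcount drops back to 1
}                             // q destructs, refcount hits 0, the Foo is deleted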
@ -1,359 +0,0 @@
|
||||
/////////////////////////////////////////////////////////////
|
||||
//
|
||||
// Task Pool, thread unsafe.
|
||||
// Author:qiuyu
|
||||
// Date:Jul 12th,2019
|
||||
//
|
||||
////////////////////////////////////////////////////////////
|
||||
|
||||
#include "ConcurrTransExecutor.h"
|
||||
|
||||
#define MAX_POOL_CAPACITY 1000000
|
||||
|
||||
/////////////////////////////////////////////////////////////
|
||||
//
|
||||
// BasicRequest relevant
|
||||
//
|
||||
/////////////////////////////////////////////////////////////
|
||||
BasicRequest::BasicRequest()
|
||||
:
|
||||
mTransactionId(0),
|
||||
mExpiredWhen(-1), // never timeout
|
||||
mSem(NULL),
|
||||
mHeadRequest(NULL),
|
||||
mNextRequest(NULL)
|
||||
{
|
||||
}
|
||||
|
||||
BasicRequest::~BasicRequest()
|
||||
{
|
||||
mSem = NULL;
|
||||
mHeadRequest = NULL;
|
||||
mNextRequest =NULL;
|
||||
}
|
||||
|
||||
void BasicRequest::setErrorCode(BasicRequest::RequestError errCode)
|
||||
{
|
||||
if (!mHeadRequest)
|
||||
{
|
||||
log_error("head pointer can not be empty!");
|
||||
return;
|
||||
}
|
||||
|
||||
int errValue = 0;
|
||||
switch (errCode)
|
||||
{
|
||||
default:
|
||||
case eProcFailed:
|
||||
errValue = ((errValue + eProcFailed) << 16);
|
||||
break;
|
||||
case eTimeout:
|
||||
errValue = ((errValue + eTimeout) << 16);
|
||||
break;
|
||||
}
|
||||
log_info("set err code:%d", errValue);
|
||||
mHeadRequest->mFinishedReqNum.add(errValue);
|
||||
mHeadRequest = NULL;
|
||||
|
||||
return;
|
||||
}
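In other words, mFinishedReqNum of the head request doubles as a status word: every finished request adds 1 to the low 16 bits, while a timeout or failure adds (errCode << 16). procRequestPrev() therefore treats any non-zero high 16 bits as a failed transaction and compares the low bits against TASK_NUM(transId) to detect completion.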
|
||||
|
||||
int BasicRequest::procRequestPrev()
|
||||
{
|
||||
// call the real implementation function
|
||||
int ret = procRequest();
|
||||
if (ret < 0)
|
||||
{
|
||||
mHeadRequest = NULL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (!mHeadRequest)
|
||||
{
|
||||
log_error("Head request can not be Null!");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// if the request is sync, just return and the main thread will do the procResponse
|
||||
if (mSem)
|
||||
{
|
||||
log_info("sync trans, transId:%" PRIu64, mTransactionId);
|
||||
mHeadRequest = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
// async trans, need to collect the response by ourselves
|
||||
Atomic rValue = mHeadRequest->mFinishedReqNum.inc();
|
||||
if (((int)rValue >> 16) != 0)
|
||||
{
|
||||
log_error("Trans has failed, transId:%" PRIu64, mTransactionId);
|
||||
mHeadRequest = NULL;
|
||||
return -1;
|
||||
}
|
||||
|
||||
if ((int)rValue != (int)TASK_NUM(mTransactionId))
|
||||
{
|
||||
mHeadRequest = NULL;
|
||||
return 1;
|
||||
}
|
||||
|
||||
// trans finished, collect result
|
||||
std::vector<BasicRequestPtr> results;
|
||||
BasicRequestPtr curr = mHeadRequest;
|
||||
// log_error("request ref:%d", curr.mRawPointer->getRef());
|
||||
while (curr)
|
||||
{
|
||||
results.push_back(curr);
|
||||
// log_error("request ref:%d", curr.mRawPointer->getRef());
|
||||
curr = curr->mNextRequest;
|
||||
}
|
||||
mHeadRequest = NULL;
|
||||
|
||||
return procResponsePrev(results);
|
||||
}
|
||||
|
||||
int BasicRequest::procResponsePrev(std::vector<BasicRequestPtr> &results)
|
||||
{
|
||||
// check all request state first
|
||||
if (results.size() <= 0)
|
||||
{
|
||||
log_error("invalid response value!");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// header request must be the first one
|
||||
if (((int)results[0]->mFinishedReqNum >> 16) != 0)
|
||||
{
|
||||
log_error("Trans has failed, transId:%" PRIu64, mTransactionId);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return procResponse(results);
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////
|
||||
//
|
||||
// ConcurrTransExecutor relevant
|
||||
//
|
||||
/////////////////////////////////////////////////////////////
|
||||
ConcurrTransExecutor::ConcurrTransExecutor()
|
||||
:
|
||||
mBaseTransacId(1)
|
||||
{
|
||||
log_info("construct ConcurrTransExecutor!");
|
||||
// create thread pool
|
||||
// user configurable with config(CPU-bound and IO-bound)
|
||||
int threadNum = 3;
|
||||
for (int idx = 0; idx < threadNum; idx++)
|
||||
{
|
||||
mThreadPool.push_back(new ThreadShell(this));
|
||||
mThreadPool[idx]->start();
|
||||
}
|
||||
|
||||
// user configurable
|
||||
size_t cap = 10000000;
|
||||
mTransPoolMaxCapacity = cap > MAX_POOL_CAPACITY ? MAX_POOL_CAPACITY : cap;
|
||||
}
|
||||
|
||||
ConcurrTransExecutor::~ConcurrTransExecutor()
|
||||
{
|
||||
terminatePoolThread();
|
||||
// wait for all thread finished(the normal way is to call join)
|
||||
sleep(3);
|
||||
// log_info("deconstruct ConcurrTransExecutor!");
|
||||
}
|
||||
|
||||
bool
|
||||
ConcurrTransExecutor::executeTransAsync(std::vector<BasicRequestPtr> trans)
|
||||
{
|
||||
size_t transNum = trans.size();
|
||||
int ret = isPoolOverload(transNum);
|
||||
if (ret >= 0)
|
||||
{
|
||||
// not overload
|
||||
}
|
||||
else if (ret == -1)
|
||||
{
|
||||
// current pool size more than 3/4 of the capacity, pay attention to it
|
||||
log_error("request too fast, trans pool will be full soon, pay attention!");
|
||||
}
|
||||
else
|
||||
{
|
||||
// pool is full, discard this trans
|
||||
log_error("trans pool is full, need to limit the inputs, discard this request!");
|
||||
return false;
|
||||
}
|
||||
|
||||
uint64_t transId = createTransId(transNum);
|
||||
if (transId == 0)
|
||||
{
|
||||
log_error("get trans id faield, size:%zu, currTransId:%" PRIu64, transNum, mBaseTransacId);
|
||||
return false;
|
||||
}
|
||||
|
||||
for (size_t idx = 0; idx < transNum; idx++)
|
||||
{
|
||||
trans[idx]->setSem(NULL);
|
||||
trans[idx]->setTransId(transId);
|
||||
trans[idx]->bindLinkList(trans[0], (idx + 1) == transNum ? (BasicRequest*)NULL : trans[idx + 1]);
|
||||
mTransactionPool.push_back(trans[idx]);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool ConcurrTransExecutor::executeTransSync(
|
||||
std::vector<BasicRequestPtr>& trans,
|
||||
int64_t msTimeout)
|
||||
{
|
||||
size_t transNum = trans.size();
|
||||
int ret = isPoolOverload(transNum);
|
||||
if (ret >= 0)
|
||||
{
|
||||
// not overload
|
||||
}
|
||||
else if (ret == -1)
|
||||
{
|
||||
// current pool size more than 3/4 of the capacity, pay attention to it
|
||||
log_error("request too fast, trans pool will be full soon, pay attention!");
|
||||
}
|
||||
else
|
||||
{
|
||||
// pool is full, discard this trans
|
||||
log_error("trans pool is full, need to limit the inputs, discard this request!");
|
||||
return false;
|
||||
}
|
||||
|
||||
uint64_t transId = createTransId(transNum);
|
||||
if (transId == 0)
|
||||
{
|
||||
log_error("get trans id faield, size:%zu, currTransId:%" PRIu64, transNum, mBaseTransacId);
|
||||
return false;
|
||||
}
|
||||
|
||||
SemPtr sem = new Sem();
|
||||
for (size_t idx = 0; idx < transNum; idx++)
|
||||
{
|
||||
trans[idx]->setSem(sem);
|
||||
trans[idx]->setTimeout(msTimeout);
|
||||
trans[idx]->setTransId(transId);
|
||||
trans[idx]->bindLinkList(trans[0], (idx + 1) == transNum ? (BasicRequest*)NULL : trans[idx + 1]);
|
||||
// log_error("request ref:%d", trans[0].mRawPointer->getRef());
|
||||
mTransactionPool.push_back(trans[idx]);
|
||||
}
|
||||
|
||||
// wait for the trans been executed
|
||||
for (size_t idx = 0; idx < transNum; idx++)
|
||||
{
|
||||
ret = trans[idx]->timeWait(msTimeout);
|
||||
if (ret < 0)
|
||||
{
|
||||
log_error("proc request failed. errno:%d", errno);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// proc response here, use header request to collect the result
|
||||
if (!trans[0])
|
||||
{
|
||||
log_error("internal error! header request can not be NULL.");
|
||||
return false;
|
||||
}
|
||||
ret = trans[0]->procResponsePrev(trans);
|
||||
|
||||
return ret < 0 ? false : true;
|
||||
}
|
||||
|
||||
BasicRequestPtr
|
||||
ConcurrTransExecutor::getTask()
|
||||
{
|
||||
BasicRequestPtr task;
|
||||
bool rslt = mTransactionPool.pop_front(task, -1);
|
||||
if (!rslt)
|
||||
{
|
||||
// when the pool is being stopped, the thread shell wakes up from the queue with false
|
||||
// log_error("get task from queue failed.");
|
||||
return NULL;
|
||||
}
|
||||
// log_error("request ref:%d", task.mRawPointer->getRef());
|
||||
|
||||
return task;
|
||||
}
|
||||
|
||||
void ConcurrTransExecutor::terminatePoolThread()
|
||||
{
|
||||
// stop thread
|
||||
for (size_t idx = 0; idx < mThreadPool.size(); idx++)
|
||||
{
|
||||
mThreadPool[idx]->terminate();
|
||||
}
|
||||
|
||||
// trigger those thread sleep on the queue
|
||||
mTransactionPool.notifyT();
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
int ConcurrTransExecutor::isPoolOverload(size_t currTransNum)
|
||||
{
|
||||
size_t currPoolSize = mTransactionPool.size();
|
||||
size_t totalSize = currPoolSize + currTransNum;
|
||||
if (totalSize <= (mTransPoolMaxCapacity * 3 / 4))
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
else if (totalSize > mTransPoolMaxCapacity)
|
||||
{
|
||||
return -2;
|
||||
}
|
||||
|
||||
// need to limit the request speed
|
||||
return -1;
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////
|
||||
//
|
||||
// ThreadShell relevant
|
||||
//
|
||||
////////////////////////////////////////////////////////////
|
||||
void
|
||||
ConcurrTransExecutor::ThreadShell::run()
|
||||
{
|
||||
while (mThreadState != Thread::eExited)
|
||||
{
|
||||
// if no task in the queue, get function will sink into block
|
||||
BasicRequestPtr oneTask = mOwner->getTask();
|
||||
if (!oneTask)
|
||||
{
|
||||
// log_info("internal error, get empty task from queue! threadState:%d", mThreadState);
|
||||
continue;
|
||||
}
|
||||
// log_error("request ref:%d", oneTask.mRawPointer->getRef());
|
||||
uint64_t currTransId = oneTask->getTransId();
|
||||
log_info("proc transaction, transId:%" PRIu64, currTransId);
|
||||
// check timeout
|
||||
bool rslt = oneTask->isTimeout();
|
||||
if (rslt)
|
||||
{
|
||||
// mark this transaction as timeout
|
||||
oneTask->setErrorCode(BasicRequest::eTimeout);
|
||||
oneTask->wakeUp();
|
||||
log_error("proc task timeout! transId:%" PRIu64, currTransId);
|
||||
continue;
|
||||
}
|
||||
|
||||
// proc task
|
||||
int ret = oneTask->procRequestPrev();
|
||||
if (ret < 0)
|
||||
{
|
||||
oneTask->setErrorCode(BasicRequest::eProcFailed);
|
||||
oneTask->wakeUp();
|
||||
log_error("proc request failed, transId:%" PRIu64, currTransId);
|
||||
continue;
|
||||
}
|
||||
|
||||
oneTask->wakeUp();
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
@ -1,171 +0,0 @@
|
||||
/////////////////////////////////////////////////////////////
|
||||
//
|
||||
// Task Pool, thread unsafe.
|
||||
// Author:qiuyu
|
||||
// Date:Jul 12th,2019
|
||||
//
|
||||
////////////////////////////////////////////////////////////
|
||||
|
||||
#ifndef __CONCURRENT_TRANSACTION_EXECUTOR_H_
|
||||
#define __CONCURRENT_TRANSACTION_EXECUTOR_H_
|
||||
|
||||
#include "Sem.h"
|
||||
#include "ThreadQueue.h"
|
||||
#include "SingletonBase.h"
|
||||
#include "Thread.h"
|
||||
#include "timestamp.h"
|
||||
|
||||
#include <stdint.h>
|
||||
#include <map>
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
|
||||
#define __STDC_FORMAT_MACROS
|
||||
#include <inttypes.h>
|
||||
|
||||
// can never be larger than 16
|
||||
#define TASK_NUM_SHIFT 8
|
||||
#define MAX_TRANS_ID ((1ULL << (64 - TASK_NUM_SHIFT)) - 1)
|
||||
#define SIZE_VALID(size) \
|
||||
( \
|
||||
{ \
|
||||
bool rslt = true; \
|
||||
if (size <= 0 || size > (1ULL << TASK_NUM_SHIFT)) \
|
||||
rslt = false; \
|
||||
rslt; \
|
||||
} \
|
||||
)
|
||||
|
||||
#define TASK_NUM(transId) (transId & ((1ULL << TASK_NUM_SHIFT) - 1))
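For example, with TASK_NUM_SHIFT == 8 a transaction of 10 requests created while mBaseTransacId is 5 gets transId = (5 << 8) + 10 = 1290; TASK_NUM(1290) == 10 recovers the request count, and SIZE_VALID rejects transactions with more than 256 requests.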
|
||||
|
||||
class BasicRequest;
|
||||
typedef AutoPtr<BasicRequest> BasicRequestPtr;
|
||||
|
||||
// user need to override this class
|
||||
class BasicRequest : public HandleBase
|
||||
{
|
||||
private:
|
||||
uint64_t mTransactionId;
|
||||
uint64_t mExpiredWhen;
|
||||
SemPtr mSem; // Sem is NULL means async trans
|
||||
|
||||
// the high 16 bits for Error code and the others for task counter
|
||||
Atomic mFinishedReqNum;
|
||||
BasicRequestPtr mHeadRequest;
|
||||
BasicRequestPtr mNextRequest;
|
||||
|
||||
public:
|
||||
enum RequestError
|
||||
{
|
||||
// Note: error codes must not be assigned manually, they must increment automatically
|
||||
eTimeout = 1,
|
||||
eProcFailed
|
||||
};
|
||||
|
||||
public:
|
||||
BasicRequest();
|
||||
virtual ~BasicRequest();
|
||||
|
||||
private:
|
||||
// user override function
|
||||
virtual int procRequest() = 0;
|
||||
// @Return value: success 0, error return -1
|
||||
virtual int procResponse(std::vector<BasicRequestPtr>& requests) = 0;
|
||||
|
||||
public:
|
||||
void setTransId(uint64_t transId) { mTransactionId = transId; }
|
||||
uint64_t getTransId() { return mTransactionId; }
|
||||
|
||||
void setSem(const SemPtr sem) { mSem = sem; }
|
||||
int timeWait(const int64_t msTimeout) { return mSem->semTimeWait(msTimeout); }
|
||||
void wakeUp() { if (mSem) mSem->semPost(); }
|
||||
|
||||
void setTimeout(int64_t timeout)
|
||||
{
|
||||
mExpiredWhen = timeout < 0 ? (uint64_t)-1
|
||||
: (timeout + GET_TIMESTAMP() < timeout ? (uint64_t)-1 : timeout + GET_TIMESTAMP());
|
||||
}
|
||||
bool isTimeout() { return (uint64_t)GET_TIMESTAMP() >= mExpiredWhen; }
|
||||
|
||||
void setErrorCode(BasicRequest::RequestError errCode);
|
||||
|
||||
private:
|
||||
void bindLinkList(
|
||||
BasicRequestPtr header,
|
||||
BasicRequestPtr next)
|
||||
{
|
||||
mHeadRequest = header;
|
||||
mNextRequest = next;
|
||||
}
|
||||
|
||||
int procRequestPrev();
|
||||
int procResponsePrev(std::vector<BasicRequestPtr> &results);
|
||||
|
||||
friend class ConcurrTransExecutor;
|
||||
};
|
||||
|
||||
class ConcurrTransExecutor : public SingletonBase<ConcurrTransExecutor>
|
||||
{
|
||||
private:
|
||||
class ThreadShell : public Thread , public HandleBase
|
||||
{
|
||||
private:
|
||||
ConcurrTransExecutor *mOwner;
|
||||
|
||||
public:
|
||||
ThreadShell(ConcurrTransExecutor *owner)
|
||||
:
|
||||
mOwner(owner)
|
||||
{
|
||||
// log_info("Construct ThreadShell");
|
||||
}
|
||||
|
||||
virtual ~ThreadShell()
|
||||
{
|
||||
// log_info("deconstruct ThreadShell");
|
||||
}
|
||||
virtual void run();
|
||||
|
||||
void terminate() { mThreadState = Thread::eExited; }
|
||||
};
|
||||
typedef AutoPtr<ThreadShell> ThreadShellPtr;
|
||||
|
||||
private:
|
||||
uint64_t mBaseTransacId; // start with 1
|
||||
std::vector<ThreadShellPtr> mThreadPool;
|
||||
|
||||
size_t mTransPoolMaxCapacity;
|
||||
ThreadQueue<BasicRequestPtr> mTransactionPool;
|
||||
|
||||
public:
|
||||
ConcurrTransExecutor();
|
||||
virtual ~ConcurrTransExecutor();
|
||||
|
||||
bool executeTransAsync(std::vector<BasicRequestPtr> trans);
|
||||
|
||||
// timeout -1 for wait forever
|
||||
bool executeTransSync(
|
||||
std::vector<BasicRequestPtr>& trans,
|
||||
int64_t msTimeout);
|
||||
|
||||
private:
|
||||
inline uint64_t createTransId(size_t taskNum)
|
||||
{
|
||||
bool rslt = SIZE_VALID(taskNum);
|
||||
if (!rslt) return 0;
|
||||
|
||||
uint64_t transId = mBaseTransacId << TASK_NUM_SHIFT;
|
||||
transId += taskNum;
|
||||
mBaseTransacId = (mBaseTransacId >= MAX_TRANS_ID ? 1 : mBaseTransacId + 1);
|
||||
|
||||
return transId;
|
||||
}
|
||||
|
||||
BasicRequestPtr getTask();
|
||||
|
||||
void terminatePoolThread();
|
||||
int isPoolOverload(size_t currTransNum);
|
||||
};
|
||||
|
||||
#endif // __CONCURRENT_TRANSACTION_EXECUTOR_H_
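A condensed sketch of the intended workflow (the EchoRequest name is made up; a full version lives in Demo/ConcurTransPoolTester.cpp below):

#include "ConcurrTransExecutor.h"

// hypothetical request type
class EchoRequest : public BasicRequest
{
private:
    virtual int procRequest()
    {
        // per-request work, runs on one of the pool threads
        return 1;
    }
    virtual int procResponse(std::vector<BasicRequestPtr>& requests)
    {
        // merge the results of the whole transaction; only the head request gets here
        return 0;
    }
};

bool runOnce()
{
    std::vector<BasicRequestPtr> reqs;
    for (int i = 0; i < 10; i++)
        reqs.push_back(new EchoRequest());

    // blocks until all 10 requests of the transaction are done (-1 = no timeout)
    return ConcurrTransExecutor::getInstance()->executeTransSync(reqs, -1);
}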
|
@ -1,85 +0,0 @@
|
||||
// #include "../Atomic.h"
|
||||
// #include "../Thread.h"
|
||||
#include "TransactionExecutor.h"
|
||||
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
#include <bitset>
|
||||
using namespace std;
|
||||
|
||||
const int threadNum = 7;
|
||||
const int gLoopNum = 770000;
|
||||
bool threadFin[threadNum] = {false};
|
||||
// bitset<gLoopNum + 1> gBitValue;
|
||||
char* gBitValue = (char*)malloc(gLoopNum * threadNum + 1);
|
||||
Atomic gThreadValue;
|
||||
|
||||
class MyThread : public Thread
|
||||
{
|
||||
int threadIdx;
|
||||
public:
|
||||
MyThread(int idx) : threadIdx(idx){}
|
||||
void run()
|
||||
{
|
||||
int loopNum = gLoopNum;
|
||||
while (loopNum--)
|
||||
{
|
||||
Atomic rValue = gThreadValue.inc();
|
||||
//gBitValue.set(rValue.get(), 1);
|
||||
gBitValue[rValue.get()] = 1;
|
||||
}
|
||||
threadFin[threadIdx] = true;
|
||||
}
|
||||
};
|
||||
|
||||
int main()
|
||||
{
|
||||
std::vector<MyThread*> threadPool;
|
||||
for (int i = 0; i < threadNum; i++)
|
||||
{
|
||||
threadPool.push_back(new MyThread(i));
|
||||
threadPool[i]->start();
|
||||
}
|
||||
|
||||
// wait for all thread finish
|
||||
int finishedNum = 0;
|
||||
while (finishedNum != threadNum)
|
||||
{
|
||||
sleep(3);

finishedNum = 0;
|
||||
|
||||
for (size_t i = 0; i < threadNum; i++)
|
||||
{
|
||||
if (threadFin[i]) finishedNum++;
|
||||
}
|
||||
}
|
||||
|
||||
// check result
|
||||
int res = gThreadValue.get();
|
||||
if (threadNum * gLoopNum != res)
|
||||
{
|
||||
cout << "test failed. gThreadValue:" << res << " corrValue:" << threadNum * gLoopNum << endl;
|
||||
}
|
||||
else
|
||||
{
|
||||
// set the first bit to 1
|
||||
// gBitValue[0] = 1;
|
||||
// all bit must be 1
|
||||
// if (!gBitValue.all())
|
||||
cout << "test return value begin!" << endl;
|
||||
for (size_t i = 1; i <= gLoopNum * threadNum; i++)
|
||||
{
|
||||
if (!gBitValue[i])
|
||||
{
|
||||
cout << "Test failed in bitmap!" << endl;
|
||||
}
|
||||
else
|
||||
{
|
||||
// cout << "test successful. gThreadValue:" << res << " corrValue:" << threadNum * gLoopNum << endl;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cout << "test successful. gThreadValue:" << res << " corrValue:" << threadNum * gLoopNum << endl;
|
||||
|
||||
return 0;
|
||||
}
|
@ -1,165 +0,0 @@
|
||||
#include "ConcurrTransExecutor.h"
|
||||
|
||||
static int32_t gInitValue = 0;
|
||||
static int64_t gCoalesceValue = 0;
|
||||
|
||||
#define TEST_MEM_LEAK 0
|
||||
|
||||
class Request : public BasicRequest
|
||||
{
|
||||
private:
|
||||
char *mValue;
|
||||
|
||||
public:
|
||||
Request() : mValue(NULL)
|
||||
{
|
||||
// log_error("construct request. this:%x", this);
|
||||
}
|
||||
virtual ~Request()
|
||||
{
|
||||
// log_error("deconstruct request. this:%x", this);
|
||||
free(mValue);  // mValue is allocated with malloc in procRequest()
|
||||
}
|
||||
|
||||
int procRequest()
|
||||
{
|
||||
#if TEST_MEM_LEAK
|
||||
// allocate mem here and release in the response
|
||||
mValue = (char*)malloc(1 << 20); // 1M
|
||||
#else
|
||||
mValue = (char*)malloc(4);
|
||||
*(int*)mValue = gInitValue + 1;
|
||||
log_info("proc finished, value:%d", *(int*)mValue);
|
||||
#endif
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int procResponse(std::vector<BasicRequestPtr>& requests)
|
||||
{
|
||||
log_info("proc trans response!");
|
||||
#if TEST_MEM_LEAK
|
||||
#else
|
||||
for (size_t idx = 0; idx < requests.size(); idx++)
|
||||
{
|
||||
gCoalesceValue += *(int*)(static_cast<Request*>(requests[idx].get())->mValue);
|
||||
// log_error("request ref:%d", requests[idx].mRawPointer->getRef());
|
||||
}
|
||||
#endif
|
||||
}
|
||||
};
|
||||
|
||||
void createLogFile()
|
||||
{
|
||||
const std::string& logFilePath = "./log";
|
||||
_init_log_("TransactionPool", logFilePath.c_str());
|
||||
_set_log_level_(3);
|
||||
|
||||
if (access(logFilePath.c_str(), F_OK) != 0)
|
||||
{
|
||||
std::string cmd = "mkdir " + logFilePath;
|
||||
system(cmd.c_str());
|
||||
}
|
||||
else
|
||||
{
|
||||
log_info("directory exist!!!!");
|
||||
}
|
||||
|
||||
// check whether create successful
|
||||
if (access(logFilePath.c_str(), W_OK | X_OK) < 0)
|
||||
{
|
||||
log_error("create log file directory failed. path:%s", logFilePath.c_str());
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
void limitCurrProcessCpuUsage(
|
||||
const struct timeval &tStart,
|
||||
const float usage)
|
||||
{
|
||||
int64_t tElapse;
|
||||
struct timeval tEnd, tSleep;
|
||||
|
||||
gettimeofday(&tEnd, NULL);
|
||||
tElapse = (tEnd.tv_sec - tStart.tv_sec) * 1000000 + (tEnd.tv_usec - tStart.tv_usec);
|
||||
tElapse *= usage;
|
||||
|
||||
tSleep.tv_sec = tElapse / 1000000;
|
||||
tSleep.tv_usec = tElapse % 1000000;
|
||||
|
||||
select(0, NULL, NULL, NULL, &tSleep);
|
||||
}
|
||||
|
||||
int main()
|
||||
{
|
||||
srand(NULL);
|
||||
|
||||
createLogFile();
|
||||
daemon(1, 0);
|
||||
|
||||
// begin to test the transaction pool
|
||||
struct timeval tv;
|
||||
#if 1
|
||||
int loopNum = 100000000;
|
||||
#else
|
||||
int loopNum = 1;
|
||||
#endif
|
||||
int64_t tBegin = GET_TIMESTAMP();
|
||||
while (loopNum--)
|
||||
{
|
||||
static int curLoop = 0;
|
||||
|
||||
// cpu usage limitation
|
||||
gettimeofday(&tv, NULL);
|
||||
|
||||
std::vector<BasicRequestPtr> reqs;
|
||||
int requestNum = 10;
|
||||
for (int i = 0; i < requestNum; i++)
|
||||
{
|
||||
reqs.push_back(new Request());
|
||||
}
|
||||
gInitValue = random() % 10000;
|
||||
// log_error("request ref:%d", reqs[0].mRawPointer->getRef());
|
||||
|
||||
ConcurrTransExecutor::getInstance()->executeTransSync(reqs, -1);
|
||||
|
||||
// log_error("request ref:%d", reqs[0].mRawPointer->getRef());
|
||||
|
||||
#if TEST_MEM_LEAK
|
||||
#else
|
||||
// check result
|
||||
int64_t targetValue = requestNum * (gInitValue + 1);
|
||||
// int64_t targetValue = requestNum * (gInitValue++);
|
||||
if (targetValue != gCoalesceValue)
|
||||
{
|
||||
log_error("run test failed. initValue:%lld, coalValue:%lld, targValue:%lld", gInitValue, gCoalesceValue, targetValue);
|
||||
return -1;
|
||||
}
|
||||
#endif
|
||||
|
||||
gCoalesceValue = 0;
|
||||
curLoop++;
|
||||
// log_error("runing step:%d", curLoop);
|
||||
|
||||
// limitCurrProcessCpuUsage(tv, 0.6);
|
||||
}
|
||||
int64_t tEnd = GET_TIMESTAMP();
|
||||
|
||||
/*
|
||||
* Summary:
|
||||
* CPU-bound:
|
||||
* ThreadNum DataNum TimeCost(us)
|
||||
* DoubleLock 3 100w [184018359 204070493]
|
||||
* DoubleLock 7(2*core) 100w [180809002 ...]
|
||||
* ResponseUnLock 3 100w 120886830
|
||||
* ResponseUnLock 7 100w 155974665
|
||||
* */
|
||||
log_error("run test success! timeCost:%ld", tEnd - tBegin);
|
||||
/*while (true)
|
||||
{
|
||||
sleep(100);
|
||||
}*/
|
||||
|
||||
return 0;
|
||||
}
|
@ -1,84 +0,0 @@
|
||||
#include "TransactionExecutor.h"
|
||||
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
#include <bitset>
|
||||
using namespace std;
|
||||
|
||||
const int threadNum = 10;
|
||||
|
||||
class MyThread : public Thread
|
||||
{
|
||||
int _threadIdx;
|
||||
const ThreadMutex* _mutex;
|
||||
ThreadCond* _cond;
|
||||
|
||||
public:
|
||||
MyThread(int idx, const ThreadMutex* mutex, ThreadCond* cond)
|
||||
:
|
||||
_threadIdx(idx),
|
||||
_mutex(mutex),
|
||||
_cond(cond)
|
||||
{
|
||||
}
|
||||
|
||||
void run()
|
||||
{
|
||||
// wait for the condition
|
||||
_mutex->lock();
|
||||
_cond->wait<ThreadMutex>(*_mutex);
|
||||
cout << "wake up from signal, thread:" << _threadIdx << endl;
|
||||
_mutex->unlock();
|
||||
}
|
||||
};
|
||||
|
||||
int main()
|
||||
{
|
||||
/*
|
||||
* Summary:
|
||||
* the test result proves that one mutex can be bound to several
* conditions
|
||||
* */
|
||||
ThreadMutex *mutex = new ThreadMutex();
|
||||
|
||||
std::vector<ThreadCond*> conds;
|
||||
for (int i = 0; i < threadNum; i++)
|
||||
{
|
||||
conds.push_back(new ThreadCond());
|
||||
}
|
||||
|
||||
std::vector<MyThread*> threadPool;
|
||||
for (int i = 0; i < threadNum; i++)
|
||||
{
|
||||
threadPool.push_back(new MyThread(i, mutex, conds[i]));
|
||||
threadPool[i]->start();
|
||||
}
|
||||
|
||||
// sleep a while for child thread ready
|
||||
sleep(3);
|
||||
|
||||
// signal them
|
||||
for (size_t idx = 0; idx < conds.size(); idx++)
|
||||
{
|
||||
conds[idx]->signal();
|
||||
// conds[idx]->signal();
|
||||
break;
|
||||
// cout << "singnal it!" << endl;
|
||||
// sleep(1);
|
||||
}
|
||||
|
||||
sleep(1);
|
||||
// mutex->lock();
|
||||
conds[0]->wait<ThreadMutex>(*mutex);
|
||||
mutex->unlock();
|
||||
cout << "================" << endl;
|
||||
|
||||
// waiting for the print messages from the threads
|
||||
while (true)
|
||||
{
|
||||
sleep(3);
|
||||
}
|
||||
cout << "test finished." << endl;
|
||||
|
||||
return 0;
|
||||
}
|
@ -1,163 +0,0 @@
|
||||
#include "TransactionExecutor.h"
|
||||
|
||||
static int64_t gInitValue = 0;
|
||||
static int64_t gCoalesceValue = 0;
|
||||
|
||||
#define TEST_MEM_LEAK 0
|
||||
|
||||
class Response : public BasicResponse
|
||||
{
|
||||
private:
|
||||
int64_t mCounterValue;
|
||||
|
||||
public:
|
||||
Response() : mCounterValue(0) {}
|
||||
|
||||
virtual int procResponse(std::deque<BasicResponsePtr>& response)
|
||||
{
|
||||
while (!response.empty())
|
||||
{
|
||||
BasicResponsePtr resp = response.front();
|
||||
#if TEST_MEM_LEAK
|
||||
char *val = (char*)(static_cast<Response*>(resp.get())->mCounterValue);
|
||||
delete[] val;
|
||||
#else
|
||||
gCoalesceValue += static_cast<Response*>(resp.get())->mCounterValue;
|
||||
#endif
|
||||
response.pop_front();
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
void setValue(const int64_t value) { mCounterValue = value; }
|
||||
};
|
||||
|
||||
class Request : public BasicRequest
|
||||
{
|
||||
public:
|
||||
Request() {}
|
||||
|
||||
int procRequest(BasicResponsePtr& resp)
|
||||
{
|
||||
resp = new Response();
|
||||
#if TEST_MEM_LEAK
|
||||
// allocate mem here and release in the response
|
||||
char *val = new char[1 << 20]; // 1 MB
|
||||
static_cast<Response*>(resp.get())->setValue((int64_t)val);
|
||||
#else
|
||||
int64_t val = gInitValue + 1;
|
||||
static_cast<Response*>(resp.get())->setValue(val);
|
||||
#endif
|
||||
|
||||
return 1;
|
||||
}
|
||||
};
|
||||
|
||||
void createLogFile()
|
||||
{
|
||||
const std::string& logFilePath = "./log";
|
||||
_init_log_("TransactionPool", logFilePath.c_str());
|
||||
_set_log_level_(3);
|
||||
|
||||
if (access(logFilePath.c_str(), F_OK) != 0)
|
||||
{
|
||||
std::string cmd = "mkdir " + logFilePath;
|
||||
system(cmd.c_str());
|
||||
}
|
||||
else
|
||||
{
|
||||
log_info("directory exist!!!!");
|
||||
}
|
||||
|
||||
// check whether create successful
|
||||
if (access(logFilePath.c_str(), W_OK | X_OK) < 0)
|
||||
{
|
||||
log_error("create log file directory failed. path:%s", logFilePath.c_str());
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
void limitCurrProcessCpuUsage(
|
||||
const struct timeval &tStart,
|
||||
const float usage)
|
||||
{
|
||||
int64_t tElapse;
|
||||
struct timeval tEnd, tSleep;
|
||||
|
||||
gettimeofday(&tEnd, NULL);
|
||||
tElapse = (tEnd.tv_sec - tStart.tv_sec) * 1000000 + (tEnd.tv_usec - tStart.tv_usec);
|
||||
tElapse *= usage;
|
||||
|
||||
tSleep.tv_sec = tElapse / 1000000;
|
||||
tSleep.tv_usec = tElapse % 1000000;
|
||||
|
||||
select(0, NULL, NULL, NULL, &tSleep);
|
||||
}
|
||||
|
||||
int main()
|
||||
{
|
||||
srand(NULL);
|
||||
|
||||
createLogFile();
|
||||
daemon(1, 0);
|
||||
|
||||
// begin to test the transaction pool
|
||||
struct timeval tv;
|
||||
#if 1
|
||||
int loopNum = 1000000;
|
||||
#else
|
||||
int loopNum = 10000;
|
||||
#endif
|
||||
int64_t tBegin = GET_TIMESTAMP();
|
||||
while (loopNum--)
|
||||
{
|
||||
static int curLoop = 0;
|
||||
|
||||
// cpu usage limitation
|
||||
gettimeofday(&tv, NULL);
|
||||
|
||||
std::vector<BasicRequestPtr> reqs;
|
||||
int requestNum = 10;
|
||||
for (int i = 0; i < requestNum; i++)
|
||||
{
|
||||
reqs.push_back(new Request());
|
||||
}
|
||||
gInitValue = random();
|
||||
|
||||
TransactionExecutor::getInstance()->executeTransSync(reqs, -1);
|
||||
|
||||
#if TEST_MEM_LEAK
|
||||
#else
|
||||
// check result
|
||||
int64_t targetValue = requestNum * (gInitValue + 1);
|
||||
// int64_t targetValue = requestNum * (gInitValue++);
|
||||
if (targetValue != gCoalesceValue)
|
||||
{
|
||||
log_error("run test failed. initValue:%lld, coalValue:%lld, targValue:%lld", gInitValue, gCoalesceValue, targetValue);
|
||||
return -1;
|
||||
}
|
||||
#endif
|
||||
|
||||
gCoalesceValue = 0;
|
||||
curLoop++;
|
||||
// log_error("runing step:%d", curLoop);
|
||||
|
||||
// limitCurrProcessCpuUsage(tv, 0.6);
|
||||
}
|
||||
int64_t tEnd = GET_TIMESTAMP();
|
||||
|
||||
/*
|
||||
* Summary:
|
||||
* CPU-bound:
|
||||
* ThreadNum DataNum TimeCost(us)
|
||||
* DoubleLock 3 100w [184018359 204070493]
|
||||
* DoubleLock 7(2*core) 100w [180809002 ...]
|
||||
* ResponseUnLock 3 100w 245552365
|
||||
* ResponseUnLock 7 100w 260934935
|
||||
* */
|
||||
log_error("run test success! timeCost:%ld", tEnd - tBegin);
|
||||
sleep(1000000);
|
||||
return 0;
|
||||
}
|
@ -1,47 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
#BIN_NAME="TransactionPool"
|
||||
#BIN_NAME="AtomicTester"
|
||||
#BIN_NAME="ConcurrentTransPoolTester"
|
||||
BIN_NAME="MutexCondTester"
|
||||
|
||||
PROCESS_ID=`pgrep ${BIN_NAME}`
|
||||
if [ -n "$PROCESS_ID" ];then
|
||||
kill -9 $PROCESS_ID
|
||||
echo "$PROCESS_ID exit"
|
||||
fi
|
||||
|
||||
rm -rf ./core.* ./log ./$BIN_NAME
|
||||
g++ -g ${BIN_NAME}".cpp" -I../ -I../../../../common -L../../../../common \
|
||||
-L../TransPool -Bstatic -lcommon -lTransPool -lpthread -o $BIN_NAME
|
||||
|
||||
<<!
|
||||
#test concurrent transaction executor case!
|
||||
rm -rf ../TransPool ./core.* ./log ./$BIN_NAME
|
||||
|
||||
cd ..
|
||||
make -f ./Makefile
|
||||
cd ./Demo
|
||||
|
||||
g++ -g ConcurTransPoolTester.cpp -I../ -I../../../../common -L../../../../common \
|
||||
-L../TransPool -Bstatic -lcommon -lTransPool -lpthread -o $BIN_NAME
|
||||
!
|
||||
|
||||
<<!
|
||||
#test transaction executor case!
|
||||
rm -rf ../TransPool ./core.* ./log ./TransactionPool
|
||||
|
||||
cd ..
|
||||
make -f ./Makefile
|
||||
cd ./Demo
|
||||
|
||||
g++ -g TransPoolTester.cpp -I../ -I../../../../common -L../../../../common \
|
||||
-L../TransPool -Bstatic -lcommon -lTransPool -lpthread -o TransactionPool
|
||||
!
|
||||
|
||||
<<!
|
||||
# test atomic class
|
||||
rm -rf ./core.* ./log ./${BIN_NAME}
|
||||
g++ -g AtomicTester.cpp -I.. -I../../../../common -L../../../../common \
|
||||
-L../TransPool -Bstatic -lTransPool -lcommon -lpthread -o ${BIN_NAME}
|
||||
!
|
@ -1,255 +0,0 @@
|
||||
/**
|
||||
* Lock guard templates:
* 1. These classes are not concrete locks; the concrete lock type is chosen by the template parameter.
* 2. A class that needs locking only has to inherit from ThreadMutex, ThreadRecMutex, ThreadLock,
*    ThreadRecLock, etc. ThreadMutex and ThreadLock are non-reentrant, while ThreadRecMutex and
*    ThreadRecLock are reentrant; ThreadMutex and ThreadRecMutex are plain mutexes, whereas
*    ThreadLock and ThreadRecLock combine a mutex with a condition variable and can be used for
*    thread synchronization and communication
|
||||
* Jul 16, 2019
|
||||
* By qiuyu
|
||||
*/
|
||||
|
||||
#ifndef __LOCK_H_
|
||||
#define __LOCK_H_
|
||||
|
||||
#include "log.h"
|
||||
|
||||
#include <string>
|
||||
#include <stdexcept>
|
||||
#include <cerrno>
|
||||
|
||||
/**
|
||||
* @brief Lock guard template, used together with a concrete lock type;
* locks on construction and unlocks on destruction
|
||||
*/
|
||||
template <typename T>
|
||||
class Lock
|
||||
{
|
||||
public:
|
||||
/**
|
||||
* @brief Constructor; acquires the lock on construction
* @param mutex the lock object
|
||||
*/
|
||||
Lock(const T& mutex)
|
||||
:
|
||||
mMutex(mutex)
|
||||
{
|
||||
mMutex.lock();
|
||||
mAcquired = true;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief 析构,析构时解锁
|
||||
*/
|
||||
virtual ~Lock()
|
||||
{
|
||||
if (mAcquired)
|
||||
{
|
||||
mMutex.unlock();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Acquire the lock; logs an error and returns false if it is already held
|
||||
*/
|
||||
bool acquire() const
|
||||
{
|
||||
if (mAcquired)
|
||||
{
|
||||
log_error("lock has beed locked!");
|
||||
return false;
|
||||
}
|
||||
mMutex.lock();
|
||||
mAcquired = true;
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief 尝试上锁.
|
||||
*/
|
||||
bool tryAcquire() const
|
||||
{
|
||||
mAcquired = mMutex.tryLock();
|
||||
return mAcquired;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Release the lock; logs an error and returns false if it is not held
|
||||
*/
|
||||
bool release() const
|
||||
{
|
||||
if (!mAcquired)
|
||||
{
|
||||
log_error("thread hasn't been locked!");
|
||||
return false;
|
||||
}
|
||||
|
||||
mMutex.unlock();
|
||||
mAcquired = false;
return true;
}
|
||||
|
||||
/**
|
||||
* @brief 是否已经上锁.
|
||||
* @return 返回true已经上锁,否则返回false
|
||||
*/
|
||||
bool acquired() const
|
||||
{
|
||||
return mAcquired;
|
||||
}
|
||||
|
||||
protected:
|
||||
/**
|
||||
* @brief 构造函数
|
||||
* 用于锁尝试操作,与Lock相似
|
||||
*/
|
||||
Lock(const T& mutex, bool)
|
||||
:
|
||||
mMutex(mutex)
|
||||
{
|
||||
mAcquired = mMutex.tryLock();
|
||||
}
|
||||
|
||||
private:
|
||||
// Not implemented; prevents accidental use.
|
||||
Lock(const Lock&);
|
||||
Lock& operator=(const Lock&);
|
||||
|
||||
protected:
|
||||
/**
|
||||
* 锁对象
|
||||
*/
|
||||
const T& mMutex;
|
||||
|
||||
/**
|
||||
* 是否已经上锁
|
||||
*/
|
||||
mutable bool mAcquired;
|
||||
};
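A minimal sketch of the guard in use (assuming the ThreadMutex from ThreadMutex.h; ThreadMonitor.h below also typedefs this guard as ThreadLock::Lock_t):

#include "Lock.h"
#include "ThreadMutex.h"

ThreadMutex gMutex;                    // protects some shared state

void touchSharedState()
{
    Lock<ThreadMutex> guard(gMutex);   // locked here
    // ... critical section ...
}                                      // unlocked when guard goes out of scope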
|
||||
|
||||
/**
|
||||
* @brief 尝试上锁
|
||||
*/
|
||||
template <typename T>
|
||||
class TryLock : public Lock<T>
|
||||
{
|
||||
public:
|
||||
TryLock(const T& mutex)
|
||||
:
|
||||
Lock<T>(mutex, true)
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief 空锁, 不做任何锁动作
|
||||
*/
|
||||
class EmptyMutex
|
||||
{
|
||||
public:
|
||||
/**
|
||||
* @brief 写锁.
|
||||
* @return int, 0 正确
|
||||
*/
|
||||
int lock() const { return 0; }
|
||||
|
||||
/**
|
||||
* @brief 解写锁
|
||||
*/
|
||||
int unlock() const { return 0; }
|
||||
|
||||
/**
|
||||
* @brief Try to acquire the lock.
* @return bool, true on success
|
||||
*/
|
||||
bool tryLock() const { return true; }
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief Read-lock guard template for a read-write lock;
* locks on construction and unlocks on destruction
|
||||
*/
|
||||
|
||||
template <typename T>
|
||||
class RW_RLock
|
||||
{
|
||||
public:
|
||||
/**
|
||||
* @brief Constructor; acquires the read lock on construction
* @param lock the read-write lock object
|
||||
*/
|
||||
RW_RLock(T& lock)
|
||||
:
|
||||
mRWLock(lock),
|
||||
mAcquired(false)
|
||||
{
|
||||
mRWLock.ReadLock();
|
||||
mAcquired = true;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief 析构时解锁
|
||||
*/
|
||||
~RW_RLock()
|
||||
{
|
||||
if (mAcquired)
|
||||
{
|
||||
mRWLock.Unlock();
|
||||
}
|
||||
}
|
||||
private:
|
||||
/**
|
||||
*锁对象
|
||||
*/
|
||||
const T& mRWLock;
|
||||
|
||||
/**
|
||||
* 是否已经上锁
|
||||
*/
|
||||
mutable bool mAcquired;
|
||||
|
||||
RW_RLock(const RW_RLock&);
|
||||
RW_RLock& operator=(const RW_RLock&);
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
class RW_WLock
|
||||
{
|
||||
public:
|
||||
/**
|
||||
* @brief Constructor; acquires the write lock on construction
* @param lock the read-write lock object
|
||||
*/
|
||||
RW_WLock(T& lock)
|
||||
:
|
||||
mRWLock(lock),
|
||||
mAcquired(false)
|
||||
{
|
||||
mRWLock.WriteLock();
|
||||
mAcquired = true;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief 析构时解锁
|
||||
*/
|
||||
~RW_WLock()
|
||||
{
|
||||
if(mAcquired)
|
||||
{
|
||||
mRWLock.Unlock();
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
/**
|
||||
*锁对象
|
||||
*/
|
||||
const T& mRWLock;
|
||||
/**
|
||||
* 是否已经上锁
|
||||
*/
|
||||
mutable bool mAcquired;
|
||||
|
||||
RW_WLock(const RW_WLock&);
|
||||
RW_WLock& operator=(const RW_WLock&);
|
||||
};
|
||||
#endif // __LOCK_H_
|
@ -1,53 +0,0 @@
|
||||
# File: makefile
|
||||
#
|
||||
# This is a makefile for use with g++
|
||||
#
|
||||
LIBNAME = TransPool
|
||||
|
||||
VERSION=`git log -1 --pretty=%h`
|
||||
|
||||
CC = g++
|
||||
|
||||
.SUFFIXES: .cpp .cc .h
|
||||
|
||||
#FLAGS = -std=c++0x -gdwarf-2 -g3 -Wall -DACTIVE_GIT_VERSION="${VERSION}" -DOMN_PLATFORM_UNIX $(Optimize) -DAOS_USERLAND
|
||||
FLAGS = -gdwarf-2 -g3 -Wall -DACTIVE_GIT_VERSION="${VERSION}" -DOMN_PLATFORM_UNIX $(Optimize) -DAOS_USERLAND
|
||||
|
||||
INCDIRS = -I../../ -I../../../common -I../../../ -I../../../3rdparty/jsoncpp/include/ -I../../../api/ -I../../../3rdparty/curl/include64/ -I../../../3rdparty/mysql64/include/mysql/
|
||||
|
||||
OBJDIR = ./$(LIBNAME)
|
||||
$(warning $(OBJDIR))
|
||||
|
||||
CREATEDIR = $(shell if [ -d $(OBJDIR) ]; then echo ""; else mkdir $(OBJDIR); fi)
|
||||
|
||||
OUTPUTDIR = ./$(OBJDIR)
|
||||
|
||||
LIBS = -L/usr/local/scws/lib -lnsl -lpthread -lstdc++ -lscws
|
||||
|
||||
#
|
||||
# objects and targets
|
||||
#
|
||||
OBJECTS = $(patsubst %.cpp,$(OBJDIR)/%.o,$(wildcard *.cpp))
|
||||
$(warning $(OBJECTS))
|
||||
|
||||
TARGETLIB = lib$(LIBNAME).a
|
||||
|
||||
#
|
||||
# Rules for normal comile and link
|
||||
#
|
||||
all:: lib
|
||||
|
||||
lib: $(OBJECTS)
|
||||
ar rvu $(TARGETLIB) $^
|
||||
mv -f $(TARGETLIB) $(OUTPUTDIR)
|
||||
|
||||
$(OBJECTS): $(OBJDIR)/%.o: %.cpp
|
||||
@echo "do create dir begin"
|
||||
$(CREATEDIR)
|
||||
@echo "do create dir end"
|
||||
$(CC) -c $(FLAGS) $< $(INCDIRS) -o $@
|
||||
#$(CC) -c $(FLAGS) $< -o $@
|
||||
|
||||
.PHONY: clean
|
||||
clean:
|
||||
rm -rf $(OBJECTS) $(OBJDIR) $(OUTPUTDIR)/$(TARGETLIB)
|
@ -1,54 +0,0 @@
|
||||
///////////////////////////////////////////////////////////
|
||||
// //
|
||||
// wraps a binary POSIX semaphore shared between        //
// threads; inter-process use is not supported          //
|
||||
///////////////////////////////////////////////////////////
|
||||
|
||||
#ifndef __SEM_H_
|
||||
#define __SEM_H_
|
||||
|
||||
#include "AutoPtr.h"
|
||||
|
||||
#include <semaphore.h>
|
||||
#include <stdint.h>
#include <time.h>
|
||||
|
||||
class Sem : public HandleBase
|
||||
{
|
||||
private:
|
||||
sem_t mOriSem;
|
||||
|
||||
public:
|
||||
Sem()
|
||||
{
|
||||
sem_init(&mOriSem, 0, 0);
|
||||
}
|
||||
|
||||
inline int semWait()
|
||||
{
|
||||
return sem_wait(&mOriSem);
|
||||
}
|
||||
|
||||
inline int semTryWait()
|
||||
{
|
||||
return sem_trywait(&mOriSem);
|
||||
}
|
||||
|
||||
inline int semTimeWait(const int64_t msTimeout)
{
    // a negative timeout means wait forever
    if (msTimeout < 0) return sem_wait(&mOriSem);

    // sem_timedwait() needs an absolute CLOCK_REALTIME deadline,
    // so turn the relative millisecond timeout into one
    struct timespec deadline;
    clock_gettime(CLOCK_REALTIME, &deadline);
    deadline.tv_sec  += msTimeout / 1000;
    deadline.tv_nsec += (msTimeout % 1000) * 1000 * 1000;
    if (deadline.tv_nsec >= 1000000000L)
    {
      deadline.tv_sec  += 1;
      deadline.tv_nsec -= 1000000000L;
    }

    return sem_timedwait(&mOriSem, &deadline);
}
|
||||
|
||||
inline int semPost()
|
||||
{
|
||||
return sem_post(&mOriSem);
|
||||
}
|
||||
};
|
||||
|
||||
typedef AutoPtr<Sem> SemPtr;
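A small usage sketch (hypothetical names), matching how the transaction pool uses it: one thread blocks with a timeout while another posts when the work is done:

SemPtr gDone = new Sem();

// worker thread, once its piece of work is finished:
void onWorkerDone()
{
    gDone->semPost();
}

// waiting thread, giving the worker up to 500 ms:
bool waitForWorker()
{
    return gDone->semTimeWait(500) == 0;   // non-zero means timeout or error
}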
|
||||
|
||||
#endif // __SEM_H_
|
||||
|
@ -1,242 +0,0 @@
|
||||
/**
|
||||
* july 16, 2019
|
||||
* created by qiuyu
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __SingletonBase_H_
|
||||
#define __SingletonBase_H_
|
||||
|
||||
#include <cassert>
|
||||
#include <cstdlib>
|
||||
|
||||
/////////////////////////////////////////////////
|
||||
/**
|
||||
* Singleton implementation class.
*
* It does not manage the singleton's whole lifecycle; typical usage looks like this:
*
* class A : public SingletonBase<A, CreateStatic, DefaultLifetime>
* {
* public:
*
*     A()  { cout << "A" << endl; }
*     ~A() { cout << "~A" << endl; }
*
*     void test() { cout << "test A" << endl; }
* };
*
* How the object is created is chosen by CreatePolicy:
*
*     CreateUsingNew: allocate on the heap with new
*
*     CreateStatic:   construct with placement new in static storage
*
* Lifetime management is chosen by LifetimePolicy:
*
*     DefaultLifetime: default lifetime management;
*     if the singleton has already been destroyed but is called again, that is an error
*
*     PhoneixLifetime: "phoenix" lifetime;
*     if the singleton has already been destroyed but is called again, it is created anew
*
*     NoDestroyLifetime: never destroyed;
*     no destruction is scheduled after the object is created; the usage shown above is
*     usually all that is needed
|
||||
*
|
||||
*/
|
||||
/////////////////////////////////////////////////
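Given a class A declared as in the comment above, callers then reach the single instance through getInstance() (a sketch):

void useIt()
{
    A::getInstance()->test();   // lazily creates the single instance on first use
}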
|
||||
|
||||
/**
|
||||
* @brief 定义CreatePolicy:定义对象创建策略
|
||||
*/
|
||||
template<typename T>
|
||||
class CreateUsingNew
|
||||
{
|
||||
public:
|
||||
/**
|
||||
* @brief 创建.
|
||||
*
|
||||
* @return T*
|
||||
*/
|
||||
static T* create()
|
||||
{
|
||||
return new T;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief 释放.
|
||||
*
|
||||
* @param t
|
||||
*/
|
||||
static void destroy(T *t)
|
||||
{
|
||||
delete t;
|
||||
}
|
||||
};
|
||||
|
||||
template<typename T>
|
||||
class CreateStatic
|
||||
{
|
||||
public:
|
||||
/**
|
||||
* @brief 最大的空间
|
||||
*/
|
||||
union MaxAlign
|
||||
{
|
||||
char t_[sizeof(T)];
|
||||
long double longDouble_;
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief 创建.
|
||||
*
|
||||
* @return T*
|
||||
*/
|
||||
static T* create()
|
||||
{
|
||||
static MaxAlign t;
|
||||
return new(&t) T; // placement new语法
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief 释放.
|
||||
*
|
||||
* @param t
|
||||
*/
|
||||
static void destroy(T *t)
|
||||
{
|
||||
t->~T();
|
||||
}
|
||||
};
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
/**
|
||||
* @brief 定义LifetimePolicy:定义对象的声明周期管理
|
||||
*/
|
||||
template<typename T>
|
||||
class DefaultLifetime
|
||||
{
|
||||
public:
|
||||
static void deadReference()
|
||||
{
|
||||
}
|
||||
|
||||
static void scheduleDestruction(T*, void (*pFun)())
|
||||
{
|
||||
// 注册程序退出前系统负责调用的用户层函数pFun(如果程序时crash退出,将不会调用)
|
||||
std::atexit(pFun);
|
||||
}
|
||||
};
|
||||
|
||||
template<typename T>
|
||||
class PhoneixLifetime
|
||||
{
|
||||
public:
|
||||
static void deadReference()
|
||||
{
|
||||
_bDestroyedOnce = true;
|
||||
}
|
||||
|
||||
static void scheduleDestruction(T*, void (*pFun)())
|
||||
{
|
||||
if(!_bDestroyedOnce)
|
||||
std::atexit(pFun);
|
||||
}
|
||||
private:
|
||||
static bool _bDestroyedOnce;
|
||||
};
|
||||
template <class T>
|
||||
bool PhoneixLifetime<T>::_bDestroyedOnce = false;
|
||||
|
||||
template <typename T>
|
||||
struct NoDestroyLifetime
|
||||
{
|
||||
static void scheduleDestruction(T*, void (*)())
|
||||
{
|
||||
}
|
||||
|
||||
static void deadReference()
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// Singleton
|
||||
template
|
||||
<
|
||||
typename T,
|
||||
template<class> class CreatePolicy = CreateUsingNew, // 默认申请策略为CreateUsingNew
|
||||
template<class> class LifetimePolicy = DefaultLifetime
|
||||
>
|
||||
class SingletonBase
|
||||
{
|
||||
public:
|
||||
typedef T instance_type;
|
||||
typedef volatile T volatile_type;
|
||||
|
||||
/**
|
||||
* @brief 获取实例
|
||||
*
|
||||
* @return T*
|
||||
*/
|
||||
static T *getInstance()
|
||||
{
|
||||
//加锁, 双check机制, 保证正确和效率(比在此处加锁效率高)
|
||||
if(!mInstance)
|
||||
{
|
||||
ThreadLock::Lock_t lock(mLock);
|
||||
if(!mInstance)
|
||||
{
|
||||
if(mDestroyed)
|
||||
{
|
||||
LifetimePolicy<T>::deadReference();
|
||||
mDestroyed = false;
|
||||
}
|
||||
mInstance = CreatePolicy<T>::create();
|
||||
LifetimePolicy<T>::scheduleDestruction((T*)mInstance, &destroySingleton);
|
||||
}
|
||||
}
|
||||
|
||||
return (T*)mInstance;
|
||||
}
|
||||
|
||||
virtual ~SingletonBase(){};
|
||||
|
||||
protected:
|
||||
|
||||
static void destroySingleton()
|
||||
{
|
||||
assert(!mDestroyed);
|
||||
CreatePolicy<T>::destroy((T*)mInstance);
|
||||
mInstance = NULL;
|
||||
mDestroyed = true;
|
||||
}
|
||||
protected:
|
||||
|
||||
static ThreadLock mLock;
|
||||
static volatile T* mInstance;
|
||||
static bool mDestroyed;
|
||||
|
||||
protected:
|
||||
SingletonBase(){}
|
||||
SingletonBase (const SingletonBase &);
|
||||
SingletonBase &operator=(const SingletonBase &);
|
||||
};
|
||||
|
||||
// 静态变量初始化
|
||||
template <class T, template<class> class CreatePolicy, template<class> class LifetimePolicy>
|
||||
ThreadLock SingletonBase<T, CreatePolicy, LifetimePolicy>::mLock;
|
||||
|
||||
template <class T, template<class> class CreatePolicy, template<class> class LifetimePolicy>
|
||||
bool SingletonBase<T, CreatePolicy, LifetimePolicy>::mDestroyed = false;
|
||||
|
||||
template <class T, template<class> class CreatePolicy, template<class> class LifetimePolicy>
|
||||
volatile T* SingletonBase<T, CreatePolicy, LifetimePolicy>::mInstance = NULL;
|
||||
|
||||
#endif
|
@ -1,86 +0,0 @@
|
||||
/**
|
||||
* Non-joinable thread implementation
|
||||
* Jul 16, 2019
|
||||
* By qiuyu
|
||||
*/
|
||||
|
||||
#include "Thread.h"
|
||||
#include "log.h"
|
||||
|
||||
#include <cerrno>
|
||||
|
||||
void sleep(long millsecond)
|
||||
{
|
||||
struct timespec ts;
|
||||
ts.tv_sec = millsecond / 1000;
|
||||
ts.tv_nsec = (millsecond % 1000)*1000000;
|
||||
nanosleep(&ts, 0);
|
||||
}
|
||||
|
||||
Thread::Thread()
|
||||
:
|
||||
// mIsRunning(false),
|
||||
mThreadState(Thread::eIdle),
|
||||
mThreadId(-1)
|
||||
{
|
||||
}
|
||||
|
||||
void Thread::threadEntry(Thread *pThread)
|
||||
{
|
||||
// log_error("start thread, id:%lu", pThread->mThreadId);
|
||||
|
||||
// pThread->mIsRunning = true;
|
||||
pThread->mThreadState = Thread::eRunning;
|
||||
{
|
||||
ThreadLock::Lock_t sync(pThread->mThreadLock);
|
||||
// 唤醒调用线程
|
||||
// pThread->mThreadLock.notifyAll();
|
||||
pThread->mThreadLock.notify();
|
||||
}
|
||||
|
||||
pThread->run();
|
||||
// pThread->mIsRunning = false;
|
||||
pThread->mThreadState = Thread::eExited;
|
||||
}
|
||||
|
||||
bool Thread::start()
|
||||
{
|
||||
// 加锁
|
||||
ThreadLock::Lock_t sync(mThreadLock);
|
||||
|
||||
// if (mIsRunning)
|
||||
if (mThreadState != Thread::eIdle)
|
||||
{
|
||||
log_error("thread is still runing! id:%lu, state:%d", mThreadId, mThreadState);
|
||||
return false;
|
||||
}
|
||||
|
||||
int ret = pthread_create(&mThreadId,
|
||||
0,
|
||||
(void *(*)(void *))&threadEntry,
|
||||
(void *)this);
|
||||
|
||||
if (ret != 0)
|
||||
{
|
||||
log_error("start thread failed!");
|
||||
return false;
|
||||
}
|
||||
|
||||
ret = pthread_detach(mThreadId);
|
||||
if (ret != 0)
|
||||
{
|
||||
log_error("detach thread failed, ret:%d", ret);
|
||||
return false;
|
||||
}
|
||||
|
||||
// 在threadEntry中解锁
|
||||
mThreadLock.wait();
|
||||
// log_error("start thread successful! id:%lu", mThreadId);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool Thread::isAlive() const
|
||||
{
|
||||
return mThreadState != Thread::eExited;
|
||||
}
|
@ -1,59 +0,0 @@
|
||||
/**
|
||||
* Non-joinable thread implementation
|
||||
* Jul 16, 2019
|
||||
* By qiuyu
|
||||
*/
|
||||
|
||||
#ifndef __THREAD_H_
|
||||
#define __THREAD_H_
|
||||
|
||||
#include "ThreadMonitor.h"
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <signal.h>
|
||||
#include <pthread.h>
|
||||
|
||||
/**
|
||||
* 线程基类,所有自定义线程继承于该类,同时实现run接口即可,
|
||||
*/
|
||||
class Thread
|
||||
{
|
||||
protected:
|
||||
enum ThreadState
|
||||
{
|
||||
eIdle,
|
||||
eRunning,
|
||||
eStopped, // not supported yet
|
||||
eExited
|
||||
};
|
||||
|
||||
public:
|
||||
Thread();
|
||||
virtual ~Thread(){};
|
||||
|
||||
// 线程运行
|
||||
bool start();
|
||||
|
||||
// 线程是否存活.
|
||||
bool isAlive() const;
|
||||
|
||||
// 获取线程id.
|
||||
pthread_t id() { return mThreadId; }
|
||||
|
||||
protected:
|
||||
|
||||
// 静态函数, 线程入口.
|
||||
static void threadEntry(Thread *pThread);
|
||||
|
||||
// 运行
|
||||
virtual void run() = 0;
|
||||
|
||||
protected:
|
||||
// bool mIsRunning;
|
||||
ThreadState mThreadState;
|
||||
pthread_t mThreadId;
|
||||
|
||||
// 线程锁
|
||||
ThreadLock mThreadLock;
|
||||
};
|
||||
#endif // __THREAD_H_
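A minimal sketch of a subclass (the Worker name is made up); start() returns once the new detached thread is up and about to enter run():

// hypothetical worker, assuming the Thread class above
class Worker : public Thread
{
protected:
    virtual void run()
    {
        // thread body; the thread is detached, so it can never be joined
    }
};

void spawn()
{
    Worker* worker = new Worker();   // kept alive for the detached thread, as the testers do
    worker->start();                 // blocks until the new thread has signalled from threadEntry()
}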
|
@ -1,86 +0,0 @@
|
||||
/**
|
||||
* july 16, 2019
|
||||
* created by qiuyu
|
||||
*
|
||||
*/
|
||||
|
||||
#include "ThreadCond.h"
|
||||
#include <string.h>
|
||||
#include <cassert>
|
||||
#include <iostream>
|
||||
#include <stdint.h>
|
||||
|
||||
using namespace std;
|
||||
|
||||
|
||||
ThreadCond::ThreadCond()
|
||||
{
|
||||
int rc;
|
||||
|
||||
pthread_condattr_t attr;
|
||||
|
||||
rc = pthread_condattr_init(&attr);
|
||||
if(rc != 0)
|
||||
{
|
||||
log_error("pthread_condattr_init error:%d", errno);
|
||||
}
|
||||
|
||||
rc = pthread_cond_init(&mCond, &attr);
|
||||
if(rc != 0)
|
||||
{
|
||||
log_error("pthread_cond_init error:%d", errno);
|
||||
}
|
||||
|
||||
rc = pthread_condattr_destroy(&attr);
|
||||
if(rc != 0)
|
||||
{
|
||||
log_error("pthread_condattr_destroy error:%d", errno);
|
||||
}
|
||||
}
|
||||
|
||||
ThreadCond::~ThreadCond()
|
||||
{
|
||||
int rc = 0;
|
||||
rc = pthread_cond_destroy(&mCond);
|
||||
if(rc != 0)
|
||||
{
|
||||
log_error("destroy cond error:%s", string(strerror(rc)).c_str());
|
||||
}
|
||||
}
|
||||
|
||||
void ThreadCond::signal()
|
||||
{
|
||||
int rc = pthread_cond_signal(&mCond);
|
||||
if(rc != 0)
|
||||
{
|
||||
log_error("pthread_cond_signal error:%d", errno);
|
||||
}
|
||||
}
|
||||
|
||||
void ThreadCond::broadcast()
|
||||
{
|
||||
int rc = pthread_cond_broadcast(&mCond);
|
||||
if(rc != 0)
|
||||
{
|
||||
log_error("pthread_cond_broadcast error:%d", errno);
|
||||
}
|
||||
}
|
||||
|
||||
timespec ThreadCond::abstime(int millsecond) const
|
||||
{
|
||||
struct timeval tv;
|
||||
|
||||
gettimeofday(&tv, 0);
|
||||
// TC_TimeProvider::getInstance()->getNow(&tv);
|
||||
|
||||
int64_t it = tv.tv_sec * (int64_t)1000000 + tv.tv_usec + (int64_t)millsecond * 1000;
|
||||
|
||||
tv.tv_sec = it / (int64_t)1000000;
|
||||
tv.tv_usec = it % (int64_t)1000000;
|
||||
|
||||
timespec ts;
|
||||
ts.tv_sec = tv.tv_sec;
|
||||
ts.tv_nsec = tv.tv_usec * 1000;
|
||||
|
||||
return ts;
|
||||
}
|
@ -1,102 +0,0 @@
|
||||
/**
|
||||
* july 16, 2019
|
||||
* created by qiuyu
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __THREAD_COND_H_
|
||||
#define __THREAD_COND_H_
|
||||
|
||||
#include "log.h"
|
||||
|
||||
#include <sys/time.h>
|
||||
#include <cerrno>
|
||||
#include <iostream>
|
||||
#include <assert.h>
|
||||
|
||||
class ThreadMutex;
|
||||
|
||||
/**
|
||||
* 线程信号条件类, 所有锁可以在上面等待信号发生
|
||||
*
|
||||
* 和ThreadMutex、ThreadRecMutex配合使用,
|
||||
*
|
||||
* 通常不直接使用,而是使用ThreadLock/ThreadRecLock;
|
||||
*/
|
||||
class ThreadCond
|
||||
{
|
||||
public:
|
||||
ThreadCond();
|
||||
~ThreadCond();
|
||||
|
||||
/**
|
||||
* 发送信号, 等待在该条件上的一个线程会醒
|
||||
*/
|
||||
void signal();
|
||||
|
||||
/**
|
||||
* 等待在该条件的所有线程都会醒
|
||||
*/
|
||||
void broadcast();
|
||||
|
||||
/**
|
||||
* 获取绝对等待时间
|
||||
*/
|
||||
timespec abstime(int millsecond) const;
|
||||
|
||||
/**
|
||||
* @brief 无限制等待.
|
||||
*
|
||||
* @param M
|
||||
*/
|
||||
template<typename Mutex>
|
||||
void wait(const Mutex& mutex) const
|
||||
{
|
||||
int c = mutex.count();
|
||||
int rc = pthread_cond_wait(&mCond, &mutex.mMutex);
|
||||
mutex.count(c);
|
||||
if(rc != 0)
|
||||
{
|
||||
log_error("pthread_cond_wait error:%d", errno);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief 等待时间.
|
||||
* @return bool, false表示超时, true:表示有事件来了
|
||||
*/
|
||||
template<typename Mutex>
|
||||
bool timedWait(const Mutex& mutex, int millsecond) const
|
||||
{
|
||||
int c = mutex.count();
|
||||
|
||||
timespec ts = abstime(millsecond);
|
||||
|
||||
int rc = pthread_cond_timedwait(&mCond, &mutex.mMutex, &ts);
|
||||
|
||||
mutex.count(c);
|
||||
|
||||
if(rc != 0)
|
||||
{
|
||||
if(rc != ETIMEDOUT)
|
||||
{
|
||||
log_error("pthread_cond_timedwait error:%d", errno);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
protected:
|
||||
// Not implemented; prevents accidental use.
|
||||
ThreadCond(const ThreadCond&);
|
||||
ThreadCond& operator=(const ThreadCond&);
|
||||
|
||||
private:
|
||||
/**
|
||||
* 线程条件
|
||||
*/
|
||||
mutable pthread_cond_t mCond;
|
||||
};
|
||||
#endif
|
@ -1,205 +0,0 @@
|
||||
/**
|
||||
* 线程锁监控模板类:
|
||||
* 通常线程锁,都通过该类来使用,而不是直接用ThreadMutex、ThreadRecMutex
|
||||
* 该类将ThreadMutex/ThreadRecMutex 与ThreadCond结合起来
|
||||
*
|
||||
* july 16, 2019
|
||||
* created by qiuyu
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _ThreadMonitor_H_
|
||||
#define _ThreadMonitor_H_
|
||||
|
||||
#include "ThreadMutex.h"
|
||||
#include "ThreadCond.h"
|
||||
|
||||
template <class T, class P>
|
||||
class ThreadMonitor
|
||||
{
|
||||
public:
|
||||
/**
|
||||
* @brief 定义锁控制对象
|
||||
*/
|
||||
typedef Lock<ThreadMonitor<T, P> > Lock_t;
|
||||
typedef TryLock<ThreadMonitor<T, P> > TryLock_t;
|
||||
|
||||
/**
|
||||
* @brief 构造函数
|
||||
*/
|
||||
ThreadMonitor()
|
||||
:
|
||||
mNotifyNum(0)
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Destructor
|
||||
*/
|
||||
virtual ~ThreadMonitor()
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief 锁
|
||||
*/
|
||||
void lock() const
|
||||
{
|
||||
mThreadMutex.lock();
|
||||
mNotifyNum = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief 解锁, 根据上锁的次数通知
|
||||
*/
|
||||
void unlock() const
|
||||
{
|
||||
// 详见notify(),根据notify次数来signal
|
||||
notifyImpl(mNotifyNum);
|
||||
mThreadMutex.unlock();
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief 尝试锁.
|
||||
*
|
||||
* @return bool
|
||||
*/
|
||||
bool tryLock() const
|
||||
{
|
||||
bool result = mThreadMutex.tryLock();
|
||||
if(result)
|
||||
{
|
||||
// 加锁成功
|
||||
mNotifyNum = 0;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief 等待,当前调用线程在锁上等待,直到事件通知,
|
||||
*/
|
||||
void wait() const
|
||||
{
|
||||
notifyImpl(mNotifyNum);
|
||||
|
||||
// try
|
||||
// {
|
||||
mThreadCond.wait(mThreadMutex);
|
||||
// }
|
||||
// catch(...)
|
||||
// {
|
||||
// mNotifyNum = 0;
|
||||
// throw;
|
||||
// }
|
||||
|
||||
mNotifyNum = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief 等待时间,当前调用线程在锁上等待,直到超时或有事件通知
|
||||
*
|
||||
* @param millsecond 等待时间
|
||||
* @return false: timed out; true: an event arrived
|
||||
*/
|
||||
bool timedWait(int millsecond) const
|
||||
{
|
||||
notifyImpl(mNotifyNum);
|
||||
|
||||
bool rc;
|
||||
|
||||
// try
|
||||
// {
|
||||
rc = mThreadCond.timedWait(mThreadMutex, millsecond);
|
||||
// }
|
||||
// catch(...)
|
||||
// {
|
||||
// mNotifyNum = 0;
|
||||
// throw;
|
||||
// }
|
||||
|
||||
mNotifyNum = 0;
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
 * @brief Notify one waiting thread to wake up
 *
 * Wakes one thread waiting on this lock; the lock must be held before calling,
 *
 * and the signal is only actually sent when the lock is released
 */
/* The lock must be held before calling. notify() only changes mNotifyNum; when the Lock guard is destroyed it calls unlock(), and only then is the pthread signal really issued. */
|
||||
void notify()
|
||||
{
|
||||
if(mNotifyNum != -1)
|
||||
{
|
||||
++mNotifyNum;
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * @brief Notify all threads waiting on this lock to wake up.
 * Note that the lock must already be held when this is called.
 *
 * As with notify(), the broadcast is only issued when the lock is released
 */
|
||||
void notifyAll()
|
||||
{
|
||||
mNotifyNum = -1;
|
||||
}
|
||||
|
||||
protected:
|
||||
/**
 * @brief Notification implementation.
 *
 * @param nnotify number of pending notifications; -1 means notify all
 */
|
||||
void notifyImpl(int nnotify) const
|
||||
{
|
||||
if(nnotify != 0)
|
||||
{
|
||||
// notifyAll
|
||||
if(nnotify == -1)
|
||||
{
|
||||
mThreadCond.broadcast();
|
||||
return;
|
||||
}
|
||||
else
|
||||
{
|
||||
// signal once per recorded notify() call
|
||||
while(nnotify > 0)
|
||||
{
|
||||
mThreadCond.signal();
|
||||
--nnotify;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
/**
|
||||
* @brief noncopyable
|
||||
*/
|
||||
ThreadMonitor(const ThreadMonitor&);
|
||||
void operator=(const ThreadMonitor&);
|
||||
|
||||
protected:
|
||||
/**
 * Number of pending notifications (-1 means notify all)
 */
|
||||
mutable int mNotifyNum;
|
||||
mutable P mThreadCond;
|
||||
T mThreadMutex;
|
||||
};
|
||||
|
||||
/**
 * Plain (non-recursive) thread lock
 */
typedef ThreadMonitor<ThreadMutex, ThreadCond> ThreadLock;

/**
 * Recursive lock (one thread may lock it multiple times)
 */
typedef ThreadMonitor<ThreadRecMutex, ThreadCond> ThreadRecLock;
|
||||
|
||||
#endif
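// Illustrative only, not part of the original source: a minimal sketch of how the
// ThreadLock monitor above is typically driven for a producer/consumer hand-off.
// It follows the Lock_t guard usage seen elsewhere in this commit (e.g. TimeProvider.cpp).
#include "ThreadMonitor.h"

static ThreadLock gLock;
static bool gReady = false;

void produce()
{
    ThreadLock::Lock_t lock(gLock); // lock() resets the pending notify count
    gReady = true;
    gLock.notify();                 // recorded here; the signal is issued when the guard unlocks
}

bool consume()
{
    ThreadLock::Lock_t lock(gLock);
    while (!gReady)
    {
        if (!gLock.timedWait(100))  // false on timeout, true when notified
            return false;
    }
    return true;
}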
|
@ -1,197 +0,0 @@
|
||||
/**
|
||||
*
|
||||
* july 16, 2019
|
||||
* created by qiuyu
|
||||
*
|
||||
*/
|
||||
#include "ThreadMutex.h"
|
||||
#include <string.h>
|
||||
#include <iostream>
|
||||
#include <cassert>
|
||||
|
||||
ThreadMutex::ThreadMutex()
|
||||
{
|
||||
int rc;
|
||||
pthread_mutexattr_t attr;
|
||||
rc = pthread_mutexattr_init(&attr);
|
||||
assert(rc == 0);
|
||||
|
||||
rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
|
||||
assert(rc == 0);
|
||||
|
||||
rc = pthread_mutex_init(&mMutex, &attr);
|
||||
assert(rc == 0);
|
||||
|
||||
rc = pthread_mutexattr_destroy(&attr);
|
||||
assert(rc == 0);
|
||||
|
||||
log_info("construct mutex, this:%p", this);
|
||||
}
|
||||
|
||||
ThreadMutex::~ThreadMutex()
|
||||
{
|
||||
int rc = 0;
|
||||
rc = pthread_mutex_destroy(&mMutex);
|
||||
assert(rc == 0);
|
||||
|
||||
log_info("deconstruct mutex, this:%p", this);
|
||||
}
|
||||
|
||||
void ThreadMutex::lock() const
|
||||
{
|
||||
int rc = pthread_mutex_lock(&mMutex);
|
||||
if(rc != 0)
|
||||
{
|
||||
if(rc == EDEADLK)
|
||||
{
|
||||
log_error("dead lock error. ret:%d", rc);
|
||||
}
|
||||
else
|
||||
{
|
||||
log_error("lock error. ret:%d", rc);
|
||||
}
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
bool ThreadMutex::tryLock() const
|
||||
{
|
||||
int rc = pthread_mutex_trylock(&mMutex);
|
||||
if (rc != 0 && rc != EBUSY)
|
||||
{
|
||||
if (rc == EDEADLK)
|
||||
{
|
||||
log_error("dead lock error. ret:%d", rc);
|
||||
}
|
||||
else
|
||||
{
|
||||
log_error("trylock error. ret:%d", rc);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
return (rc == 0);
|
||||
}
|
||||
|
||||
void ThreadMutex::unlock() const
|
||||
{
|
||||
int rc = pthread_mutex_unlock(&mMutex);
|
||||
if(rc != 0)
|
||||
{
|
||||
log_error("unlock error. ret:%d", rc);
|
||||
// return false;
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
int ThreadMutex::count() const
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ThreadMutex::count(int c) const
|
||||
{
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
ThreadRecMutex::ThreadRecMutex()
|
||||
:
|
||||
mCount(0)
|
||||
{
|
||||
int rc;
|
||||
pthread_mutexattr_t attr;
|
||||
|
||||
rc = pthread_mutexattr_init(&attr);
|
||||
if(rc != 0) log_error("init error. ret:%d", rc);
|
||||
|
||||
rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
|
||||
if(rc != 0) log_error("settype error. ret:%d", rc);
|
||||
|
||||
rc = pthread_mutex_init(&mMutex, &attr);
|
||||
if(rc != 0) log_error("init error. ret:%d", rc);
|
||||
|
||||
rc = pthread_mutexattr_destroy(&attr);
|
||||
if(rc != 0) log_error("destroy error. ret:%d", rc);
|
||||
}
|
||||
|
||||
ThreadRecMutex::~ThreadRecMutex()
|
||||
{
|
||||
while (mCount)
|
||||
{
|
||||
unlock();
|
||||
}
|
||||
|
||||
int rc = 0;
|
||||
rc = pthread_mutex_destroy(&mMutex);
|
||||
if(rc != 0) log_error("destroy error! ret:%d", rc);
|
||||
}
|
||||
|
||||
int ThreadRecMutex::lock() const
|
||||
{
|
||||
int rc = pthread_mutex_lock(&mMutex);
|
||||
if(rc != 0) log_error("lock error. ret:%d", rc);
|
||||
|
||||
// if the same thread locks again, release the recursive mutex once here, so a single unlock() from the caller is still enough to fully release it
|
||||
if(++mCount > 1)
|
||||
{
|
||||
rc = pthread_mutex_unlock(&mMutex);
|
||||
assert(rc == 0);
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
int ThreadRecMutex::unlock() const
|
||||
{
|
||||
if(--mCount == 0)
|
||||
{
|
||||
int rc = 0;
|
||||
rc = pthread_mutex_unlock(&mMutex);
|
||||
return rc;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool ThreadRecMutex::tryLock() const
|
||||
{
|
||||
int rc = pthread_mutex_trylock(&mMutex);
|
||||
if(rc != 0 )
|
||||
{
|
||||
if(rc != EBUSY)
|
||||
{
|
||||
log_error("trylock error. ret:%d", rc);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
else if(++mCount > 1)
|
||||
{
|
||||
rc = pthread_mutex_unlock(&mMutex);
|
||||
if(rc != 0)
|
||||
{
|
||||
log_error("unlock error. ret:%d", rc);
|
||||
}
|
||||
}
|
||||
|
||||
return (rc == 0);
|
||||
}
|
||||
|
||||
bool ThreadRecMutex::willUnlock() const
|
||||
{
|
||||
return mCount == 1;
|
||||
}
|
||||
|
||||
int ThreadRecMutex::count() const
|
||||
{
|
||||
int c = mCount;
|
||||
mCount = 0;
|
||||
return c;
|
||||
}
|
||||
|
||||
void ThreadRecMutex::count(int c) const
|
||||
{
|
||||
mCount = c;
|
||||
}
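// Illustrative only, not part of the original source: why count()/count(int) behave this way.
// ThreadCond::wait() uses them to fully release a recursive lock before sleeping and to
// restore its recursion depth afterwards, roughly as in ThreadCond.h:
//
//   int c = mutex.count();                      // save the depth, reset it to 0
//   pthread_cond_wait(&mCond, &mutex.mMutex);   // sleep with the raw mutex released
//   mutex.count(c);                             // restore the saved depth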
|
@ -1,105 +0,0 @@
|
||||
/**
|
||||
*
|
||||
* july 16, 2019
|
||||
* created by qiuyu
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __THREAD_MUTEX_H_
|
||||
#define __THREAD_MUTEX_H_
|
||||
|
||||
#include "Lock.h"
|
||||
|
||||
/////////////////////////////////////////////////
|
||||
class ThreadCond;
|
||||
|
||||
/**
 * @brief Thread mutex.
 *
 * Not recursive: the same thread must not lock it twice
 *
 * Usually not used directly, but together with the monitor, i.e. ThreadLock;
 */
|
||||
class ThreadMutex
|
||||
{
|
||||
public:
|
||||
ThreadMutex();
|
||||
virtual ~ThreadMutex();
|
||||
|
||||
/**
 * @brief Lock
 */
void lock() const;

/**
 * @brief Try to lock without blocking
 */
bool tryLock() const;

/**
 * @brief Unlock
 */
void unlock() const;

/**
 * @brief Whether the next unlock() actually releases the lock; used by the Monitor. Always returns true here
 */
bool willUnlock() const { return true;}
|
||||
|
||||
protected:
|
||||
// noncopyable
|
||||
ThreadMutex(const ThreadMutex&);
|
||||
void operator=(const ThreadMutex&);
|
||||
|
||||
/**
 * @brief Get the recursion count (always 0 for a plain mutex)
 */
int count() const;

/**
 * @brief Restore the recursion count (no-op for a plain mutex)
 */
void count(int c) const;
|
||||
|
||||
friend class ThreadCond;
|
||||
|
||||
protected:
|
||||
mutable pthread_mutex_t mMutex;
|
||||
};
|
||||
|
||||
/**
 * @brief Recursive thread mutex.
 * Implemented on top of the pthread library
 **/
|
||||
class ThreadRecMutex
|
||||
{
|
||||
public:
|
||||
ThreadRecMutex();
|
||||
virtual ~ThreadRecMutex();
|
||||
|
||||
int lock() const;
|
||||
int unlock() const;
|
||||
bool tryLock() const;
|
||||
|
||||
/**
 * @brief Whether the next unlock() actually releases the lock; used by the Monitor
 */
|
||||
bool willUnlock() const;
|
||||
|
||||
protected:
|
||||
friend class ThreadCond;
|
||||
|
||||
/**
 * @brief Save/restore the recursion count (used by ThreadCond around waits)
 */
int count() const;
void count(int c) const;
|
||||
|
||||
private:
|
||||
/**
 * Underlying mutex object
 */
|
||||
mutable pthread_mutex_t mMutex;
|
||||
mutable int mCount;
|
||||
};
|
||||
#endif
|
@ -1,278 +0,0 @@
|
||||
/**
 * Thread-safe queue
 * july 16, 2019
 * created by qiuyu
 *
 */
|
||||
|
||||
#ifndef __THREADmQueueStorage_H_
|
||||
#define __THREADmQueueStorage_H_
|
||||
|
||||
#include <deque>
|
||||
#include <vector>
|
||||
#include <cassert>
|
||||
#include "ThreadMonitor.h"
|
||||
|
||||
template<typename T, typename D = std::deque<T> >
|
||||
class ThreadQueue : protected ThreadLock
|
||||
{
|
||||
public:
|
||||
ThreadQueue()
|
||||
:
|
||||
mQueueSize(0)
|
||||
{
|
||||
};
|
||||
|
||||
public:
|
||||
typedef D QueueType_t;
|
||||
|
||||
/**
 * @brief Pop an element from the head; wait if the queue is empty.
 *
 * @param t receives the popped element
 * @param millsecond how long to block, in ms
 *        0 means do not block
 *        -1 means wait forever
 * @return bool: true if an element was returned, false if none was available
 */
bool pop_front(T& t, size_t millsecond = 0);
|
||||
|
||||
/**
 * @brief Wake up all threads waiting on the queue
 */
void notifyT();
|
||||
|
||||
/**
 * @brief Push an element to the back of the queue.
 *
 * @param t
 */
void push_back(const T& t);

/**
 * @brief Push a batch of elements to the back of the queue.
 *
 * @param qt
 */
void push_back(const QueueType_t &qt);

/**
 * @brief Push an element to the front of the queue.
 *
 * @param t
 */
void push_front(const T& t);

/**
 * @brief Push a batch of elements to the front of the queue.
 *
 * @param qt
 */
void push_front(const QueueType_t &qt);
|
||||
|
||||
/**
 * @brief Swap the whole queue contents out once there is data.
 *
 * @param q container to swap with
 * @param millsecond how long to block, in ms
 *        0 means do not block
 *        -1 means wait forever
 * @return true if there was data, false otherwise
 */
bool swap(QueueType_t &q, size_t millsecond = 0);
|
||||
|
||||
/**
 * @brief Queue size.
 *
 * @return size_t number of queued elements
 */
size_t size() const;

/**
 * @brief Clear the queue
 */
void clear();

/**
 * @brief Whether the queue is empty.
 *
 * @return bool true if empty, false otherwise
 */
bool empty() const;
|
||||
|
||||
protected:
|
||||
/**
 * Underlying queue storage
 */
QueueType_t mQueueStorage;

/**
 * Queue length
 */
size_t mQueueSize;
|
||||
};
|
||||
|
||||
template<typename T, typename D>
|
||||
bool ThreadQueue<T, D>::pop_front(T& t, size_t millsecond)
|
||||
{
|
||||
Lock_t lock(*this);
|
||||
|
||||
if (mQueueStorage.empty())
|
||||
{
|
||||
if(millsecond == 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
if(millsecond == (size_t)-1)
|
||||
{
|
||||
wait();
|
||||
}
|
||||
else
|
||||
{
|
||||
// timed out
|
||||
if(!timedWait(millsecond))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (mQueueStorage.empty())
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
t = mQueueStorage.front();
|
||||
mQueueStorage.pop_front();
|
||||
assert(mQueueSize > 0);
|
||||
--mQueueSize;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
template<typename T, typename D>
|
||||
void ThreadQueue<T, D>::notifyT()
|
||||
{
|
||||
Lock_t lock(*this);
|
||||
notifyAll();
|
||||
}
|
||||
|
||||
template<typename T, typename D>
|
||||
void ThreadQueue<T, D>::push_back(const T& t)
|
||||
{
|
||||
Lock_t lock(*this);
|
||||
|
||||
// wake up a thread waiting to consume
notify();

// enqueue the element
mQueueStorage.push_back(t);
|
||||
++mQueueSize;
|
||||
}
|
||||
|
||||
template<typename T, typename D>
|
||||
void ThreadQueue<T, D>::push_back(const QueueType_t &qt)
|
||||
{
|
||||
Lock_t lock(*this);
|
||||
|
||||
typename QueueType_t::const_iterator it = qt.begin();
|
||||
typename QueueType_t::const_iterator itEnd = qt.end();
|
||||
while(it != itEnd)
|
||||
{
|
||||
mQueueStorage.push_back(*it);
|
||||
++it;
|
||||
++mQueueSize;
|
||||
notify();
|
||||
}
|
||||
}
|
||||
|
||||
template<typename T, typename D>
|
||||
void ThreadQueue<T, D>::push_front(const T& t)
|
||||
{
|
||||
Lock_t lock(*this);
|
||||
|
||||
notify();
|
||||
|
||||
mQueueStorage.push_front(t);
|
||||
|
||||
++mQueueSize;
|
||||
}
|
||||
|
||||
template<typename T, typename D>
|
||||
void ThreadQueue<T, D>::push_front(const QueueType_t &qt)
|
||||
{
|
||||
Lock_t lock(*this);
|
||||
|
||||
typename QueueType_t::const_iterator it = qt.begin();
|
||||
typename QueueType_t::const_iterator itEnd = qt.end();
|
||||
while(it != itEnd)
|
||||
{
|
||||
mQueueStorage.push_front(*it);
|
||||
++it;
|
||||
++mQueueSize;
|
||||
|
||||
notify();
|
||||
}
|
||||
}
|
||||
|
||||
template<typename T, typename D>
|
||||
bool ThreadQueue<T, D>::swap(QueueType_t &q, size_t millsecond)
|
||||
{
|
||||
Lock_t lock(*this);
|
||||
|
||||
if (mQueueStorage.empty())
|
||||
{
|
||||
if(millsecond == 0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
if(millsecond == (size_t)-1)
|
||||
{
|
||||
wait();
|
||||
}
|
||||
else
|
||||
{
|
||||
// timed out
|
||||
if(!timedWait(millsecond))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (mQueueStorage.empty())
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
q.swap(mQueueStorage);
|
||||
//mQueueSize = q.size();
|
||||
mQueueSize = mQueueStorage.size();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
template<typename T, typename D>
|
||||
size_t ThreadQueue<T, D>::size() const
|
||||
{
|
||||
Lock_t lock(*this);
|
||||
//return mQueueStorage.size();
|
||||
return mQueueSize;
|
||||
}
|
||||
|
||||
template<typename T, typename D>
|
||||
void ThreadQueue<T, D>::clear()
|
||||
{
|
||||
Lock_t lock(*this);
|
||||
mQueueStorage.clear();
|
||||
mQueueSize = 0;
|
||||
}
|
||||
|
||||
template<typename T, typename D>
|
||||
bool ThreadQueue<T, D>::empty() const
|
||||
{
|
||||
Lock_t lock(*this);
|
||||
return mQueueStorage.empty();
|
||||
}
|
||||
#endif
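// Illustrative only, not part of the original source: a minimal sketch of how this
// ThreadQueue is meant to be driven, with a made-up int work item.
#include "ThreadQueue.h"

static ThreadQueue<int> gTasks;

void producerThread()
{
    gTasks.push_back(42);            // push_back() notifies one waiting consumer
}

void consumerThread()
{
    int task;
    // wait up to 500 ms; 0 would poll, (size_t)-1 would wait forever
    if (gTasks.pop_front(task, 500))
    {
        // ... process task ...
    }
}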
|
||||
|
@ -1,122 +0,0 @@
|
||||
|
||||
#include "TimeProvider.h"
|
||||
|
||||
ThreadLock TimeProvider::g_tl;
|
||||
TimeProviderPtr TimeProvider::g_tp = NULL;
|
||||
|
||||
TimeProvider* TimeProvider::getInstance()
|
||||
{
|
||||
if(!g_tp)
|
||||
{
|
||||
ThreadLock::Lock_t lock(g_tl);
|
||||
|
||||
if(!g_tp)
|
||||
{
|
||||
g_tp = new TimeProvider();
|
||||
|
||||
g_tp->start();
|
||||
}
|
||||
}
|
||||
return g_tp.get();
|
||||
}
|
||||
|
||||
TimeProvider::~TimeProvider()
|
||||
{
|
||||
{
|
||||
ThreadLock::Lock_t lock(g_tl);
|
||||
mTerminate = true;
|
||||
g_tl.notify();
|
||||
}
|
||||
|
||||
pthread_join(mThreadId, NULL);
|
||||
}
|
||||
|
||||
void TimeProvider::getNow(timeval *tv)
|
||||
{
|
||||
int idx = mBufIdx;
|
||||
*tv = mTimeVal[idx];
|
||||
|
||||
if(mCpuCycle != 0 && mUseTsc) // the cpu-cycle estimate is available after two sampling intervals
|
||||
{
|
||||
addTimeOffset(*tv,idx);
|
||||
}
|
||||
else
|
||||
{
|
||||
::gettimeofday(tv, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
int64_t TimeProvider::getNowMs()
|
||||
{
|
||||
struct timeval tv;
|
||||
getNow(&tv);
|
||||
return tv.tv_sec * (int64_t)1000 + tv.tv_usec/1000;
|
||||
}
|
||||
|
||||
void TimeProvider::run()
|
||||
{
|
||||
while(!mTerminate)
|
||||
{
|
||||
timeval& tt = mTimeVal[!mBufIdx];
|
||||
|
||||
::gettimeofday(&tt, NULL);
|
||||
|
||||
setTsc(tt);
|
||||
|
||||
mBufIdx = !mBufIdx;
|
||||
|
||||
ThreadLock::Lock_t lock(g_tl);
|
||||
|
||||
g_tl.timedWait(800); // if the 800 ms interval changes, the offset threshold in addTimeOffset must be changed accordingly
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
float TimeProvider::cpuMHz()
|
||||
{
|
||||
if(mCpuCycle != 0)
|
||||
return 1.0/mCpuCycle;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void TimeProvider::setTsc(timeval& tt)
|
||||
{
|
||||
uint32_t low = 0;
|
||||
uint32_t high = 0;
|
||||
rdtsc(low,high);
|
||||
uint64_t current_tsc = ((uint64_t)high << 32) | low;
|
||||
|
||||
uint64_t& last_tsc = mTimeSc[!mBufIdx];
|
||||
timeval& last_tt = mTimeVal[mBufIdx];
|
||||
|
||||
if(mTimeSc[mBufIdx] == 0 || mTimeSc[!mBufIdx] == 0 )
|
||||
{
|
||||
mCpuCycle = 0;
|
||||
last_tsc = current_tsc;
|
||||
}
|
||||
else
|
||||
{
|
||||
time_t sptime = (tt.tv_sec - last_tt.tv_sec)*1000*1000 + (tt.tv_usec - last_tt.tv_usec);
|
||||
mCpuCycle = (float)sptime/(current_tsc - mTimeSc[mBufIdx]); //us
|
||||
last_tsc = current_tsc;
|
||||
}
|
||||
}
|
||||
|
||||
void TimeProvider::addTimeOffset(timeval& tt,const int &idx)
|
||||
{
|
||||
uint32_t low = 0;
|
||||
uint32_t high = 0;
|
||||
rdtsc(low,high);
|
||||
uint64_t current_tsc = ((uint64_t)high << 32) | low;
|
||||
int64_t t = (int64_t)(current_tsc - mTimeSc[idx]);
|
||||
time_t offset = (time_t)(t*mCpuCycle);
|
||||
if(t < -1000 || offset > 1000000) // TSC went backwards or the interpolated offset exceeds one second; fall back to gettimeofday
|
||||
{
|
||||
mUseTsc = false;
|
||||
::gettimeofday(&tt, NULL);
|
||||
return;
|
||||
}
|
||||
tt.tv_usec += offset;
|
||||
while (tt.tv_usec >= 1000000) { tt.tv_usec -= 1000000; tt.tv_sec++;}
|
||||
}
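// Illustrative only, not part of the original source: the arithmetic behind the TSC
// interpolation above, with made-up numbers. mCpuCycle stores microseconds per TSC tick,
// so an elapsed tick count converts to a microsecond offset with a single multiply.
#include <stdint.h>
#include <stdio.h>

int main()
{
    double usPerTick = 800000.0 / 2400000000.0; // 800 ms sampled over 2.4e9 ticks (a ~3 GHz CPU)
    uint64_t lastTsc = 1000000000ULL;
    uint64_t nowTsc  = 1000600000ULL;           // 600k ticks later
    double offsetUs  = (double)(nowTsc - lastTsc) * usPerTick;
    printf("interpolated offset: %.0f us\n", offsetUs); // ~200 us
    return 0;
}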
|
@ -1,125 +0,0 @@
|
||||
/**
 * @file TimeProvider.h
 * @brief Provides cached second- and microsecond-level timestamps.
 */
|
||||
|
||||
#ifndef __TIME_PROVIDER_H_
|
||||
#define __TIME_PROVIDER_H_
|
||||
|
||||
#include <string>
|
||||
#include <string.h>
|
||||
#include "ThreadMonitor.h"
|
||||
#include "Thread.h"
|
||||
#include "AutoPtr.h"
|
||||
|
||||
#define rdtsc(low,high) \
|
||||
__asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
|
||||
|
||||
#define TNOW TimeProvider::getInstance()->getNow()
|
||||
#define TNOWMS TimeProvider::getInstance()->getNowMs()
|
||||
|
||||
class TimeProvider;
|
||||
|
||||
typedef AutoPtr<TimeProvider> TimeProviderPtr;
|
||||
|
||||
/**
 * @brief Provides cached wall-clock time
 */
|
||||
class TimeProvider : public Thread, public HandleBase
|
||||
{
|
||||
public:
|
||||
|
||||
/**
|
||||
* @brief 获取实例.
|
||||
*
|
||||
* @return TimeProvider&
|
||||
*/
|
||||
static TimeProvider* getInstance();
|
||||
|
||||
/**
|
||||
* @brief 构造函数
|
||||
*/
|
||||
TimeProvider()
|
||||
:
|
||||
mTerminate(false),
|
||||
mUseTsc(true),
|
||||
mCpuCycle(0),
|
||||
mBufIdx(0)
|
||||
{
|
||||
memset(mTimeVal, 0, sizeof(mTimeVal));
|
||||
memset(mTimeSc, 0, sizeof(mTimeSc));
|
||||
|
||||
struct timeval tv;
|
||||
::gettimeofday(&tv, NULL);
|
||||
mTimeVal[0] = tv;
|
||||
mTimeVal[1] = tv;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief 析构,停止线程
|
||||
*/
|
||||
~TimeProvider();
|
||||
|
||||
/**
 * @brief Get the cached time in seconds.
 *
 * @return time_t current time
 */
time_t getNow() { return mTimeVal[mBufIdx].tv_sec; }

/**
 * @brief Get the current time.
 *
 * @param tv receives the current time
 * @return void
 */
void getNow(timeval * tv);

/**
 * @brief Get the current time in milliseconds.
 *
 * @return int64_t milliseconds since the epoch
 */
int64_t getNowMs();

/**
 * @brief Get the CPU frequency.
 *
 * @return float CPU frequency in MHz
 */

float cpuMHz();
|
||||
|
||||
/**
|
||||
* @brief 运行
|
||||
*/
|
||||
protected:
|
||||
|
||||
virtual void run();
|
||||
|
||||
static ThreadLock g_tl;
|
||||
|
||||
static TimeProviderPtr g_tp;
|
||||
|
||||
private:
|
||||
void setTsc(timeval& tt);
|
||||
|
||||
void addTimeOffset(timeval& tt,const int &idx);
|
||||
|
||||
protected:
|
||||
|
||||
bool mTerminate;
|
||||
|
||||
bool mUseTsc;
|
||||
|
||||
private:
|
||||
float mCpuCycle;
|
||||
|
||||
volatile int mBufIdx;
|
||||
|
||||
timeval mTimeVal[2];
|
||||
|
||||
uint64_t mTimeSc[2];
|
||||
};
|
||||
#endif
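// Illustrative only, not part of the original source: typical use of the cached-time macros above.
#include "TimeProvider.h"

void logElapsedExample()
{
    int64_t beginMs = TNOWMS;   // cached millisecond timestamp, refreshed by the provider thread
    // ... do some work ...
    int64_t costMs = TNOWMS - beginMs;
    time_t nowSec = TNOW;       // cached second-level timestamp
    (void)costMs; (void)nowSec;
}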
|
@ -1,321 +0,0 @@
|
||||
/////////////////////////////////////////////////////////////
|
||||
//
|
||||
// Task Pool, thread unsafe.
|
||||
// Author:qiuyu
|
||||
// Date:Jul 12th,2019
|
||||
//
|
||||
////////////////////////////////////////////////////////////
|
||||
|
||||
#include "TransactionExecutor.h"
|
||||
|
||||
#define __STDC_FORMAT_MACROS
|
||||
#include <inttypes.h>
|
||||
|
||||
#define MAX_POOL_CAPACITY 1000000
|
||||
|
||||
TransactionExecutor::TransactionExecutor()
|
||||
:
|
||||
mBaseTransacId(1)
|
||||
{
|
||||
log_info("construct TransactionExecutor!");
|
||||
// create thread pool
|
||||
// user configurable with config(CPU-bound and IO-bound)
|
||||
int threadNum = 3;
|
||||
for (int idx = 0; idx < threadNum; idx++)
|
||||
{
|
||||
mThreadPool.push_back(new ThreadShell(this));
|
||||
mThreadPool[idx]->start();
|
||||
}
|
||||
|
||||
// user configurable
|
||||
size_t cap = 10000000;
|
||||
mTransPoolMaxCapacity = cap > MAX_POOL_CAPACITY ? MAX_POOL_CAPACITY : cap;
|
||||
}
|
||||
|
||||
TransactionExecutor::~TransactionExecutor()
|
||||
{
|
||||
terminatePoolThread();
|
||||
// wait for all thread finished(the normal way is to call join)
|
||||
sleep(3);
|
||||
// log_info("deconstruct TransactionExecutor!");
|
||||
}
|
||||
|
||||
bool
|
||||
TransactionExecutor::executeTransAsync(std::vector<BasicRequestPtr> trans)
|
||||
{
|
||||
size_t transNum = trans.size();
|
||||
int ret = isPoolOverload(transNum);
|
||||
if (ret >= 0)
|
||||
{
|
||||
// not overload
|
||||
}
|
||||
else if (ret == -1)
|
||||
{
|
||||
// current pool size more than 3/4 of the capacity, pay attention to it
|
||||
log_error("request too fast, trans pool will be full soon, pay attention!");
|
||||
}
|
||||
else
|
||||
{
|
||||
// pool is full, discard this trans
|
||||
log_error("trans pool is full, need to limit the inputs, discard this request!");
|
||||
return false;
|
||||
}
|
||||
|
||||
uint64_t transId = createTransId(transNum);
|
||||
if (transId == 0)
|
||||
{
|
||||
log_error("get trans id faield, size:%zu, currTransId:%" PRIu64, transNum, mBaseTransacId);
|
||||
return false;
|
||||
}
|
||||
|
||||
// create response for error control, such as timeout or others
|
||||
createEmptyResponse(transId);
|
||||
|
||||
for (size_t idx = 0; idx < transNum; idx++)
|
||||
{
|
||||
trans[idx]->setTransId(transId);
|
||||
mTransactionPool.push_back(trans[idx]);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool TransactionExecutor::executeTransSync(
|
||||
std::vector<BasicRequestPtr>& trans,
|
||||
int64_t msTimeout)
|
||||
{
|
||||
size_t transNum = trans.size();
|
||||
int ret = isPoolOverload(transNum);
|
||||
if (ret >= 0)
|
||||
{
|
||||
// not overload
|
||||
}
|
||||
else if (ret == -1)
|
||||
{
|
||||
// current pool size more than 3/4 of the capacity, pay attention to it
|
||||
log_error("request too fast, trans pool will be full soon, pay attention!");
|
||||
}
|
||||
else
|
||||
{
|
||||
// pool is full, discard this trans
|
||||
log_error("trans pool is full, need to limit the inputs, discard this request!");
|
||||
return false;
|
||||
}
|
||||
|
||||
uint64_t transId = createTransId(transNum);
|
||||
if (transId == 0)
|
||||
{
|
||||
log_error("get trans id faield, size:%zu, currTransId:%" PRIu64, transNum, mBaseTransacId);
|
||||
return false;
|
||||
}
|
||||
|
||||
// create response for error control
|
||||
createEmptyResponse(transId);
|
||||
|
||||
SemPtr sem = new Sem();
|
||||
for (size_t idx = 0; idx < transNum; idx++)
|
||||
{
|
||||
trans[idx]->setSem(sem);
|
||||
trans[idx]->setTimeout(msTimeout);
|
||||
trans[idx]->setTransId(transId);
|
||||
mTransactionPool.push_back(trans[idx]);
|
||||
}
|
||||
|
||||
// wait for the trans been executed
|
||||
for (size_t idx = 0; idx < transNum; idx++)
|
||||
{
|
||||
ret = trans[idx]->timeWait(msTimeout);
|
||||
if (ret < 0)
|
||||
{
|
||||
log_error("proc request failed. errno:%d", errno);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
BasicRequestPtr
|
||||
TransactionExecutor::getTask()
|
||||
{
|
||||
BasicRequestPtr task;
|
||||
bool rslt = mTransactionPool.pop_front(task, -1);
|
||||
if (!rslt)
|
||||
{
|
||||
// when the thread is stopped, the thread shell wakes up from the queue with false
|
||||
// log_error("get task from queue failed.");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return task;
|
||||
}
|
||||
|
||||
void
|
||||
TransactionExecutor::recvResponse(
|
||||
uint64_t transId,
|
||||
const BasicResponsePtr response)
|
||||
{
|
||||
Lock<ThreadMutex> lock(mRespLock);
|
||||
// if the trans is not in the map, it has already been removed because processing failed
|
||||
if (mResponsePool.find(transId) == mResponsePool.end())
|
||||
{
|
||||
log_error("request must be failed yet! transId:%" PRIu64, transId);
|
||||
return;
|
||||
}
|
||||
mResponsePool[transId].push_back(response);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
void
|
||||
TransactionExecutor::removeResponse(uint64_t transId)
|
||||
{
|
||||
Lock<ThreadMutex> lock(mRespLock);
|
||||
mResponsePool.erase(transId);
|
||||
log_info("remove trans pool, transId:%" PRIu64, transId);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
bool
|
||||
TransactionExecutor::transFinished(
|
||||
uint64_t transId,
|
||||
const BasicResponsePtr response,
|
||||
std::deque<BasicResponsePtr>& resps)
|
||||
{
|
||||
bool rslt;
|
||||
Lock<ThreadMutex> lock(mRespLock);
|
||||
|
||||
if (mResponsePool.find(transId) == mResponsePool.end())
|
||||
{
|
||||
// maybe occurs
|
||||
log_error("request must be failed yet! transId:%" PRIu64, transId);
|
||||
return false;
|
||||
}
|
||||
|
||||
// recv response first
|
||||
mResponsePool[transId].push_back(response);
|
||||
|
||||
size_t respSize = mResponsePool[transId].size();
|
||||
rslt = (respSize == TASK_NUM(transId));
|
||||
if (rslt)
|
||||
{
|
||||
resps.swap(mResponsePool[transId]);
|
||||
mResponsePool.erase(transId);
|
||||
log_info("proc trans finished. transId:%" PRIu64, transId);
|
||||
}
|
||||
|
||||
return rslt;
|
||||
}
|
||||
|
||||
void
|
||||
TransactionExecutor::createEmptyResponse(uint64_t transId)
|
||||
{
|
||||
Lock<ThreadMutex> lock(mRespLock);
|
||||
mResponsePool[transId] = std::deque<BasicResponsePtr>();
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
void TransactionExecutor::terminatePoolThread()
|
||||
{
|
||||
// stop thread
|
||||
for (size_t idx = 0; idx < mThreadPool.size(); idx++)
|
||||
{
|
||||
mThreadPool[idx]->terminate();
|
||||
}
|
||||
|
||||
// trigger those thread sleep on the queue
|
||||
mTransactionPool.notifyT();
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
int TransactionExecutor::isPoolOverload(size_t currTransNum)
|
||||
{
|
||||
size_t currPoolSize = mTransactionPool.size();
|
||||
size_t totalSize = currPoolSize + currTransNum;
|
||||
if (totalSize <= (mTransPoolMaxCapacity * 3 / 4))
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
else if (totalSize > mTransPoolMaxCapacity)
|
||||
{
|
||||
return -2;
|
||||
}
|
||||
|
||||
// need to limit the request speed
|
||||
return -1;
|
||||
}
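// Illustrative only, not part of the original source: the thresholds above with a concrete
// capacity. With mTransPoolMaxCapacity = 1000000 and total = current pool size + incoming:
//   total <= 750000            -> 0   (accept)
//   750000 < total <= 1000000  -> -1  (accept, but warn that the pool is filling up)
//   total > 1000000            -> -2  (reject the whole transaction)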
|
||||
|
||||
/////////////////////////////////////////////////////////////
|
||||
//
|
||||
// ThreadShell
|
||||
//
|
||||
////////////////////////////////////////////////////////////
|
||||
void
|
||||
TransactionExecutor::ThreadShell::run()
|
||||
{
|
||||
while (mThreadState != Thread::eExited)
|
||||
{
|
||||
// if no task in the queue, get function will sink into block
|
||||
BasicRequestPtr oneTask = mOwner->getTask();
|
||||
if (!oneTask)
|
||||
{
|
||||
// log_info("internal error, get empty task from queue! threadState:%d", mThreadState);
|
||||
continue;
|
||||
}
|
||||
|
||||
uint64_t currTransId = oneTask->getTransId();
|
||||
log_info("proc transaction, transId:%" PRIu64, currTransId);
|
||||
// check timeout
|
||||
bool rslt = oneTask->isTimeout();
|
||||
if (rslt)
|
||||
{
|
||||
mOwner->removeResponse(currTransId);
|
||||
oneTask->wakeUp();
|
||||
log_error("proc task timeout! transId:%" PRIu64, currTransId);
|
||||
continue;
|
||||
}
|
||||
|
||||
// proc task
|
||||
BasicResponsePtr response;
|
||||
int ret = oneTask->procRequest(response);
|
||||
if (ret < 0)
|
||||
{
|
||||
mOwner->removeResponse(currTransId);
|
||||
oneTask->wakeUp();
|
||||
log_error("proc request failed, transId:%" PRIu64, currTransId);
|
||||
continue;
|
||||
}
|
||||
|
||||
// proc response
|
||||
// mOwner->recvResponse(currTransId, response);
|
||||
|
||||
std::deque<BasicResponsePtr> responses;
|
||||
rslt = mOwner->transFinished(currTransId, response, responses);
|
||||
if (!rslt)
|
||||
{
|
||||
// wake up the caller
|
||||
oneTask->wakeUp();
|
||||
continue;
|
||||
}
|
||||
|
||||
// double check timeout
|
||||
rslt = oneTask->isTimeout();
|
||||
if (rslt)
|
||||
{
|
||||
// remove remaining response
|
||||
mOwner->removeResponse(currTransId);
|
||||
oneTask->wakeUp();
|
||||
log_error("proc task timeout! transId:%" PRIu64, currTransId);
|
||||
continue;
|
||||
}
|
||||
|
||||
// trans has finished
|
||||
response->procResponse(responses);
|
||||
oneTask->wakeUp();
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
@ -1,161 +0,0 @@
|
||||
/////////////////////////////////////////////////////////////
|
||||
//
|
||||
// Task Pool, thread unsafe.
|
||||
// Author:qiuyu
|
||||
// Date:Jul 12th,2019
|
||||
//
|
||||
////////////////////////////////////////////////////////////
|
||||
|
||||
#ifndef __TRANSACTION_EXECUTOR_H_
|
||||
#define __TRANSACTION_EXECUTOR_H_
|
||||
|
||||
#include "Sem.h"
|
||||
#include "ThreadQueue.h"
|
||||
#include "SingletonBase.h"
|
||||
#include "Thread.h"
|
||||
#include "timestamp.h"
|
||||
|
||||
#include <stdint.h>
|
||||
#include <map>
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
|
||||
#define TASK_NUM_SHIFT 8
|
||||
#define MAX_TRANS_ID ((1ULL << (64 - TASK_NUM_SHIFT)) - 1)
|
||||
#define SIZE_VALID(size) \
|
||||
( \
|
||||
{ \
|
||||
bool rslt = true; \
|
||||
if (size <= 0 || size > (1ULL << TASK_NUM_SHIFT)) \
|
||||
rslt = false; \
|
||||
rslt; \
|
||||
} \
|
||||
)
|
||||
|
||||
#define TASK_NUM(transId) (transId & ((1ULL << TASK_NUM_SHIFT) - 1))
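// Illustrative only, not part of the original source: how a transaction id packs the
// task count into its low TASK_NUM_SHIFT bits, using made-up numbers.
//
//   baseId = 5, taskNum = 3
//   transId = (5 << 8) + 3 = 1283
//   TASK_NUM(1283) = 1283 & 0xFF = 3   // tasks in this transaction
//   1283 >> 8 = 5                      // recovers the sequence number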
|
||||
|
||||
class BasicRequest;
|
||||
class BasicResponse;
|
||||
typedef AutoPtr<BasicRequest> BasicRequestPtr;
|
||||
typedef AutoPtr<BasicResponse> BasicResponsePtr;
|
||||
|
||||
// user need to override this class
|
||||
class BasicRequest : public HandleBase
|
||||
{
|
||||
private:
|
||||
uint64_t mTransactionId;
|
||||
uint64_t mExpiredWhen;
|
||||
SemPtr mSem;
|
||||
|
||||
public:
|
||||
BasicRequest()
|
||||
:
|
||||
mTransactionId(0),
|
||||
mExpiredWhen(-1),
|
||||
mSem(NULL)
|
||||
{
|
||||
}
|
||||
|
||||
virtual int procRequest(BasicResponsePtr& response) = 0;
|
||||
|
||||
void setTransId(uint64_t transId) { mTransactionId = transId; }
|
||||
uint64_t getTransId() { return mTransactionId; }
|
||||
|
||||
void setSem(const SemPtr sem) { mSem = sem; }
|
||||
void wakeUp()
|
||||
{
|
||||
if (mSem) mSem->semPost();
|
||||
}
|
||||
|
||||
void setTimeout(int64_t timeout)
|
||||
{
|
||||
mExpiredWhen = timeout < 0 ? (uint64_t)-1
|
||||
: (timeout + GET_TIMESTAMP() < timeout ? (uint64_t)-1 : timeout + GET_TIMESTAMP());
|
||||
}
|
||||
bool isTimeout() { return (uint64_t)GET_TIMESTAMP() >= mExpiredWhen; }
|
||||
|
||||
int timeWait(const int64_t msTimeout) { return mSem->semTimeWait(msTimeout); }
|
||||
};
|
||||
|
||||
class BasicResponse : public HandleBase
|
||||
{
|
||||
public:
|
||||
// for coalescing result
|
||||
virtual int procResponse(std::deque<BasicResponsePtr>& responses) = 0;
|
||||
};
|
||||
|
||||
class TransactionExecutor : public SingletonBase<TransactionExecutor>
|
||||
{
|
||||
private:
|
||||
class ThreadShell : public Thread , public HandleBase
|
||||
{
|
||||
private:
|
||||
TransactionExecutor *mOwner;
|
||||
|
||||
public:
|
||||
ThreadShell(TransactionExecutor *owner)
|
||||
:
|
||||
mOwner(owner)
|
||||
{
|
||||
// log_info("Construct ThreadShell");
|
||||
}
|
||||
|
||||
virtual ~ThreadShell()
|
||||
{
|
||||
// log_info("deconstruct ThreadShell");
|
||||
}
|
||||
virtual void run();
|
||||
|
||||
void terminate() { mThreadState = Thread::eExited; }
|
||||
};
|
||||
typedef AutoPtr<ThreadShell> ThreadShellPtr;
|
||||
|
||||
private:
|
||||
uint64_t mBaseTransacId; // start with 1
|
||||
ThreadMutex mRespLock;
|
||||
std::vector<ThreadShellPtr> mThreadPool;
|
||||
|
||||
size_t mTransPoolMaxCapacity;
|
||||
ThreadQueue<BasicRequestPtr> mTransactionPool;
|
||||
std::map<uint64_t, std::deque<BasicResponsePtr> > mResponsePool;
|
||||
|
||||
public:
|
||||
TransactionExecutor();
|
||||
virtual ~TransactionExecutor();
|
||||
|
||||
bool executeTransAsync(std::vector<BasicRequestPtr> trans);
|
||||
|
||||
// timeout -1 for wait forever
|
||||
bool executeTransSync(
|
||||
std::vector<BasicRequestPtr>& trans,
|
||||
int64_t msTimeout);
|
||||
|
||||
private:
|
||||
inline uint64_t createTransId(size_t taskNum)
|
||||
{
|
||||
bool rslt = SIZE_VALID(taskNum);
|
||||
if (!rslt) return 0;
|
||||
|
||||
uint64_t transId = mBaseTransacId << TASK_NUM_SHIFT;
|
||||
transId += taskNum;
|
||||
mBaseTransacId = (mBaseTransacId >= MAX_TRANS_ID) ? 1 : (mBaseTransacId + 1);
|
||||
|
||||
return transId;
|
||||
}
|
||||
|
||||
void createEmptyResponse(uint64_t transId);
|
||||
BasicRequestPtr getTask();
|
||||
void recvResponse(uint64_t transId, const BasicResponsePtr response);
|
||||
void removeResponse(uint64_t transId);
|
||||
|
||||
bool transFinished(
|
||||
uint64_t transId,
|
||||
const BasicResponsePtr response,
|
||||
std::deque<BasicResponsePtr>& resps);
|
||||
|
||||
void terminatePoolThread();
|
||||
int isPoolOverload(size_t currTransNum);
|
||||
};
|
||||
|
||||
#endif // __TRANSACTION_EXECUTOR_H_
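// Illustrative only, not part of the original source: a minimal sketch of how a caller is
// expected to plug into the executor, with a hypothetical EchoRequest/EchoResponse pair.
// It assumes SingletonBase exposes a getInstance() accessor and that AutoPtr accepts a raw
// pointer, as other code in this commit does.
#include "TransactionExecutor.h"

class EchoResponse : public BasicResponse
{
public:
    // called once, on the last response of a transaction, to coalesce the results
    virtual int procResponse(std::deque<BasicResponsePtr>& responses) { return 0; }
};

class EchoRequest : public BasicRequest
{
public:
    // executed on one of the pool threads
    virtual int procRequest(BasicResponsePtr& response)
    {
        response = new EchoResponse();
        return 0;
    }
};

void submitExample()
{
    std::vector<BasicRequestPtr> trans;
    trans.push_back(new EchoRequest());
    trans.push_back(new EchoRequest());
    // blocks up to 1000 ms until both tasks have been processed (assumed getInstance())
    TransactionExecutor::getInstance()->executeTransSync(trans, 1000);
}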
|
@ -1,740 +0,0 @@
|
||||
/////////////////////////////////////////////////////////////
|
||||
//
|
||||
// Implementation of the LIRS cache.
|
||||
// Author:qiuyu
|
||||
// Date:Apr 22th,2019
|
||||
//
|
||||
////////////////////////////////////////////////////////////
|
||||
|
||||
#include "lirs_cache.h"
|
||||
#include "log.h"
|
||||
|
||||
#include <assert.h>
|
||||
|
||||
////////////////////////////////////////////////////////////
|
||||
// stack relevant
|
||||
////////////////////////////////////////////////////////////
|
||||
LirsCache::LirsStack::LirsStack(
|
||||
const int maxLirNum,
|
||||
const int maxStackSize)
|
||||
:
|
||||
mMaxLirEntryNum(maxLirNum),
|
||||
mMaxStackSize(maxStackSize),
|
||||
mCurrLirEntryNum(0),
|
||||
mCurrStackSize(0),
|
||||
mStackBottom(NULL),
|
||||
mStackTop(NULL)
|
||||
{
|
||||
}
|
||||
|
||||
LirsCache::LirsStack::~LirsStack()
|
||||
{
|
||||
LirsEntry_t *prev, *curr = mStackBottom;
|
||||
while (curr)
|
||||
{
|
||||
prev = curr;
|
||||
curr = curr->sStackNext;
|
||||
|
||||
if (prev->sEntryState & HIR_BLOCK_SHARED)
|
||||
prev->sEntryState &= ~HIR_BLOCK_SHARED;
|
||||
else
|
||||
delete prev;
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
LirsCache::LirsStack::removeEntry(
|
||||
LirsEntry_t *entry,
|
||||
std::map<std::string, LirsEntry_t*> &entryMap,
|
||||
const bool releaseEntry)
|
||||
{
|
||||
if (!entry || !(entry->sEntryState & HIR_BLOCK_ONSTACK))
|
||||
{
|
||||
log_error("internal error, entryEmpty:%d.", !entry);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!entry->sStackPrev)
|
||||
{
|
||||
assert(entry == mStackBottom);
|
||||
|
||||
mStackBottom = entry->sStackNext;
|
||||
if (!mStackBottom) mStackTop = NULL;
|
||||
else mStackBottom->sStackPrev = NULL;
|
||||
}
|
||||
else if (!entry->sStackNext)
|
||||
{
|
||||
assert(entry == mStackTop);
|
||||
|
||||
mStackTop = entry->sStackPrev;
|
||||
if (!mStackTop) mStackBottom = NULL;
|
||||
else mStackTop->sStackNext = NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
assert(entry != mStackBottom && entry != mStackTop);
|
||||
|
||||
entry->sStackPrev->sStackNext = entry->sStackNext;
|
||||
entry->sStackNext->sStackPrev = entry->sStackPrev;
|
||||
}
|
||||
|
||||
char &state = entry->sEntryState;
|
||||
bool canRelease = (releaseEntry && !(state & HIR_BLOCK_SHARED));
|
||||
if (state & LIR_BLOCK) mCurrLirEntryNum--;
|
||||
state &= (~HIR_BLOCK_ONSTACK & ~HIR_BLOCK_SHARED & ~LIR_BLOCK);
|
||||
mCurrStackSize--;
|
||||
|
||||
if (canRelease)
|
||||
{
|
||||
log_info("remove entry, key:%s", entry->sKey.c_str());
|
||||
entryMap.erase(entry->sKey);
|
||||
delete entry;
|
||||
entry = NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
entry->sStackPrev = entry->sStackNext = NULL;
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// 1.when this function is called, there must be enough space for the appended entry
// 2.the entry must not already exist in the stack
|
||||
void
|
||||
LirsCache::LirsStack::appendEntry(LirsEntry_t *entry)
|
||||
{
|
||||
if (!entry)
|
||||
{
|
||||
log_error("append empty entry.");
|
||||
assert(false);
|
||||
return;
|
||||
}
|
||||
|
||||
char &state = entry->sEntryState;
|
||||
if (state < 0 || (mCurrLirEntryNum >= mMaxLirEntryNum && (state & LIR_BLOCK)))
|
||||
{
|
||||
log_error("no enough space for the Lir entry");
|
||||
assert(false);
|
||||
return;
|
||||
}
|
||||
|
||||
if (mCurrStackSize >= mMaxStackSize)
|
||||
{
|
||||
log_error("no enough space for the Hir entry");
|
||||
assert(false);
|
||||
return;
|
||||
}
|
||||
|
||||
// has enough space, append it
|
||||
if (!mStackTop)
|
||||
{
|
||||
// the first one
|
||||
mStackTop = mStackBottom = entry;
|
||||
entry->sStackPrev = NULL;
|
||||
entry->sStackNext = NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
// append to the behind of the top entry
|
||||
mStackTop->sStackNext = entry;
|
||||
entry->sStackPrev = mStackTop;
|
||||
mStackTop = entry;
|
||||
mStackTop->sStackNext = NULL;
|
||||
}
|
||||
|
||||
if (state & LIR_BLOCK) mCurrLirEntryNum++;
|
||||
mCurrStackSize++;
|
||||
|
||||
state |= HIR_BLOCK_ONSTACK;
|
||||
if (state & (HIR_BLOCK_ONQUEUE | HIR_RESIDENT_BLOCK)) state |= HIR_BLOCK_SHARED;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// evict all HIR blocks sitting at the bottom of the stack
|
||||
void
|
||||
LirsCache::LirsStack::stackPrune(std::map<std::string, LirsEntry_t*> &entryMap)
|
||||
{
|
||||
if (!mStackBottom) return;
|
||||
|
||||
while (mStackBottom)
|
||||
{
|
||||
if (mStackBottom->sEntryState & LIR_BLOCK) break;
|
||||
removeEntry(mStackBottom, entryMap);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// release one hir block from the bottom of the stack
|
||||
void
|
||||
LirsCache::LirsStack::releaseOneHirEntry(std::map<std::string, LirsEntry_t*> &entryMap)
|
||||
{
|
||||
if (!mStackBottom) return;
|
||||
|
||||
// skip over LIR blocks and release the first (lowest) HIR entry found
LirsEntry_t *curr = mStackBottom->sStackNext;
while (curr)
{
if (curr->sEntryState & LIR_BLOCK)
{
curr = curr->sStackNext;
continue;
}

// remove the entry
removeEntry(curr, entryMap, true);
break;
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////
|
||||
// Lirs queue relevant
|
||||
///////////////////////////////////////////////////////////
|
||||
LirsCache::LirsQueue::LirsQueue(const int maxQueueSize)
|
||||
:
|
||||
mMaxQueueSize(maxQueueSize),
|
||||
mCurrQueueSize(0),
|
||||
mQueueHead(NULL),
|
||||
mQueueTail(NULL)
|
||||
{
|
||||
}
|
||||
|
||||
LirsCache::LirsQueue::~LirsQueue()
|
||||
{
|
||||
LirsEntry_t *prev, *curr = mQueueHead;
|
||||
while (curr)
|
||||
{
|
||||
prev = curr;
|
||||
curr = curr->sQueueNext;
|
||||
|
||||
if (prev->sEntryState & HIR_BLOCK_SHARED)
|
||||
prev->sEntryState &= ~HIR_BLOCK_SHARED;
|
||||
else
|
||||
delete prev;
|
||||
}
|
||||
}
|
||||
|
||||
// evict a resident HIR block from the queue
// the 'release' flag keeps the caller from freeing the entry
// if something else is still holding it
|
||||
void
|
||||
LirsCache::LirsQueue::removeEntry(
|
||||
LirsEntry_t *entry,
|
||||
std::map<std::string, LirsEntry_t*> &entryMap,
|
||||
const bool release)
|
||||
{
|
||||
if (!entry)
|
||||
{
|
||||
log_error("can not remove an empty entry.");
|
||||
return;
|
||||
}
|
||||
|
||||
char &state = entry->sEntryState;
|
||||
if (!(state & HIR_RESIDENT_BLOCK))
|
||||
{
|
||||
assert(false);
|
||||
log_error("incorrect entry state.");
|
||||
return;
|
||||
}
|
||||
|
||||
if (!entry->sQueuePrev)
|
||||
{
|
||||
mQueueHead = entry->sQueueNext;
|
||||
if (!mQueueHead) mQueueTail = NULL;
|
||||
else mQueueHead->sQueuePrev = NULL;
|
||||
}
|
||||
else if (!entry->sQueueNext)
|
||||
{
|
||||
mQueueTail = entry->sQueuePrev;
|
||||
if (!mQueueTail) mQueueHead = NULL;
|
||||
else mQueueTail->sQueueNext = NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
entry->sQueuePrev->sQueueNext = entry->sQueueNext;
|
||||
entry->sQueueNext->sQueuePrev = entry->sQueuePrev;
|
||||
}
|
||||
|
||||
// double check
|
||||
if (release && !(state & HIR_BLOCK_ONSTACK) && !(state & HIR_BLOCK_SHARED))
|
||||
{
|
||||
log_info("remove entry, key:%s", entry->sKey.c_str());
|
||||
entryMap.erase(entry->sKey);
|
||||
delete entry;
|
||||
entry = NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
// clear flag
|
||||
entry->sQueuePrev = entry->sQueueNext = NULL;
|
||||
state &= (~HIR_BLOCK_ONQUEUE & ~HIR_BLOCK_SHARED & ~HIR_RESIDENT_BLOCK);
|
||||
}
|
||||
mCurrQueueSize--;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// when this function is called, the queue must have enough remaining space for the append
|
||||
void
|
||||
LirsCache::LirsQueue::appendEntry(LirsEntry_t *entry)
|
||||
{
|
||||
if (!entry || (entry->sEntryState & LIR_BLOCK))
|
||||
{
|
||||
log_error("empty entry:%d.", entry == NULL);
|
||||
assert(false);
|
||||
return;
|
||||
}
|
||||
|
||||
char &state = entry->sEntryState;
|
||||
if (state < 0 || mCurrQueueSize >= mMaxQueueSize)
|
||||
{
|
||||
log_error("incorrect queue data.");
|
||||
return;
|
||||
}
|
||||
|
||||
// just append to the tail directly
|
||||
if (!mQueueTail)
|
||||
{
|
||||
mQueueHead = mQueueTail = entry;
|
||||
mQueueHead->sQueuePrev = NULL;
|
||||
mQueueTail->sQueueNext = NULL;
|
||||
}
|
||||
else
|
||||
{
|
||||
mQueueTail->sQueueNext = entry;
|
||||
entry->sQueuePrev = mQueueTail;
|
||||
mQueueTail = entry;
|
||||
mQueueTail->sQueueNext = NULL;
|
||||
}
|
||||
mCurrQueueSize++;
|
||||
|
||||
state |= (HIR_BLOCK_ONQUEUE | HIR_RESIDENT_BLOCK);
|
||||
state &= ~LIR_BLOCK;
|
||||
if (state & HIR_BLOCK_ONSTACK) state |= HIR_BLOCK_SHARED;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////
|
||||
// LIRS cache relevant
|
||||
///////////////////////////////////////////////////////////
|
||||
LirsCache::LirsCache(const int cacheSize)
|
||||
:
|
||||
mCacheEntrySize(cacheSize)
|
||||
{
|
||||
if (mCacheEntrySize < eMinCacheEntrySize || mCacheEntrySize > eMaxCacheEntrySize)
|
||||
mCacheEntrySize = mCacheEntrySize < eMinCacheEntrySize ? eMinCacheEntrySize : mCacheEntrySize;
|
||||
|
||||
int queueSize = mCacheEntrySize * eQueueSizeRate / 100;
|
||||
int maxLirEntryNum = mCacheEntrySize - queueSize;
|
||||
int maxStackSize = mCacheEntrySize + queueSize; // the extra queue size for holding non-resident HIR blocks
|
||||
|
||||
mBlockStack = new LirsStack(maxLirEntryNum, maxStackSize);
|
||||
mBlockQueue = new LirsQueue(queueSize);
|
||||
}
|
||||
|
||||
LirsCache::~LirsCache()
|
||||
{
|
||||
if (mBlockStack) delete mBlockStack;
|
||||
if (mBlockQueue) delete mBlockQueue;
|
||||
}
|
||||
|
||||
// find the key and adjust the lirs cache
|
||||
LirsEntry_t*
|
||||
LirsCache::findEntry(const std::string &key)
|
||||
{
|
||||
MapItr_t itr = mEntryMap.find(key);
|
||||
if (itr == mEntryMap.end()) return NULL;
|
||||
|
||||
LirsEntry_t *entry = itr->second;
|
||||
assert(entry != NULL);
|
||||
if (!entry || !(entry->sEntryState & HIR_RESIDENT_BLOCK)) return NULL;
|
||||
|
||||
if (key == "182")
|
||||
{
|
||||
bool debug = false;
|
||||
while (debug)
|
||||
{
|
||||
log_info("xxxxxxx77xx11");
|
||||
}
|
||||
}
|
||||
// hit Lir or Resident Hir block, adjust the cache
|
||||
adjustLirsCache(entry);
|
||||
syntaxCheck();
|
||||
return entry;
|
||||
}
|
||||
|
||||
// 1.if exist, update the value
|
||||
// 2.append a new entry
|
||||
bool
|
||||
LirsCache::appendEntry(
|
||||
const std::string &key,
|
||||
const std::string &value)
|
||||
{
|
||||
// find in the stack first
|
||||
LirsEntry_t *entry = NULL;
|
||||
MapItr_t itr = mEntryMap.find(key);
|
||||
if (itr != mEntryMap.end())
|
||||
{
|
||||
entry = itr->second;
|
||||
#if (__cplusplus >= 201103L)
|
||||
// c++0x, use rvalue reference, value can not be used any more
|
||||
entry->sValue = std::move(value);
|
||||
#else
|
||||
entry->sValue = value;
|
||||
#endif // __cplusplus >= 201103L
|
||||
adjustLirsCache(entry);
|
||||
syntaxCheck();
|
||||
|
||||
log_info("update entry, key:%s, value:%s, state:%d", key.c_str(),\
|
||||
value.c_str(), entry->sEntryState);
|
||||
return true;
|
||||
}
|
||||
|
||||
// append a new entry
|
||||
entry = new LirsEntry_t();
|
||||
if (!entry)
|
||||
{
|
||||
log_error("allocate memory failed.");
|
||||
return false;
|
||||
}
|
||||
entry->initEntry(0, NULL, NULL, NULL, NULL, key, value);
|
||||
char &state = entry->sEntryState;
|
||||
|
||||
// add into the map
|
||||
mEntryMap[key] = entry;
|
||||
|
||||
// make sure have enough space for appending
|
||||
bool isLirFull = mBlockStack->isLirEntryFull();
|
||||
bool isStackFull = mBlockStack->isStackFull();
|
||||
if (!isLirFull)
|
||||
{
|
||||
if (isStackFull) mBlockStack->releaseOneHirEntry(mEntryMap);
|
||||
|
||||
// add as a lir entry
|
||||
state |= LIR_BLOCK;
|
||||
mBlockStack->appendEntry(entry);
|
||||
syntaxCheck();
|
||||
|
||||
log_info("append entry, key:%s, value:%s, state:%d",\
|
||||
key.c_str(), value.c_str(), entry->sEntryState);
|
||||
return true;
|
||||
}
|
||||
|
||||
// add as a resident HIR block
|
||||
bool isQueueFull = mBlockQueue->isHirEntryFull();
|
||||
if (isQueueFull || isStackFull)
|
||||
{
|
||||
if (isQueueFull)
|
||||
{
|
||||
// remove resident HIR block from queue
|
||||
LirsEntry_t *head = mBlockQueue->getHeadOfQueue();
|
||||
mBlockQueue->removeEntry(head, mEntryMap);
|
||||
}
|
||||
|
||||
// check whether the stack is full or not
|
||||
if (isStackFull)
|
||||
{
|
||||
// remove the lir block in the bottom
|
||||
LirsEntry_t *bottom = mBlockStack->getBottomOfStack();
|
||||
mBlockStack->removeEntry(bottom, mEntryMap, false);
|
||||
mBlockQueue->appendEntry(bottom);
|
||||
mBlockStack->stackPrune(mEntryMap);
|
||||
|
||||
// append entry as a lir block
|
||||
state |= LIR_BLOCK;
|
||||
mBlockStack->appendEntry(entry);
|
||||
syntaxCheck();
|
||||
|
||||
log_info("append entry, key:%s, value:%s, state:%d",\
|
||||
key.c_str(), value.c_str(), entry->sEntryState);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
// append to both the stack and the queue as a resident HIR block
|
||||
// state |= (HIR_RESIDENT_BLOCK | HIR_BLOCK_SHARED);
|
||||
mBlockStack->appendEntry(entry);
|
||||
mBlockQueue->appendEntry(entry);
|
||||
assert(state == 30);
|
||||
syntaxCheck();
|
||||
|
||||
log_info("append entry, key:%s, value:%s, state:%d",\
|
||||
key.c_str(), value.c_str(), entry->sEntryState);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
LirsCache::removeEntry(const std::string &key)
|
||||
{
|
||||
MapItr_t itr = mEntryMap.find(key);
|
||||
if (itr == mEntryMap.end()) return true;
|
||||
|
||||
LirsEntry_t *entry = itr->second;
|
||||
char state = entry->sEntryState;
|
||||
|
||||
// remove from the stack
|
||||
if (state & HIR_BLOCK_ONSTACK)
|
||||
{
|
||||
mBlockStack->removeEntry(entry, mEntryMap);
|
||||
|
||||
// try to conduct a pruning
|
||||
mBlockStack->stackPrune(mEntryMap);
|
||||
}
|
||||
|
||||
// remove from the queue
|
||||
if (state & HIR_BLOCK_ONQUEUE)
|
||||
{
|
||||
mBlockQueue->removeEntry(entry, mEntryMap);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// the entry must already exist in the cache, even if it is a non-resident block
|
||||
void
|
||||
LirsCache::adjustLirsCache(LirsEntry_t *entry)
|
||||
{
|
||||
char &state = entry->sEntryState;
|
||||
if (state & LIR_BLOCK)
|
||||
{
|
||||
// lir block
|
||||
// bool inStackBottom = (entry->sStackPrev == NULL);
|
||||
mBlockStack->removeEntry(entry, mEntryMap, false);
|
||||
|
||||
// maybe the removed entry is bottom, try to conduct a stack pruning
|
||||
mBlockStack->stackPrune(mEntryMap);
|
||||
|
||||
state |= LIR_BLOCK;
|
||||
}
|
||||
else
|
||||
{
|
||||
// hir block
|
||||
if (state & HIR_RESIDENT_BLOCK)
|
||||
{
|
||||
// resident hir block
|
||||
if (state & HIR_BLOCK_ONSTACK)
|
||||
{
|
||||
// evicted from queue
|
||||
mBlockQueue->removeEntry(entry, mEntryMap, false);
|
||||
|
||||
// move the bottom entry in the stack to the end of the queue
|
||||
LirsEntry_t *bottom = mBlockStack->getBottomOfStack();
|
||||
mBlockStack->removeEntry(bottom, mEntryMap, false);
|
||||
mBlockQueue->appendEntry(bottom);
|
||||
|
||||
// evicted myself from stack
|
||||
mBlockStack->removeEntry(entry, mEntryMap, false);
|
||||
mBlockStack->stackPrune(mEntryMap);
|
||||
|
||||
state |= LIR_BLOCK;
|
||||
}
|
||||
else
|
||||
{
|
||||
// 1.leave its status in HIR and move this block to the end of the queue
|
||||
mBlockQueue->removeEntry(entry, mEntryMap, false);
|
||||
mBlockQueue->appendEntry(entry);
|
||||
|
||||
// 2.append to the stack
|
||||
bool isStackFull = mBlockStack->isStackFull();
|
||||
if (isStackFull)
|
||||
{
|
||||
// remove the first HIR entry from stack
|
||||
mBlockStack->releaseOneHirEntry(mEntryMap);
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// non-resident hir block; it must be in the stack. If it is not,
// it is a new entry and appendEntry should have been called instead
|
||||
if (!(state & HIR_BLOCK_ONSTACK) || (state & HIR_BLOCK_ONQUEUE))
|
||||
{
|
||||
log_error("internal error.");
|
||||
assert(false);
|
||||
return;
|
||||
}
|
||||
|
||||
// remove the resident HIR block from the head of queue first
|
||||
LirsEntry_t *head = mBlockQueue->getHeadOfQueue();
|
||||
mBlockQueue->removeEntry(head, mEntryMap, true);
|
||||
|
||||
// move the entry in the bottom of the stack into the tail of the queue
|
||||
LirsEntry_t *bottom = mBlockStack->getBottomOfStack();
|
||||
mBlockStack->removeEntry(bottom, mEntryMap, false);
|
||||
mBlockQueue->appendEntry(bottom);
|
||||
|
||||
// remove the entry from the stack first, then conduct stack prune
|
||||
mBlockStack->removeEntry(entry, mEntryMap, false);
|
||||
|
||||
mBlockStack->stackPrune(mEntryMap);
|
||||
|
||||
state |= LIR_BLOCK;
|
||||
}
|
||||
}
|
||||
|
||||
// append this entry to the top of the stack
|
||||
mBlockStack->appendEntry(entry);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// check LIRS cache
|
||||
bool LirsCache::syntaxCheck()
|
||||
{
|
||||
int stackBlockNum = 0;
|
||||
int stackLirBlockNum = 0;
|
||||
int stackRHirBlockNum = 0;
|
||||
int stackNRHirBlockNum = 0;
|
||||
int queueSharedBlockNum = 0;
|
||||
|
||||
// check stack
|
||||
if (mBlockStack)
|
||||
{
|
||||
LirsEntry_t *bottom = mBlockStack->getBottomOfStack();
|
||||
LirsEntry_t *top = mBlockStack->getTopOfStack();
|
||||
|
||||
char state;
|
||||
LirsEntry_t *prev = NULL, *curr = bottom;
|
||||
while (curr)
|
||||
{
|
||||
state = curr->sEntryState;
|
||||
if (state <= 0 ||
|
||||
state > (HIR_BLOCK_SHARED + HIR_BLOCK_ONQUEUE + HIR_BLOCK_ONSTACK + HIR_RESIDENT_BLOCK))
|
||||
{
|
||||
log_error("incorrect entry state.");
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!(state & HIR_BLOCK_ONSTACK))
|
||||
{
|
||||
log_error("incorrect LIR block state. state:%d", state);
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
|
||||
stackBlockNum++;
|
||||
|
||||
if (state & LIR_BLOCK)
|
||||
{
|
||||
if ((state & HIR_RESIDENT_BLOCK)
|
||||
|| (state & HIR_BLOCK_ONQUEUE)
|
||||
|| (state & HIR_BLOCK_SHARED))
|
||||
{
|
||||
log_error("incorrect LIR block. state:%d", state);
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
stackLirBlockNum++;
|
||||
}
|
||||
else if (state & HIR_RESIDENT_BLOCK)
|
||||
{
|
||||
if (!(state & HIR_BLOCK_ONQUEUE)
|
||||
|| !(state & HIR_BLOCK_SHARED))
|
||||
{
|
||||
log_error("incorrect LIR block. state:%d", state);
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
stackRHirBlockNum++;
|
||||
}
|
||||
else
|
||||
{
|
||||
if ((state & HIR_BLOCK_ONQUEUE)
|
||||
|| (state & HIR_BLOCK_SHARED))
|
||||
{
|
||||
log_error("incorrect LIR block. state:%d", state);
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
stackNRHirBlockNum++;
|
||||
}
|
||||
|
||||
prev = curr;
|
||||
curr = curr->sStackNext;
|
||||
if (curr && prev != curr->sStackPrev)
|
||||
{
|
||||
log_error("incorrect double link.");
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
assert(prev == top);
|
||||
}
|
||||
|
||||
// check cache size
|
||||
if (stackRHirBlockNum > mBlockQueue->getCurrQueueSize())
|
||||
{
|
||||
log_error("check RHir block failed.");
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
|
||||
// check queue
|
||||
if (mBlockQueue)
|
||||
{
|
||||
LirsEntry_t *head = mBlockQueue->getHeadOfQueue();
|
||||
LirsEntry_t *tail = mBlockQueue->getTailOfQueue();
|
||||
|
||||
char state;
|
||||
LirsEntry_t *prev = NULL, *curr = head;
|
||||
while (curr)
|
||||
{
|
||||
state = curr->sEntryState;
|
||||
if (state <= 0 ||
|
||||
state > (HIR_BLOCK_SHARED + HIR_BLOCK_ONQUEUE + HIR_BLOCK_ONSTACK + HIR_RESIDENT_BLOCK))
|
||||
{
|
||||
log_error("incorrect entry state.");
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!(state & HIR_BLOCK_ONQUEUE) || !(state & HIR_RESIDENT_BLOCK))
|
||||
{
|
||||
log_error("incorrect Resident HIR block state. state:%d", state);
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (state & LIR_BLOCK)
|
||||
{
|
||||
log_error("incorrect Resident HIR block state. state:%d", state);
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (state & HIR_BLOCK_ONSTACK)
|
||||
{
|
||||
if (!(state & HIR_BLOCK_SHARED))
|
||||
{
|
||||
log_error("incorrect Resident HIR block state. state:%d", state);
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
|
||||
queueSharedBlockNum++;
|
||||
}
|
||||
|
||||
prev = curr;
|
||||
curr = curr->sQueueNext;
|
||||
if (curr && prev != curr->sQueuePrev)
|
||||
{
|
||||
log_error("incorrect double link.");
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
assert(prev == tail);
|
||||
}
|
||||
|
||||
if (stackRHirBlockNum != queueSharedBlockNum)
|
||||
{
|
||||
log_error("shared pointer occur error.");
|
||||
assert(false);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
@ -1,162 +0,0 @@
|
||||
/////////////////////////////////////////////////////////////
|
||||
//
|
||||
// Implementation of the LIRS cache.
|
||||
// Author:qiuyu
|
||||
// Date:Apr 22th,2019
|
||||
//
|
||||
////////////////////////////////////////////////////////////
|
||||
|
||||
#ifndef LIRS_CACHE_H__
|
||||
#define LIRS_CACHE_H__
|
||||
|
||||
#include <stdint.h>
|
||||
#include <map>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
|
||||
// LIRS uses two data structures to hold all cache data: the LIRS stack and the queue.
// Data is classified as hot or cold: hot blocks are called LIR and cold blocks HIR.
// All LIR blocks live in the LIRS stack, while HIR blocks live in the stack and/or
// the queue. HIR blocks are further divided into resident and non-resident; all
// resident HIR blocks are linked together in a small queue.
|
||||
|
||||
#define LIR_BLOCK 1 // Hot data
|
||||
#define HIR_RESIDENT_BLOCK 2 // HIR is cold data
|
||||
#define HIR_BLOCK_ONSTACK 4
|
||||
#define HIR_BLOCK_ONQUEUE 8
|
||||
#define HIR_BLOCK_SHARED 16 // shared Resident HIR entry reference between Stack and Queue
|
||||
// 1.Unfixed data type(include either key or value):
|
||||
// unsigned long long, float, double, string
|
||||
// 2.Except 1, others is fixed, such as the following:
|
||||
// char, short, int, the unsigned series that size is small than 8, and so on
|
||||
// #define HIR_BLOCK_FIXED 32
|
||||
|
||||
typedef struct LirsEntry
|
||||
{
|
||||
char sEntryState;
|
||||
// 1.we assume the value is large enough that sharing one entry (at the cost of two
//   extra doubly-linked-list pointers) is cheaper than cloning the same entry
// 2.sharing the entry lets us implement the LIRS cache with a single hash map
|
||||
struct LirsEntry *sStackPrev;
|
||||
struct LirsEntry *sStackNext;
|
||||
struct LirsEntry *sQueuePrev;
|
||||
struct LirsEntry *sQueueNext;
|
||||
std::string sKey;
|
||||
std::string sValue;
|
||||
|
||||
void initEntry(
|
||||
const char state,
|
||||
struct LirsEntry *sPrev,
|
||||
struct LirsEntry *sNext,
|
||||
struct LirsEntry *qPrev,
|
||||
struct LirsEntry *qNext,
|
||||
const std::string &key,
|
||||
const std::string &value)
|
||||
{
|
||||
sEntryState = state;
|
||||
sStackPrev = sPrev;
|
||||
sStackNext = sNext;
|
||||
sQueuePrev = qPrev;
|
||||
sQueueNext = qNext;
|
||||
#if (__cplusplus >= 201103L)
|
||||
sKey = std::move(key);
|
||||
sValue = std::move(value);
|
||||
#else
|
||||
sKey = key;
|
||||
sValue = value;
|
||||
#endif
|
||||
}
|
||||
}LirsEntry_t;
|
||||
|
||||
|
||||
class LirsCache
|
||||
{
|
||||
private:
|
||||
enum CacheRelevant
|
||||
{
|
||||
eQueueSizeRate = 1, // 1%
|
||||
eMinCacheEntrySize = 100,
|
||||
eMaxCacheEntrySize = 500000
|
||||
};
|
||||
|
||||
private:
|
||||
typedef std::map<std::string, LirsEntry_t*>::iterator MapItr_t;
|
||||
|
||||
class LirsStack
|
||||
{
|
||||
private:
|
||||
int mMaxLirEntryNum; // Maximum LIR entry number
|
||||
int mMaxStackSize; // maximum real stack capacity, contain LIR + resident HIR + non-resident blocks
|
||||
int mCurrLirEntryNum;
|
||||
int mCurrStackSize;
|
||||
LirsEntry_t* mStackBottom;
|
||||
LirsEntry_t* mStackTop;
|
||||
|
||||
public:
|
||||
LirsStack(const int maxLir, const int maxStackSize);
|
||||
virtual ~LirsStack();
|
||||
|
||||
inline LirsEntry_t* getBottomOfStack() { return mStackBottom; }
|
||||
inline LirsEntry_t* getTopOfStack() { return mStackTop; }
|
||||
inline bool isLirEntryFull() { return mCurrLirEntryNum >= mMaxLirEntryNum; }
|
||||
inline bool isStackFull() { return mCurrStackSize >= mMaxStackSize; }
|
||||
void stackPrune(std::map<std::string, LirsEntry_t*> &entryMap);
|
||||
void releaseOneHirEntry(std::map<std::string, LirsEntry_t*> &entryMap);
|
||||
|
||||
void appendEntry(LirsEntry_t *entry);
|
||||
void removeEntry(
|
||||
LirsEntry_t *entry,
|
||||
std::map<std::string, LirsEntry_t*> &entryMap,
|
||||
const bool releaseEntry = true);
|
||||
};
|
||||
|
||||
class LirsQueue
|
||||
{
|
||||
private:
|
||||
int mMaxQueueSize; // Maximum resident HIR entry number
|
||||
int mCurrQueueSize;
|
||||
LirsEntry_t *mQueueHead;
|
||||
LirsEntry_t *mQueueTail;
|
||||
|
||||
public:
|
||||
LirsQueue(const int maxQueueSize);
|
||||
virtual ~LirsQueue();
|
||||
|
||||
inline LirsEntry_t* getHeadOfQueue() { return mQueueHead; }
|
||||
inline LirsEntry_t* getTailOfQueue() { return mQueueTail; }
|
||||
inline bool isHirEntryFull() { return mCurrQueueSize >= mMaxQueueSize; }
|
||||
inline int getCurrQueueSize() { return mCurrQueueSize; }
|
||||
|
||||
void appendEntry(LirsEntry_t *entry);
|
||||
void removeEntry(
|
||||
LirsEntry_t *entry,
|
||||
std::map<std::string, LirsEntry_t*> &entryMap,
|
||||
const bool releaseEntry = true);
|
||||
};
|
||||
|
||||
public:
|
||||
explicit LirsCache(const int cacheSize = eMaxCacheEntrySize);
|
||||
virtual ~LirsCache();
|
||||
|
||||
LirsEntry_t* findEntry(const std::string &key);
|
||||
|
||||
// callers are expected to convert basic data types to string themselves
|
||||
bool appendEntry(const std::string &key, const std::string &value);
|
||||
bool removeEntry(const std::string &key);
|
||||
|
||||
private:
|
||||
void adjustLirsCache(LirsEntry_t * entry);
|
||||
bool syntaxCheck();
|
||||
|
||||
private:
|
||||
int mCacheEntrySize; // LIR and resident HIR block nums
|
||||
LirsStack* mBlockStack; // store all LIR blocks and some HIR blocks
|
||||
LirsQueue* mBlockQueue; // store all resident HIR blocks
|
||||
std::map<std::string, LirsEntry_t*> mEntryMap; // store all cache data for efficient search
|
||||
|
||||
friend class LirsStack;
|
||||
friend class LirsQueue;
|
||||
};
|
||||
|
||||
#endif // LIRS_CACHE_H__
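
A minimal usage sketch of the public interface declared above; the include file name and the exact capacity-clamping behaviour are assumptions, not taken from the original sources.

#include <cassert>
#include <string>
#include "lirs_cache.h"   // assumed file name for the header above

int main()
{
    LirsCache cache(1000);                            // capacity, presumably clamped to [eMinCacheEntrySize, eMaxCacheEntrySize]
    cache.appendEntry("user:1", "alice");
    cache.appendEntry("user:2", "bob");

    LirsEntry_t *entry = cache.findEntry("user:1");   // a hit re-ranks the block per the LIRS rules
    if (entry != NULL)
        assert(entry->sValue == "alice");

    cache.removeEntry("user:2");
    return 0;
}
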
|
@ -1,354 +0,0 @@
|
||||
#include "config_center_parser.h"
|
||||
#include "DtcMonitorConfigMgr.h"
|
||||
#include "json/json.h"
|
||||
#include <algorithm>
|
||||
#include <iostream>
|
||||
#include <fstream>
|
||||
#include <sstream>
|
||||
#include <unistd.h>
|
||||
#include <sys/stat.h>   // for fstat() used in GetConfigCurrentTime()
|
||||
|
||||
XmlParser::XmlParser()
|
||||
: ParserBase(E_SEARCH_MONITOR_XML_PARSER)
|
||||
{ }
|
||||
|
||||
bool XmlParser::ParseConfig(std::string path)
|
||||
{
|
||||
bool bResult = false;
|
||||
m_oDtcClusterContext.clear();
|
||||
m_oSearchCoreClusterContext.clear();
|
||||
|
||||
FILE* fp = fopen(path.c_str(), "rb" );
|
||||
if (fp == NULL)
|
||||
{
|
||||
monitor_log_error("load config center info failed.");
|
||||
return false;
|
||||
}
|
||||
|
||||
// Determine file length
|
||||
fseek( fp, 0L, SEEK_END );
|
||||
int nFileLen = ftell(fp);
|
||||
fseek( fp, 0L, SEEK_SET );
|
||||
|
||||
// Load string
|
||||
std::allocator<char> mem;
|
||||
std::allocator<char>::pointer pBuffer = mem.allocate(nFileLen+1, NULL);
|
||||
if ( fread( pBuffer, nFileLen, 1, fp ) == 1 )
|
||||
{
|
||||
pBuffer[nFileLen] = '\0';
|
||||
bResult = ParseConfigCenterConfig(pBuffer,nFileLen);
|
||||
}
|
||||
fclose(fp);
|
||||
mem.deallocate(pBuffer, nFileLen + 1);   // deallocate size must match the allocate size
|
||||
return bResult;
|
||||
}
|
||||
|
||||
bool XmlParser::ParseConfigCenterConfig(const char *buf, int len)
|
||||
{
|
||||
std::string attr;
|
||||
std::string xmldoc(buf,len);
|
||||
CMarkupSTL xml;
|
||||
if(!xml.SetDoc(xmldoc.c_str()))
|
||||
{
|
||||
monitor_log_error("parse config file error");
|
||||
return false;
|
||||
}
|
||||
xml.ResetMainPos();
|
||||
|
||||
if (!xml.FindElem("MODULE"))
|
||||
{
|
||||
monitor_log_error("no local module info");
|
||||
return false;
|
||||
}
|
||||
|
||||
while (xml.FindChildElem("SERVERSHARDING"))
|
||||
{
|
||||
xml.IntoElem();
|
||||
std::string shardingname = xml.GetAttrib("ShardingName");
|
||||
if ("" == shardingname) {
|
||||
monitor_log_error("sharding name is empty");
|
||||
return false;
|
||||
}
|
||||
while (xml.FindChildElem("INSTANCE"))
|
||||
{
|
||||
if (xml.IntoElem())
|
||||
{
|
||||
ServerNode node;
|
||||
node.ip = xml.GetAttrib("ip");
|
||||
node.port = xml.GetAttrib("port");
|
||||
node.weight = atoi((xml.GetAttrib("weight")).c_str());
|
||||
node.role = xml.GetAttrib("role");
|
||||
if ("" == node.ip || "" == node.port || node.weight <= 0){
|
||||
monitor_log_error("instance is not correct");
|
||||
return false;
|
||||
}
|
||||
if (node.role != INDEX_GEN_ROLE)
|
||||
{
|
||||
node.role = SEARCH_ROLE;
|
||||
}
|
||||
|
||||
std::string addr = node.ip + ":30311";
|
||||
std::pair<std::string, std::string> tmpAddr(shardingname, addr);
|
||||
if (std::find(m_oDtcClusterContext.begin(), m_oDtcClusterContext.end(), tmpAddr)
|
||||
== m_oDtcClusterContext.end())
|
||||
{
|
||||
m_oDtcClusterContext.push_back(tmpAddr);
|
||||
}
|
||||
|
||||
m_oSearchCoreClusterContext[shardingname].push_back(node);
|
||||
xml.OutOfElem();
|
||||
}
|
||||
}
|
||||
xml.OutOfElem();
|
||||
}
|
||||
|
||||
if (0 == m_oDtcClusterContext.size())
|
||||
{
|
||||
monitor_log_error("local server list is empty");
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
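
For reference, the XML layout that ParseConfigCenterConfig above walks, reconstructed from its FindElem/FindChildElem/GetAttrib calls; the sketch below is illustrative, and the path mirrors the fallback used by UpdateParser() further down.

#include "config_center_parser.h"

// Reconstructed element/attribute layout (a sketch, inferred from the parsing code above):
//
//   <MODULE>
//     <SERVERSHARDING ShardingName="shard0">
//       <INSTANCE ip="127.0.0.1" port="20000" weight="1" role="search"/>
//     </SERVERSHARDING>
//   </MODULE>
bool loadXmlClusterConfig()
{
    XmlParser parser;
    return parser.ParseConfig("../conf/localCluster.xml");
}
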
|
||||
|
||||
////***************JsonParser***********************
|
||||
JsonParser::JsonParser()
|
||||
: ParserBase(E_SEARCH_MONITOR_JSON_PARSER)
|
||||
{ }
|
||||
|
||||
bool JsonParser::ParseConfig(std::string path)
|
||||
{
|
||||
Json::Reader reader;
|
||||
Json::Value oLocalClusterContext;
|
||||
|
||||
m_oDtcClusterContext.clear();
|
||||
m_oSearchCoreClusterContext.clear();
|
||||
|
||||
std::ifstream iStream(path.c_str());
|
||||
if (!iStream.is_open())
|
||||
{
|
||||
monitor_log_error("load %s failed.", path.c_str());
|
||||
return false;
|
||||
}
|
||||
|
||||
if (reader.parse(iStream, oLocalClusterContext)
|
||||
&& oLocalClusterContext.isMember("MODULE"))
|
||||
{
|
||||
Json::Value oServerSharding = oLocalClusterContext["MODULE"]["SERVERSHARDING"];
|
||||
std::string sTempShardingName = "";
|
||||
for (int i = 0; i < (int)oServerSharding.size(); i++)
|
||||
{
|
||||
if (oServerSharding[i].isMember("ShardingName")
|
||||
&& oServerSharding[i]["ShardingName"].isString())
|
||||
{
|
||||
sTempShardingName = oServerSharding[i]["ShardingName"].asString();
|
||||
}
|
||||
else
|
||||
{
|
||||
monitor_log_error("ShardingName in incorrect");
|
||||
return false;
|
||||
}
|
||||
|
||||
if (oServerSharding[i].isMember("INSTANCE")
|
||||
&& oServerSharding[i]["INSTANCE"].isArray())
|
||||
{
|
||||
Json::Value oInstance = oServerSharding[i]["INSTANCE"];
|
||||
for (int i = 0; i < (int)oInstance.size(); i++)
|
||||
{
|
||||
ServerNode node;
|
||||
node.sShardingName = sTempShardingName;
|
||||
if (oInstance[i].isMember("ip")
|
||||
&& oInstance[i]["ip"].isString())
|
||||
{
|
||||
node.ip = oInstance[i]["ip"].asString();
|
||||
}
|
||||
else
|
||||
{
|
||||
monitor_log_error("ip is incorrect");
|
||||
return false;
|
||||
}
|
||||
|
||||
std::string addr = node.ip + ":30311";
|
||||
std::pair<std::string, std::string> tmpAddr(node.ip, addr);
|
||||
if (std::find(m_oDtcClusterContext.begin(), m_oDtcClusterContext.end(), tmpAddr)
|
||||
== m_oDtcClusterContext.end())
|
||||
{
|
||||
m_oDtcClusterContext.push_back(tmpAddr);
|
||||
}
|
||||
|
||||
if (oInstance[i].isMember("port")
|
||||
&& oInstance[i]["port"].isString())
|
||||
{
|
||||
node.port = oInstance[i]["port"].asString();
|
||||
}
|
||||
else
|
||||
{
|
||||
monitor_log_error("port is incorrect");
|
||||
return false;
|
||||
}
|
||||
|
||||
if (oInstance[i].isMember("weight")
|
||||
&& oInstance[i]["weight"].isInt())
|
||||
{
|
||||
node.weight = oInstance[i]["weight"].asInt();
|
||||
}
|
||||
else
|
||||
{
|
||||
log_error("weight is incorrect");
|
||||
return false;
|
||||
}
|
||||
|
||||
if (oInstance[i].isMember("role")
|
||||
&& oInstance[i]["role"].isString())
|
||||
{
|
||||
node.role = oInstance[i]["role"].asString();
|
||||
}
|
||||
|
||||
if ("" == node.ip || "" == node.port || node.weight <= 0)
|
||||
{
|
||||
log_error("instance is incorrect");
|
||||
return false;
|
||||
}
|
||||
if (node.role != INDEX_GEN_ROLE)
|
||||
{
|
||||
node.role = SEARCH_ROLE;
|
||||
}
|
||||
|
||||
if(oInstance[i].isMember("disasterRole")
|
||||
&& oInstance[i]["disasterRole"].isString())
|
||||
{
|
||||
node.sDisasterRole = oInstance[i]["disasterRole"].asString();
|
||||
}
|
||||
|
||||
std::vector<ServerNode>::iterator it = std::find(m_oSearchCoreClusterContext[node.ip].begin(),
|
||||
m_oSearchCoreClusterContext[node.ip].end(), node);
|
||||
if (it != m_oSearchCoreClusterContext[node.ip].end())
|
||||
{
|
||||
monitor_log_error("instance %s:%s is repeated", node.ip.c_str(), node.port.c_str());
|
||||
return false;
|
||||
}
|
||||
m_oSearchCoreClusterContext[node.ip].push_back(node);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
monitor_log_error("INSTANCE in incorrect");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if (0 == m_oSearchCoreClusterContext.size())
|
||||
{
|
||||
monitor_log_error("server is empty");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
monitor_log_error("localCluster.json file format has some error, please check.");
|
||||
return false;
|
||||
}
|
||||
|
||||
#if DEBUG_LOG_ENABLE
|
||||
SearchCoreClusterCtxIter iter = m_oSearchCoreClusterContext.begin();
|
||||
for ( ; iter != m_oSearchCoreClusterContext.end(); ++iter)
|
||||
{
|
||||
std::vector<ServerNode>::iterator iTempIter = iter->second.begin();
|
||||
for (; iTempIter != iter->second.end(); ++iTempIter)
|
||||
{
|
||||
log_debug("Json-Parser Key(IP:%s) , Value:ShardingName:%s, ip:%s, port:%s, weight:%d, role:%s, disasterRole:%s"
|
||||
, iter->first.c_str()
|
||||
, iTempIter->sShardingName.c_str()
|
||||
, iTempIter->ip.c_str()
|
||||
, iTempIter->port.c_str()
|
||||
, iTempIter->weight
|
||||
, iTempIter->role.c_str()
|
||||
, iTempIter->sDisasterRole.c_str());
|
||||
}
|
||||
}
|
||||
|
||||
DtcClusterCtxIter oDtcIter = m_oDtcClusterContext.begin();
|
||||
for( ; oDtcIter != m_oDtcClusterContext.end(); ++oDtcIter)
|
||||
{
|
||||
log_debug("Json-Parser DtcCluster Key(IP:%s) , Value: (DTC addr:%s)"
|
||||
, oDtcIter->first.c_str() ,oDtcIter->second.c_str());
|
||||
}
|
||||
#endif
|
||||
|
||||
return true;
|
||||
}
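
Likewise, the JSON layout that JsonParser::ParseConfig expects, reconstructed from the isMember()/asString()/asInt() checks above; the field values are placeholders and the path mirrors the fallback used by UpdateParser() further down.

#include "config_center_parser.h"

// Reconstructed JSON layout (a sketch, inferred from the parsing code above):
//
//   { "MODULE": { "SERVERSHARDING": [
//       { "ShardingName": "shard0",
//         "INSTANCE": [ { "ip": "127.0.0.1", "port": "20000", "weight": 1,
//                         "role": "search", "disasterRole": "master" } ] } ] } }
bool loadJsonClusterConfig()
{
    JsonParser parser;
    return parser.ParseConfig("../conf/localCluster.json");
}
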
|
||||
|
||||
////***************ConfigCenterParser***********************
|
||||
ParserBase* const ConfigCenterParser::CreateInstance(int iParserId)
|
||||
{
|
||||
ParserBase* pCurParser = NULL;
|
||||
switch (iParserId)
|
||||
{
|
||||
case E_SEARCH_MONITOR_XML_PARSER:
|
||||
{
|
||||
pCurParser = new XmlParser();
|
||||
}
|
||||
break;
|
||||
case E_SEARCH_MONITOR_JSON_PARSER:
|
||||
{
|
||||
pCurParser = new JsonParser();
|
||||
}
|
||||
break;
|
||||
default:
|
||||
{
|
||||
monitor_log_error("Unknow file type, please check");
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
if(!UpdateParser(pCurParser))
|
||||
{
|
||||
DELETE(pCurParser);
|
||||
pCurParser = NULL;
|
||||
}
|
||||
return pCurParser;
|
||||
}
|
||||
|
||||
bool ConfigCenterParser::UpdateParser(ParserBase* const pParser)
|
||||
{
|
||||
if(NULL == pParser) { return false;}
|
||||
|
||||
std::string sVaildDir = DtcMonitorConfigMgr::getInstance()->getValidDir();
|
||||
|
||||
if(access(sVaildDir.c_str() , F_OK) != 0)
|
||||
{
|
||||
monitor_log_info("Path:%s is not existing.",sVaildDir.c_str());
|
||||
int iParserId = pParser->GetCurParserId();
|
||||
sVaildDir = (E_SEARCH_MONITOR_JSON_PARSER == iParserId) ? "../conf/localCluster.json" : "../conf/localCluster.xml";
|
||||
}
|
||||
|
||||
monitor_log_info("sCaDirPath:%s" ,sVaildDir.c_str());
|
||||
|
||||
if(sVaildDir.empty() || !pParser->ParseConfig(sVaildDir))
|
||||
{
|
||||
monitor_log_error("UpdateParser error, please check");
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool ConfigCenterParser::CheckConfigModifyOrNot(long iStartTime)
|
||||
{
|
||||
std::string sVaildDir = DtcMonitorConfigMgr::getInstance()->getValidDir();
|
||||
long modifyTime = GetConfigCurrentTime(sVaildDir);
|
||||
monitor_log_debug("check config modify, preCheckTime:%ld , modifyTime:%ld", iStartTime , modifyTime);
|
||||
return (modifyTime != iStartTime);
|
||||
}
|
||||
|
||||
long ConfigCenterParser::GetConfigCurrentTime(std::string sPath)
|
||||
{
|
||||
FILE * fp = fopen(sPath.c_str(), "r");
|
||||
if(NULL == fp){
|
||||
monitor_log_error("open file[%s] error.", sPath.c_str());
|
||||
return -1;
|
||||
}
|
||||
int fd = fileno(fp);
|
||||
struct stat buf;
|
||||
fstat(fd, &buf);
|
||||
long modifyTime = buf.st_mtime;
|
||||
fclose(fp);
|
||||
return modifyTime;
|
||||
}
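
A hedged sketch of how the mtime check above could drive a periodic reload; the polling interval and the function name are illustrative, not part of the original sources.

#include <unistd.h>
#include "config_center_parser.h"
#include "DtcMonitorConfigMgr.h"

// Illustrative polling loop, not part of the original sources.
void watchConfigLoop(ParserBase *parser)
{
    std::string path = DtcMonitorConfigMgr::getInstance()->getValidDir();
    long lastMtime = ConfigCenterParser::Instance()->GetConfigCurrentTime(path);

    for (;;)
    {
        sleep(10);  // illustrative interval
        if (ConfigCenterParser::Instance()->CheckConfigModifyOrNot(lastMtime))
        {
            // The file's mtime changed: re-parse it and remember the new timestamp.
            ConfigCenterParser::Instance()->UpdateParser(parser);
            lastMtime = ConfigCenterParser::Instance()->GetConfigCurrentTime(path);
        }
    }
}
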
|
@ -1,145 +0,0 @@
|
||||
/////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// Parse config from config_center
|
||||
// created by chenyujie on Dec 14, 2020
|
||||
////////////////////////////////////////////////////////////////
|
||||
#ifndef CONFIG_CENTER_PARSER_H_
|
||||
#define CONFIG_CENTER_PARSER_H_
|
||||
|
||||
#include <vector>
|
||||
#include <map>
|
||||
#include <string>
|
||||
#include "MarkupSTL.h"
|
||||
#include "log.h"
|
||||
#include "singleton.h"
|
||||
#include "memcheck.h"
|
||||
#include <stdint.h>
|
||||
#include "noncopyable.h"
|
||||
|
||||
#define INDEX_GEN_ROLE "index_gen"
|
||||
#define SEARCH_ROLE "search"
|
||||
#define DISASTER_ROLE_MASTER "master"
|
||||
#define DISASTER_ROLE_REPLICATE "replicate"
|
||||
|
||||
#define DEBUG_LOG_ENABLE 1
|
||||
|
||||
struct ServerNode
|
||||
{
|
||||
std::string ip;
|
||||
std::string port;
|
||||
int weight;
|
||||
std::string role;
|
||||
std::string sDisasterRole;
|
||||
std::string sShardingName;
|
||||
|
||||
bool operator==(const ServerNode& node)
|
||||
{
|
||||
return (ip == node.ip) && (port == node.port) && (weight == node.weight)
|
||||
&& (role == node.role) && (sDisasterRole == node.sDisasterRole) && (sShardingName == node.sShardingName);
|
||||
}
|
||||
|
||||
void operator()(
|
||||
std::string _sIP,
|
||||
std::string _sPort,
|
||||
int _iWeight,
|
||||
std::string _sRole,
|
||||
std::string _sDisasterRole,
|
||||
std::string _sShardingName
|
||||
)
|
||||
{
|
||||
ip = _sIP;
|
||||
port = _sPort;
|
||||
weight = _iWeight;
|
||||
role = _sRole;
|
||||
sDisasterRole = _sDisasterRole;
|
||||
sShardingName = _sShardingName;
|
||||
}
|
||||
};
|
||||
|
||||
enum E_SEARCH_MONITOR_PARSER_ID
|
||||
{
|
||||
E_SEARCH_MONITOR_XML_PARSER = 0,
|
||||
E_SEARCH_MONITOR_JSON_PARSER
|
||||
};
|
||||
|
||||
typedef std::map<std::string, std::vector<ServerNode> > SearchCoreClusterContextType;
|
||||
typedef SearchCoreClusterContextType::iterator SearchCoreClusterCtxIter;
|
||||
|
||||
typedef std::vector<std::pair<std::string, std::string> > DtcClusterContextType;
|
||||
typedef DtcClusterContextType::iterator DtcClusterCtxIter;
|
||||
|
||||
class ParserBase
|
||||
{
|
||||
public:
|
||||
ParserBase(int iParserId)
|
||||
: m_oDtcClusterContext()
|
||||
, m_oSearchCoreClusterContext()
|
||||
, m_iParserId(iParserId)
|
||||
{ };
|
||||
virtual ~ParserBase(){};
|
||||
|
||||
public:
|
||||
const SearchCoreClusterContextType& GetSearchCoreClusterContext() const {return m_oSearchCoreClusterContext;}
|
||||
const DtcClusterContextType& GetDtcClusterContext() const { return m_oDtcClusterContext;}
|
||||
int GetCurParserId() {return m_iParserId;}
|
||||
|
||||
public:
|
||||
virtual bool ParseConfig(std::string path) = 0;
|
||||
|
||||
protected:
|
||||
DtcClusterContextType m_oDtcClusterContext;
|
||||
SearchCoreClusterContextType m_oSearchCoreClusterContext;
|
||||
int m_iParserId;
|
||||
};
|
||||
|
||||
class XmlParser : public ParserBase
|
||||
{
|
||||
public:
|
||||
XmlParser();
|
||||
virtual ~XmlParser(){};
|
||||
|
||||
public:
|
||||
virtual bool ParseConfig(std::string path);
|
||||
|
||||
private:
|
||||
bool ParseConfigCenterConfig(const char *buf, int len);
|
||||
};
|
||||
|
||||
class JsonParser : public ParserBase
|
||||
{
|
||||
public:
|
||||
JsonParser();
|
||||
virtual ~JsonParser(){};
|
||||
|
||||
public:
|
||||
virtual bool ParseConfig(std::string path);
|
||||
};
|
||||
|
||||
/// ************************************************************
|
||||
/// * Factory that creates parser instances for the different config file formats
|
||||
/// ************************************************************
|
||||
class ConfigCenterParser : private noncopyable
|
||||
{
|
||||
public:
|
||||
ConfigCenterParser(){ };
|
||||
virtual ~ConfigCenterParser(){ };
|
||||
|
||||
public:
|
||||
static ConfigCenterParser* Instance()
|
||||
{
|
||||
return CSingleton<ConfigCenterParser>::Instance();
|
||||
};
|
||||
|
||||
static void Destroy()
|
||||
{
|
||||
CSingleton<ConfigCenterParser>::Destroy();
|
||||
};
|
||||
|
||||
public:
|
||||
ParserBase* const CreateInstance(int iParserId = E_SEARCH_MONITOR_JSON_PARSER);
|
||||
bool UpdateParser(ParserBase* const pParser);
|
||||
bool CheckConfigModifyOrNot(long iStartTime);
|
||||
long GetConfigCurrentTime(std::string sPath);
|
||||
};
|
||||
|
||||
#endif
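
A short sketch of driving the factory declared above; it assumes the monitor_log_* macros come from log.h and that the caller owns the returned parser.

#include "config_center_parser.h"

// Illustrative caller, not part of the original sources.
void reloadClusterConfig()
{
    // CreateInstance() builds the parser and already runs UpdateParser() on it;
    // it returns NULL when parsing fails.
    ParserBase *parser =
        ConfigCenterParser::Instance()->CreateInstance(E_SEARCH_MONITOR_JSON_PARSER);
    if (NULL == parser)
    {
        monitor_log_error("create config parser failed");
        return;
    }

    const SearchCoreClusterContextType &nodes = parser->GetSearchCoreClusterContext();
    const DtcClusterContextType &dtcAddrs = parser->GetDtcClusterContext();
    monitor_log_info("loaded %d shardings, %d dtc addrs",
                     (int)nodes.size(), (int)dtcAddrs.size());
    delete parser;   // assumed ownership; CreateInstance() does not keep a reference
}
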
|
@ -1,155 +0,0 @@
|
||||
#include <assert.h>
|
||||
#include <stdarg.h>
|
||||
#include "curl_http.h"
|
||||
// Callback invoked by curl when response data is received
|
||||
// @param ptr buffer holding the data received by curl
|
||||
// @param [in] size size of each data element
|
||||
// @param [in] nmemb number of data elements
|
||||
// @param [in/out] userp user-defined pointer (a BuffV* here)
|
||||
// @return number of bytes handled
|
||||
size_t CurlCallback(void* ptr, size_t size, size_t nmemb, void* userp)
|
||||
{
|
||||
size_t read_bytes = size * nmemb;
|
||||
BuffV* buf = static_cast<BuffV*>(userp);
|
||||
|
||||
if(!buf->CheckBuffer(buf->Size() + read_bytes)) {
|
||||
printf("Can't get enough memory!\n");
|
||||
return 0;   // report a short write so curl aborts instead of silently dropping the data
|
||||
}
|
||||
|
||||
buf->SetBuffer(ptr, read_bytes);
|
||||
|
||||
//printf("read_bytes:%lu\n", read_bytes);
|
||||
return read_bytes;
|
||||
}
|
||||
|
||||
CurlHttp::CurlHttp()
|
||||
{
|
||||
m_curl = curl_easy_init();
|
||||
assert(NULL != m_curl);
|
||||
|
||||
SetTimeout(5L);
|
||||
|
||||
curl_easy_setopt(m_curl, CURLOPT_NOSIGNAL, 1l);
|
||||
curl_easy_setopt(m_curl, CURLOPT_FOLLOWLOCATION, 1l);
|
||||
curl_easy_setopt(m_curl, CURLOPT_WRITEFUNCTION, CurlCallback);
|
||||
|
||||
memset(m_params, 0, sizeof(m_params));
|
||||
}
|
||||
|
||||
CurlHttp::~CurlHttp()
|
||||
{
|
||||
curl_easy_cleanup(m_curl);
|
||||
}
|
||||
|
||||
void CurlHttp::SetTimeout(long timeout)
|
||||
{
|
||||
// Timeout in seconds
|
||||
curl_easy_setopt(m_curl, CURLOPT_TIMEOUT, timeout);
|
||||
curl_easy_setopt(m_curl, CURLOPT_CONNECTTIMEOUT, timeout);
|
||||
}
|
||||
|
||||
void CurlHttp::SetTimeout_MS(long timeout_ms)
|
||||
{
|
||||
// Timeout in milliseconds
|
||||
curl_easy_setopt(m_curl, CURLOPT_TIMEOUT_MS, timeout_ms);
|
||||
curl_easy_setopt(m_curl, CURLOPT_CONNECTTIMEOUT_MS, timeout_ms);
|
||||
}
|
||||
|
||||
void CurlHttp::SetHttpParams(const char* format, ...)
|
||||
{
|
||||
va_list ap;
|
||||
va_start(ap, format);
|
||||
vsnprintf(m_params, sizeof(m_params), format, ap);
|
||||
va_end(ap);
|
||||
}
|
||||
|
||||
int CurlHttp::HttpRequest(const std::string& url, BuffV* buf, bool is_get, struct curl_slist *headers)
|
||||
{
|
||||
if(url.empty()) {
|
||||
return -100000;
|
||||
}
|
||||
|
||||
CURLcode iCurlRet;
|
||||
|
||||
std::string addr = url;
|
||||
if(is_get) {
|
||||
curl_easy_setopt(m_curl, CURLOPT_HTTPGET, 1L);
|
||||
AppendGetParam(&addr);
|
||||
} else {
|
||||
curl_easy_setopt(m_curl, CURLOPT_POST, 1L);
|
||||
curl_easy_setopt(m_curl, CURLOPT_POSTFIELDS, m_params);
|
||||
//struct curl_slist *headers = NULL;
|
||||
headers =curl_slist_append(headers,"Content-Type: application/json");
|
||||
curl_easy_setopt(m_curl, CURLOPT_HTTPHEADER, headers);
|
||||
|
||||
}
|
||||
|
||||
curl_easy_setopt(m_curl, CURLOPT_WRITEDATA, buf);
|
||||
|
||||
//printf("addr:%s\n", addr.c_str());
|
||||
curl_easy_setopt(m_curl, CURLOPT_URL, addr.c_str());
|
||||
|
||||
iCurlRet = curl_easy_perform(m_curl);
|
||||
memset(m_params, 0, sizeof(m_params));
|
||||
curl_slist_free_all(headers);
|
||||
|
||||
return iCurlRet;
|
||||
}
|
||||
|
||||
|
||||
int CurlHttp::HttpRequest(const std::string& url, BuffV* buf, bool is_get, std::string contentType)
|
||||
{
|
||||
if(url.empty()) {
|
||||
return -100000;
|
||||
}
|
||||
|
||||
CURLcode iCurlRet;
|
||||
struct curl_slist *headers = NULL;
|
||||
std::string addr = url;
|
||||
if(is_get) {
|
||||
curl_easy_setopt(m_curl, CURLOPT_HTTPGET, 1L);
|
||||
AppendGetParam(&addr);
|
||||
} else {
|
||||
curl_easy_setopt(m_curl, CURLOPT_POST, 1L);
|
||||
curl_easy_setopt(m_curl, CURLOPT_POSTFIELDS, m_params);
|
||||
|
||||
//headers =curl_slist_append(headers,"Content-Type: application/json");
|
||||
headers = curl_slist_append(headers, contentType.c_str());
|
||||
curl_easy_setopt(m_curl, CURLOPT_HTTPHEADER, headers);
|
||||
|
||||
}
|
||||
|
||||
curl_easy_setopt(m_curl, CURLOPT_WRITEDATA, buf);
|
||||
|
||||
//printf("addr:%s\n", addr.c_str());
|
||||
curl_easy_setopt(m_curl, CURLOPT_URL, addr.c_str());
|
||||
|
||||
iCurlRet = curl_easy_perform(m_curl);
|
||||
memset(m_params, 0, sizeof(m_params));
|
||||
curl_slist_free_all(headers);
|
||||
|
||||
return iCurlRet;
|
||||
}
|
||||
|
||||
// Append query-string parameters for GET requests
|
||||
void CurlHttp::AppendGetParam(std::string* addr)
|
||||
{
|
||||
if(m_params[0] == '\0') {
|
||||
return;
|
||||
}
|
||||
|
||||
addr->append("?");
|
||||
addr->append(m_params);
|
||||
}
|
||||
|
||||
int CurlHttp::SetLocalPortRange(long BeginPort, long LocalPortRange)
|
||||
{
|
||||
|
||||
if(BeginPort) {
|
||||
curl_easy_setopt(m_curl, CURLOPT_LOCALPORT, (long)BeginPort);
|
||||
curl_easy_setopt(m_curl, CURLOPT_LOCALPORTRANGE, (long)LocalPortRange);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -1,137 +0,0 @@
|
||||
// Copyright (c) 2013, Tencent Inc. All Rights Reserved.
|
||||
// Author: Lei Wang (pecywang@tencent.com)
|
||||
// Date: 2013-05-23
|
||||
|
||||
#ifndef _HTTP_SERVICE_H_
|
||||
#define _HTTP_SERVICE_H_
|
||||
#pragma once
|
||||
|
||||
#include <assert.h>
|
||||
#include <string.h>
|
||||
#include <string>
|
||||
#include <map>
|
||||
#include "curl/curl.h"
|
||||
|
||||
typedef std::map<std::string, std::string> HttpParams;
|
||||
|
||||
struct BuffV
|
||||
{
|
||||
BuffV() : m_used_len(0), m_total_len(0)
|
||||
{
|
||||
// Allocate 2KB up front by default
|
||||
m_total_len = 2 * 1024;
|
||||
m_buf = new char[m_total_len];
|
||||
assert(NULL != m_buf);
|
||||
memset(m_buf, 0, m_total_len);   // zero the whole buffer, not just sizeof(char*)
|
||||
}
|
||||
|
||||
~BuffV()
|
||||
{
|
||||
if(m_buf) {
|
||||
delete[] m_buf;   // array form must match new char[]
|
||||
m_buf = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
// Check whether the buffer can hold need_len bytes;
|
||||
// grow it automatically if it cannot.
|
||||
bool CheckBuffer(size_t need_len)
|
||||
{
|
||||
// At most 1MB is supported
|
||||
if(need_len > 1024 * 1024) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if(m_total_len > need_len) {
|
||||
return true;
|
||||
}
|
||||
|
||||
m_total_len = need_len + 256; // allocate a little extra headroom
|
||||
char* new_buf = new char[m_total_len];
|
||||
assert(NULL != new_buf);
|
||||
memset(new_buf, 0, m_total_len);
|
||||
|
||||
if(m_used_len) {
|
||||
memcpy(new_buf, m_buf, m_used_len);
|
||||
}
|
||||
|
||||
delete[] m_buf;   // array form must match new char[]
|
||||
m_buf = new_buf;
|
||||
return true;
|
||||
}
|
||||
|
||||
void SetBuffer(const void* ptr, size_t len)
|
||||
{
|
||||
memcpy(m_buf + m_used_len, ptr, len);
|
||||
m_used_len += len;
|
||||
}
|
||||
|
||||
const char* Ptr() const
|
||||
{
|
||||
return m_buf;
|
||||
}
|
||||
|
||||
size_t Size() const
|
||||
{
|
||||
return m_used_len;
|
||||
}
|
||||
|
||||
size_t Capacity() const
|
||||
{
|
||||
return m_total_len;
|
||||
}
|
||||
|
||||
private:
|
||||
char* m_buf;
|
||||
|
||||
// bytes currently used
|
||||
size_t m_used_len;
|
||||
|
||||
// total allocated size
|
||||
size_t m_total_len;
|
||||
|
||||
};
|
||||
|
||||
class CurlHttp
|
||||
{
|
||||
public:
|
||||
CurlHttp();
|
||||
virtual ~CurlHttp();
|
||||
|
||||
CurlHttp(const CurlHttp&);
|
||||
CurlHttp& operator = (const CurlHttp&);
|
||||
|
||||
/*
|
||||
static CurlHttp* GetInstance()
|
||||
{
|
||||
static CurlHttp curl_http;
|
||||
return &curl_http;
|
||||
}*/
|
||||
|
||||
// Set additional request parameters
|
||||
// Format: key=value&key=value
|
||||
// POST requests must pass their parameters through this function
|
||||
void SetHttpParams(const char* format, ...);
|
||||
|
||||
// Set timeouts
|
||||
void SetTimeout(long timeout);
|
||||
void SetTimeout_MS(long timeout_ms);
|
||||
|
||||
// Issue a GET or POST request
|
||||
// @param buf receives the HTTP response body
|
||||
int HttpRequest(const std::string& url, BuffV* buf, bool is_get = true, struct curl_slist *headers = NULL);
|
||||
int HttpRequest(const std::string& url, BuffV* buf, bool is_get, std::string contentType);
|
||||
|
||||
int SetLocalPortRange(long BeginPort, long LocalPortRange);
|
||||
|
||||
private:
|
||||
|
||||
void AppendGetParam(std::string* addr);
|
||||
|
||||
private:
|
||||
CURL* m_curl;
|
||||
|
||||
char m_params[50 * 1024];
|
||||
};
|
||||
|
||||
#endif // _HTTP_SERVICE_H_
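
A minimal sketch of using CurlHttp as declared above; the URLs are placeholders and the return value is treated as a plain error code.

#include <cstdio>
#include "curl_http.h"

int main()
{
    CurlHttp http;
    http.SetTimeout(3L);   // seconds

    // GET: parameters set here are appended as "?key=value&key=value".
    BuffV body;
    http.SetHttpParams("key1=%s&key2=%d", "value1", 2);
    int ret = http.HttpRequest("http://example.com/search", &body, true);
    if (ret != 0)
    {
        printf("GET failed, ret=%d\n", ret);
        return 1;
    }
    printf("got %zu bytes\n", body.Size());

    // POST: the params become the request body, with an explicit content type.
    BuffV postBody;
    http.SetHttpParams("{\"key\":\"value\"}");
    ret = http.HttpRequest("http://example.com/api", &postBody, false,
                           std::string("Content-Type: application/json"));
    return ret == 0 ? 0 : 1;
}
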
|
@ -1,115 +0,0 @@
|
||||
#include <stdlib.h>
|
||||
#include <strings.h>
|
||||
// #include <time.h>
|
||||
|
||||
#include <sstream>
|
||||
// #include <set>
|
||||
|
||||
#include "curl_http.h"
|
||||
// #include "detector_construct.h"
|
||||
// #include "detector_constant.h"
|
||||
#include "detector_instance.h"
|
||||
#include "dtcapi.h"
|
||||
#include "log.h"
|
||||
#include "DtcMonitorConfigMgr.h"
|
||||
|
||||
static const int kTableMismatch = -2019;
|
||||
static const int kBadRequest = -56;
|
||||
static const int kServerError = -2033;
|
||||
static const std::string kUnkownAdminCode = "unkown admin code";
|
||||
|
||||
bool CDetectorInstance::DetectAgent(
|
||||
const std::string& accessKey,
|
||||
const std::string& ipWithPort,
|
||||
const int timeout,
|
||||
bool& isAlive,
|
||||
int& errCode)
|
||||
{
|
||||
DTC::Server dtc_server;
|
||||
dtc_server.StringKey();
|
||||
dtc_server.SetTableName("*");
|
||||
dtc_server.SetMTimeout(timeout);
|
||||
dtc_server.SetAccessKey(accessKey.c_str());
|
||||
dtc_server.SetAddress(ipWithPort.c_str());
|
||||
#if 0
|
||||
int ping_ret = dtc_server.Ping();
|
||||
if(0 != ping_ret && kTableMismatch != ping_ret)
|
||||
{
|
||||
isAlive = false;
|
||||
errCode = ping_ret;
|
||||
monitor_log_debug("ping error, ret [%d]", ping_ret);
|
||||
}
|
||||
else
|
||||
{
|
||||
isAlive = true;
|
||||
}
|
||||
#else
|
||||
int ret = dtc_server.Connect();
|
||||
if(ret == -CDetectorInstance::eConnectTimeout || ret == -CDetectorInstance::eConnectRefused
|
||||
|| ret == -CDetectorInstance::eHostUnreach)
|
||||
{
|
||||
isAlive = false;
|
||||
errCode = ret;
|
||||
monitor_log_error("connect to agent error, ret [%d], addr:%s", ret, ipWithPort.c_str());
|
||||
}
|
||||
else if (ret != 0)
|
||||
{
|
||||
monitor_log_error("connect to agent error,not handle it now, must pay attention\
|
||||
to it, errno:%d, addr:%s", ret, ipWithPort.c_str());
|
||||
}
|
||||
else
|
||||
{
|
||||
isAlive = true;
|
||||
}
|
||||
#endif
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool CDetectorInstance::DetectDTC(
|
||||
const std::string& ipWithPort,
|
||||
const int timeout,
|
||||
bool& isAlive,
|
||||
int& errCode)
|
||||
{
|
||||
isAlive = true;
|
||||
DTC::Server dtc_server;
|
||||
dtc_server.StringKey();
|
||||
dtc_server.SetTableName("*");
|
||||
dtc_server.SetMTimeout(timeout);
|
||||
dtc_server.SetAddress(ipWithPort.c_str());
|
||||
DTC::SvrAdminRequest request(&dtc_server);
|
||||
request.SetAdminCode(DTC::LogoutHB);
|
||||
DTC::Result result;
|
||||
int ret = request.Execute(result);
|
||||
if(0 != ret && kBadRequest != ret)
|
||||
{
|
||||
if(kServerError == ret &&
|
||||
0 == strcasecmp(kUnkownAdminCode.c_str(), result.ErrorMessage()))
|
||||
{
|
||||
isAlive = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
errCode = ret;
|
||||
isAlive = false;
|
||||
monitor_log_error("request error, ret [%d], errcode [%d], errmsg [%s], errfrom [%s], addr:%s",\
|
||||
ret, result.ResultCode(), result.ErrorMessage(), result.ErrorFrom(), ipWithPort.c_str());
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
CDetectorInstance::CDetectorInstance()
|
||||
{
|
||||
// Unit: milliseconds (ms); default 5000 ms
|
||||
// m_get_route_timeout = 5000;
|
||||
// m_http_url_agent = "";
|
||||
// m_http_url_dtc = "";
|
||||
}
|
||||
|
||||
CDetectorInstance::~CDetectorInstance()
|
||||
{
|
||||
|
||||
}
|
@ -1,59 +0,0 @@
|
||||
//////////////////////////////////////////////////////////////
|
||||
// Copyright (c) 2018, JD Inc. All Rights Reserved.
|
||||
// Author:jinweiwei
|
||||
// Date: 2018-12-28
|
||||
//
|
||||
// copy from jinweiwei newdetector directory
|
||||
// created by qiuyu on Nov 28, 2018
|
||||
////////////////////////////////////////////////////////////
|
||||
|
||||
#ifndef __DETECTOR_INSTANCE_H__
|
||||
#define __DETECTOR_INSTANCE_H__
|
||||
|
||||
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
// #include "config.h"
|
||||
// #include "singletonbase.h"
|
||||
|
||||
|
||||
class CDetectorInstance //: public CSingletonBase<CDetectorInstance>
|
||||
{
|
||||
private:
|
||||
enum ErrCode
|
||||
{
|
||||
eConnectTimeout = 110,
|
||||
eConnectRefused = 111,
|
||||
eHostUnreach = 113,
|
||||
};
|
||||
|
||||
public:
|
||||
static bool DetectAgent(
|
||||
const std::string& accessKey,
|
||||
const std::string& ipWithPort,
|
||||
const int timeout,
|
||||
bool& isAlive,
|
||||
int& errCode);
|
||||
|
||||
static bool DetectDTC(
|
||||
const std::string& ipWithPort,
|
||||
const int timeout,
|
||||
bool& isAlive,
|
||||
int& errCode);
|
||||
|
||||
private:
|
||||
CDetectorInstance();
|
||||
~CDetectorInstance();
|
||||
|
||||
private:
|
||||
// static std::string m_report_url; // website url
|
||||
// static int m_report_timeout;
|
||||
// static int m_report_length;
|
||||
// static int m_get_route_timeout;
|
||||
// static std::string m_http_url_agent;
|
||||
// static std::string m_http_url_dtc;
|
||||
};
|
||||
|
||||
|
||||
#endif
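
A brief sketch of calling the two static probes declared above; the access key and addresses are placeholders, the timeout is in milliseconds (it is passed to SetMTimeout), and the monitor_log_info macro is assumed to come from log.h.

#include <string>
#include "detector_instance.h"
#include "log.h"

// Illustrative caller, not part of the original sources.
void probeOnce()
{
    bool isAlive = false;
    int errCode = 0;

    // Probe an agent instance (placeholder access key and address).
    CDetectorInstance::DetectAgent("demo-access-key", "127.0.0.1:12001",
                                   1000 /* ms */, isAlive, errCode);
    monitor_log_info("agent alive:%d errCode:%d", (int)isAlive, errCode);

    // Probe a DTC instance directly.
    CDetectorInstance::DetectDTC("127.0.0.1:12002", 1000 /* ms */, isAlive, errCode);
    monitor_log_info("dtc alive:%d errCode:%d", (int)isAlive, errCode);
}
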
|