# dongxiaoyan-tsg-autotest/04-CustomLibrary/Custometest/ReportSchema.py

import requests
import random
import json
import time
import ipaddress
from builtins import list
# Positive-path, interface-only test helpers for the Report API: these methods only verify that the interfaces respond, not that the statistics are accurate.
# Generate a random IPv4 or IPv6 address
MAX_IPV4 = ipaddress.IPv4Address._ALL_ONES # 2 ** 32 - 1
MAX_IPV6 = ipaddress.IPv6Address._ALL_ONES # 2 ** 128 - 1
def random_ipv4():
    return ipaddress.IPv4Address._string_from_ip_int(
        random.randint(0, MAX_IPV4))


def random_ipv6():
    return ipaddress.IPv6Address._string_from_ip_int(
        random.randint(0, MAX_IPV6))
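
# Illustrative sketch (not part of the original test flow): the helpers above use the
# private ipaddress classmethod _string_from_ip_int; this shows an equivalent via the
# public ipaddress constructors, should that private helper ever change.
def _example_random_addresses():
    sample_v4 = str(ipaddress.IPv4Address(random.randint(0, MAX_IPV4)))
    sample_v6 = str(ipaddress.IPv6Address(random.randint(0, MAX_IPV6)))
    return sample_v4, sample_v6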
# Generate a random email address
def RandomEmail(emailType=None, rang=None):
    __emailtype = ["@qq.com", "@163.com", "@126.com", "@189.com"]
    # If no email domain is specified, pick one at random from __emailtype
    if emailType is None:
        __randomEmail = random.choice(__emailtype)
    else:
        __randomEmail = emailType
    # If no length is specified, default to a random length between 4 and 10
    if rang is None:
        __rang = random.randint(4, 10)
    else:
        __rang = int(rang)
    __Number = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    __randomNumber = "".join(random.choice(__Number) for i in range(__rang))
    _email = __randomNumber + __randomEmail
    return _email
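
# Hedged usage sketch for RandomEmail(): it can be called with defaults or with an
# explicit domain and local-part length; "@example.com" is an illustrative value only.
def _example_random_email():
    default_email = RandomEmail()
    fixed_email = RandomEmail(emailType="@example.com", rang=8)
    return default_email, fixed_email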
# Fetch the schema for a log type
def schema(schemauerl, token, logtype):
    # The caller already builds schemauerl with the logType query parameter
    headers = {"Content-Type": "application/x-www-form-urlencoded", "Authorization": token}
    response = requests.get(url=schemauerl, headers=headers)
    return response.json()
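
# Hedged sketch of a schema() call; the host, port and token are placeholders, and the
# log type is one of the types referenced elsewhere in this module.
def _example_schema_call():
    example_url = "http://127.0.0.1:8080/v1/log/schema?logType=security_event_log"
    example_token = "REPLACE_WITH_REAL_TOKEN"
    return schema(example_url, example_token, "security_event_log")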
# Build the groupColumnList part of the request JSON
def groupby(schemajson, logtype, testpoint, field):
    dimensions = schemajson["data"]["doc"]["schema_query"]["dimensions"]
    dimensions.append("common_recv_time")
    randomstr_1 = []
    if logtype == "security_event_log" or logtype == "connection_record_log" or logtype == "voip_record_log":
        dimensions.remove("common_start_time")
        dimensions.remove("common_end_time")
    if testpoint == "DataBindings":
        randomstr_1.append("common_recv_time")
    elif testpoint == "GroupBy":
        randomstr_1.append(field)
    else:
        randomstr_1 = random.sample(dimensions, 4)
    # grp is the group column list returned to the caller
    grp = []
    for i in randomstr_1:
        a = {"name": i}
        grp.append(a)
    re = [grp, randomstr_1]
    print("groupby", re)
    return re
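
# Hedged sketch of groupby() output for the "GroupBy" test point: it returns
# [groupColumnList, selected_field_names]. The schema dict and log type below are
# minimal hypothetical stand-ins for a real /v1/log/schema response.
def _example_groupby_output():
    fake_schema = {"data": {"doc": {"schema_query": {"dimensions": ["common_src_ip", "common_dst_ip"]}}}}
    return groupby(fake_schema, "other_log", "GroupBy", "common_src_ip")
    # expected: [[{"name": "common_src_ip"}], ["common_src_ip"]]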
# Build the queryColumnList part of the request JSON
def DataBindings(schemajson, randomstr_1, testpoint, field):
    # Collect the candidate metrics for queryColumnList
    metrics = schemajson["data"]["doc"]["schema_query"]["metrics"]
    metrics.append("common_log_id")
    # Pick metrics: the given field for the DataBindings test point, otherwise a random sample
    randomstr_2 = []
    if testpoint == "DataBindings":
        randomstr_2.append(field)
    else:
        randomstr_2 = random.sample(metrics, 6)
    # Drop metrics that already appear in the group-by columns
    randomstr_3 = array_diff(randomstr_2, randomstr_1)
    # Add the group-by columns to the query column list
    qul = []
    for i in randomstr_1:
        a = {"name": i}
        qul.append(a)
    fields = schemajson["data"]["fields"]
    list_1 = ["sum", "min", "max", "avg", "count"]
    list_2 = ["count", "count_distinct"]
    if testpoint == "DataBindings":
        for i in randomstr_3:
            for j in fields:
                if i == j["name"]:
                    jtype = j["type"]
                    label = i
                    sun = 1
                    if jtype == "int" or jtype == "long" or jtype == "float" or jtype == "double":
                        for Aggregate in list_1:
                            randomstr_4 = {"name": i, "expression": Aggregate, "label": label}
                            qul.append(randomstr_4)
                            label = label + str(sun)
                            sun += 1
                    elif jtype == "randomstring" or jtype == "date" or jtype == "timestamp" or jtype == "string":
                        for Aggregate in list_2:
                            randomstr_4 = {"name": i, "expression": Aggregate, "label": label}
                            qul.append(randomstr_4)
                            label = label + str(sun)
                            sun += 1
    else:
        for i in randomstr_3:
            for j in fields:
                if i == j["name"]:
                    jtype = j["type"]
                    if jtype == "int" or jtype == "long" or jtype == "float" or jtype == "double":
                        randomlist = random.sample(list_1, 1)
                        randomstr_4 = {"name": i, "expression": randomlist[0]}
                        qul.append(randomstr_4)
                    elif jtype == "randomstring" or jtype == "date" or jtype == "timestamp" or jtype == "string":
                        randomlist = random.sample(list_2, 1)
                        randomstr_4 = {"name": i, "expression": randomlist[0]}
                        qul.append(randomstr_4)
    print("DataBindings", qul)
    return qul
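
# Hedged sketch of the queryColumnList shape DataBindings() produces: plain
# {"name": ...} entries for the group-by columns followed by aggregated entries such
# as {"name": ..., "expression": "sum"}; the field names here are illustrative only.
def _example_databindings_shape():
    return [
        {"name": "common_recv_time"},
        {"name": "common_sessions", "expression": "sum"},
        {"name": "common_src_ip", "expression": "count_distinct"},
    ]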
# Remove from list a the elements that also appear in list b
def array_diff(a, b):
    c = []
    for t in a:
        # Keep t only if it is not present in b
        if t not in b:
            c.append(t)
    # c is list a with the elements of list b removed
    return c
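
# Hedged note: array_diff() is equivalent to a list comprehension; this sketch shows
# both forms producing the same result for sample inputs.
def _example_array_diff():
    a = ["sum", "min", "max"]
    b = ["min"]
    assert array_diff(a, b) == [x for x in a if x not in b] == ["sum", "max"]
    return array_diff(a, b)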
def filterCondition(schemajson, testpoint, field):
    number = random.randint(0, 100000)
    randomstr = random.choice('abcdefghijklmnopqrstuvwxyz')
    schemafilters = schemajson["data"]["doc"]["schema_query"]["filters"]
    list1 = []
    if testpoint == "Filter":
        list1.append(field)
    else:
        list1 = random.sample(schemafilters, 4)
    # Different field types support different sets of operators
    fields = schemajson["data"]["fields"]
    operator = schemajson["data"]["doc"]["schema_query"]["references"]["operator"]
    andConditions = []
    for i in list1:
        # Walk the fields list
        for k in fields:
            # When the filter name matches the field name
            if i == k["name"]:
                name = k["name"]
                doc = k["doc"]
                # Columns without any special documentation:
                if doc is None:
                    type1 = k["type"]
                    if type1 == "int" or type1 == "long":
                        orConditions_list = []
                        Operator = ["=", "!=", ">", "<", ">=", "<="]
                        if testpoint == "Filter":
                            for op in Operator:
                                value = [str(number)]
                                Field = {"name": name, "expression": op, "value": value, "type": type1}
                                orConditions_list.append(Field)
                        else:
                            randomOperator = random.sample(Operator, 1)
                            value = [str(number)]
                            Field = {"name": name, "expression": randomOperator[0], "value": value, "type": type1}
                            orConditions_list.append(Field)
                        orConditions = {"orConditions": orConditions_list}
                        andConditions.append(orConditions)
                    elif type1 == "string":
                        orConditions_list = []
                        Operator = ["=", "!=", "Like", "Not Like", "notEmpty", "empty"]
                        if testpoint == "Filter":
                            for op in Operator:
                                value = []
                                if op == "=" or op == "!=":
                                    value.append(str(number))
                                elif op == "Like" or op == "Not Like":
                                    value.append(randomstr)
                                elif op == "notEmpty" or op == "empty":
                                    value = []
                                Field = {"name": name, "expression": op, "value": value, "type": type1}
                                orConditions_list.append(Field)
                        else:
                            randomOperator_1 = random.sample(Operator, 1)
                            randomOperator = randomOperator_1[0]
                            value = []
                            if randomOperator == "=" or randomOperator == "!=":
                                value.append(str(number))
                            elif randomOperator == "Like" or randomOperator == "Not Like":
                                value.append(randomstr)
                            elif randomOperator == "notEmpty":
                                value = []
                            Field = {"name": name, "expression": randomOperator, "value": value, "type": type1}
                            orConditions_list.append(Field)
                        orConditions = {"orConditions": orConditions_list}
                        andConditions.append(orConditions)
                else:
                    if k["doc"]["constraints"] is None:
                        type1 = k["type"]
                        if type1 == "int" or type1 == "long":
                            orConditions_list = []
                            Operator = ["=", "!=", ">", "<", ">=", "<="]
                            if testpoint == "Filter":
                                for op in Operator:
                                    value = [str(number)]
                                    Field = {"name": name, "expression": op, "value": value, "type": type1}
                                    orConditions_list.append(Field)
                            else:
                                randomOperator = random.sample(Operator, 1)
                                value = [str(number)]
                                Field = {"name": name, "expression": randomOperator[0], "value": value, "type": type1}
                                orConditions_list.append(Field)
                            orConditions = {"orConditions": orConditions_list}
                            andConditions.append(orConditions)
                        elif type1 == "string":
                            orConditions_list = []
                            Operator = ["=", "!=", "Like", "Not Like", "notEmpty", "empty"]
                            if testpoint == "Filter":
                                for op in Operator:
                                    value = []
                                    if op == "=" or op == "!=":
                                        value.append(str(number))
                                    elif op == "Like" or op == "Not Like":
                                        value.append(randomstr)
                                    elif op == "notEmpty":
                                        value = []
                                    Field = {"name": name, "expression": op, "value": value, "type": type1}
                                    orConditions_list.append(Field)
                            else:
                                randomOperator_1 = random.sample(Operator, 1)
                                randomOperator = randomOperator_1[0]
                                value = []
                                if randomOperator == "=" or randomOperator == "!=":
                                    value.append(str(number))
                                elif randomOperator == "Like" or randomOperator == "Not Like":
                                    value.append(randomstr)
                                elif randomOperator == "notEmpty":
                                    value = []
                                Field = {"name": name, "expression": randomOperator, "value": value, "type": type1}
                                orConditions_list.append(Field)
                            orConditions = {"orConditions": orConditions_list}
                            andConditions.append(orConditions)
                    else:
                        if k["doc"]["constraints"]["operator_functions"] is None:
                            constraints = k["doc"]["constraints"]
                            type1 = k["type"]
                            if type1 == "int" or type1 == "long":
                                orConditions_list = []
                                Operator = ["=", "!=", ">", "<", ">=", "<="]
                                if testpoint == "Filter":
                                    for op in Operator:
                                        if constraints["type"] == "timestamp":
                                            # Current Unix timestamp
                                            t = int(time.time())
                                            value = [str(t)]
                                            Field = {"name": name, "expression": op, "value": value,
                                                     "type": type1}
                                            orConditions_list.append(Field)
                                else:
                                    randomOperator_1 = random.sample(Operator, 1)
                                    randomOperator = randomOperator_1[0]
                                    if constraints["type"] == "timestamp":
                                        # Current Unix timestamp
                                        t = int(time.time())
                                        value = [str(t)]
                                        Field = {"name": name, "expression": randomOperator, "value": value, "type": type1}
                                        orConditions_list.append(Field)
                                orConditions = {"orConditions": orConditions_list}
                                andConditions.append(orConditions)
                            elif type1 == "string":
                                orConditions_list = []
                                Operator = ["=", "!=", "Like", "Not Like", "notEmpty", "empty"]
                                if testpoint == "Filter":
                                    if constraints["type"] == "ip":
                                        for op in Operator:
                                            # Random IP address
                                            ip = random_ipv4()
                                            value = []
                                            if op == "=" or op == "!=":
                                                value.append(ip)
                                            elif op == "Like" or op == "Not Like":
                                                value.append(ip)
                                            elif op == "notEmpty":
                                                value = []
                                            Field = {"name": name, "expression": op, "value": value, "type": type1}
                                            orConditions_list.append(Field)
                                    elif constraints["type"] == "email":
                                        for op in Operator:
                                            # Random email address
                                            email = RandomEmail()
                                            value = []
                                            if op == "=" or op == "!=":
                                                value.append(email)
                                            elif op == "Like" or op == "Not Like":
                                                value.append(email)
                                            elif op == "notEmpty":
                                                value = []
                                            Field = {"name": name, "expression": op, "value": value,
                                                     "type": type1}
                                            orConditions_list.append(Field)
                                    orConditions = {"orConditions": orConditions_list}
                                    andConditions.append(orConditions)
                                else:
                                    randomOperator_1 = random.sample(Operator, 1)
                                    randomOperator = randomOperator_1[0]
                                    if constraints["type"] == "ip":
                                        # Random IP address
                                        ip = random_ipv4()
                                        value = []
                                        if randomOperator == "=" or randomOperator == "!=":
                                            value.append(ip)
                                        elif randomOperator == "Like" or randomOperator == "Not Like":
                                            value.append(ip)
                                        elif randomOperator == "notEmpty":
                                            value = []
                                        Field = {"name": name, "expression": randomOperator, "value": value, "type": type1}
                                        orConditions_list.append(Field)
                                        orConditions = {"orConditions": orConditions_list}
                                        andConditions.append(orConditions)
                                    elif constraints["type"] == "email":
                                        Operator = ["=", "!=", "Like", "Not Like", "notEmpty", "empty"]
                                        randomOperator_1 = random.sample(Operator, 1)
                                        randomOperator = randomOperator_1[0]
                                        # Random email address
                                        email = RandomEmail()
                                        value = []
                                        if randomOperator == "=" or randomOperator == "!=":
                                            value.append(email)
                                        elif randomOperator == "Like" or randomOperator == "Not Like":
                                            value.append(email)
                                        elif randomOperator == "notEmpty":
                                            value = []
                                        Field = {"name": name, "expression": randomOperator, "value": value, "type": type1}
                                        orConditions_list.append(Field)
                                        orConditions = {"orConditions": orConditions_list}
                                        andConditions.append(orConditions)
                        else:
                            type1 = k["type"]
                            orConditions_list = []
                            operator1 = k["doc"]["constraints"]["operator_functions"]
                            operator2 = operator1.split(",")
                            if testpoint == "Filter":
                                for op in operator2:
                                    operatordata = k["doc"]["data"]
                                    code = []
                                    for item in operatordata:
                                        code_1 = item["code"]
                                        code.append(code_1)
                                    for co in code:
                                        Field = {"name": name, "expression": op, "value": co, "type": type1}
                                        orConditions_list.append(Field)
                            else:
                                operator3 = random.sample(operator2, 1)
                                operatordata = k["doc"]["data"]
                                code = []
                                for item in operatordata:
                                    code_1 = item["code"]
                                    code.append(code_1)
                                code2 = random.sample(code, 1)
                                Field = {"name": name, "expression": operator3[0], "value": code2, "type": type1}
                                orConditions_list.append(Field)
                            orConditions = {"orConditions": orConditions_list}
                            andConditions.append(orConditions)
    filterCondition = {"andConditions": andConditions}
    print("filterCondition", filterCondition)
    return filterCondition
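
# Hedged sketch of the nested structure filterCondition() returns: an AND list of OR
# groups, where each inner dict is a single condition; the names and values here are
# purely illustrative.
def _example_filter_condition_shape():
    return {
        "andConditions": [
            {"orConditions": [
                {"name": "common_src_ip", "expression": "=", "value": ["10.0.0.1"], "type": "string"},
            ]},
            {"orConditions": [
                {"name": "common_sessions", "expression": ">", "value": ["100"], "type": "long"},
            ]},
        ]
    }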
# Build the havingCondition part of the request JSON
def havingjson(schemajson, testpoint, field):
    number = random.randint(0, 100000)
    schemametrics = schemajson["data"]["doc"]["schema_query"]["metrics"]
    aggregation = schemajson["data"]["doc"]["schema_query"]["references"]["aggregation"]
    schemametrics.append("common_log_id")
    metricslist = []
    if testpoint == "Having":
        metricslist.append(field)
    else:
        metricslist = random.sample(schemametrics, 4)
    fields = schemajson["data"]["fields"]
    operator = ["=", "!=", ">", "<", ">=", "<="]
    Aggregate = ["COUNT", "AVG", "SUM", "MAX", "MIN"]
    andConditions_list = []
    # Iterate over the selected metrics and build the HAVING conditions
    for i in metricslist:
        for j in fields:
            if i == j["name"]:
                name = j["name"]
                type1 = j["type"]
                for v in aggregation:
                    if type1 == v["type"]:
                        orConditions_list = []
                        if v["type"] != "string":
                            functionslist = Aggregate
                        else:
                            functionsstr = v["functions"]
                            functionslist = functionsstr.split(",")
                        if field == "common_log_id":
                            functionslist = ["COUNT"]
                        if testpoint == "Having":
                            for functions_1 in functionslist:
                                for operator_1 in operator:
                                    havingdict = {"name": name, "function": str.lower(functions_1),
                                                  "expression": operator_1, "value": str(number)}
                                    orConditions_list.append(havingdict)
                            orConditions = {"orConditions": orConditions_list}
                            andConditions_list.append(orConditions)
                        else:
                            functions_1 = random.sample(functionslist, 1)
                            if functions_1[0] == "COUNT_DISTINCT" and type1 != "string":
                                functions_1 = random.sample(functionslist, 1)
                            operator_1 = random.sample(operator, 1)
                            havingdict = {"name": name, "function": str.lower(functions_1[0]),
                                          "expression": operator_1[0], "value": str(number)}
                            orConditions_list.append(havingdict)
                            orConditions = {"orConditions": orConditions_list}
                            andConditions_list.append(orConditions)
    havingCondition = {"andConditions": andConditions_list}
    print("having", havingCondition)
    return havingCondition
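
# Hedged sketch of a single HAVING entry as produced by havingjson(); the threshold
# value is illustrative only.
def _example_having_entry():
    return {"name": "common_log_id", "function": "count", "expression": ">", "value": "100"}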
# Assemble the dataset request JSON string
def datasetjson(schemauerl, token, testname, logtype, testpoint, field):
    schema_new = schema(schemauerl, token, logtype)
    group_re = groupby(schema_new, logtype, testpoint, field)
    groupColumnList = group_re[0]
    group_randomstr = group_re[1]
    queryColumnList = DataBindings(schema_new, group_randomstr, testpoint, field)
    filterCondition_1 = filterCondition(schema_new, testpoint, field)
    havingjson_1 = havingjson(schema_new, testpoint, field)
    datasetdict = {
        "list": {
            "name": testname,
            "logType": logtype,
            "groupColumnList": groupColumnList,
            "queryColumnList": queryColumnList,
            "filterCondition": filterCondition_1,
            "havingCondition": havingjson_1
        }
    }
    print(datasetdict)
    print("datasetjson", json.dumps(datasetdict))
    return json.dumps(datasetdict)
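
# Hedged usage sketch: post the JSON produced by datasetjson() to the dataset endpoint.
# The URLs, token and dataset endpoint path below are placeholders/assumptions, not
# values confirmed by this module.
def _example_post_dataset():
    token = "REPLACE_WITH_REAL_TOKEN"
    schema_url = "http://127.0.0.1:8080/v1/log/schema?logType=security_event_log"
    dataset_url = "http://127.0.0.1:8080/v1/report/dataset"  # hypothetical endpoint path
    payload = datasetjson(schema_url, token, "ReportDemo", "security_event_log", "GroupBy", "common_recv_time")
    headers = {"Content-Type": "application/json", "Authorization": token}
    return requests.post(url=dataset_url, data=payload, headers=headers)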
# Assemble the chart (char) request JSON string
def charjson(schemaurl, token, queryColumnList, groupColumnList, datasetid, testname, logtype):
    print("queryColumnList", queryColumnList)
    schema_new = schema(schemaurl, token, logtype)
    fields = schema_new["data"]["fields"]
    # Collect the labels of the query columns
    namelist = []
    for i in queryColumnList:
        for j in fields:
            if i["name"] == j["name"]:
                j_label = j["label"]
                namelist.append(j_label)
    print("namelist", namelist)
    # Collect the labels of the group-by columns
    groupColumnlaberList = []
    for i in groupColumnList:
        for j in fields:
            if i["name"] == j["name"]:
                j_label = j["label"]
                groupColumnlaberList.append(j_label)
    print("groupColumnlaberList", groupColumnlaberList)
    # Available chart types
    chartType_1 = ["line", "pie", "bar", "area", "table"]
    chartType_2 = ["pie", "bar", "table"]
    chartType = []
    # Randomly choose a chart type; time-based charts are only allowed when a time column is present
    s = 1
    for i in namelist:
        if i == "Receive Time" or i == "Start Time" or i == "End Time":
            s += 1
    if s != 1:
        chartType = random.sample(chartType_1, 1)
    else:
        chartType = random.sample(chartType_2, 1)
    chardict = {}
    print("chartType", chartType)
    if chartType[0] == "line" or chartType[0] == "area":
        dataBinding = []
        # Bind the time column to dataBinding
        for j in namelist:
            if j == "Receive Time" or j == "Start Time" or j == "End Time":
                dataBinding.append(j)
        timelin = {
            "dataBinding": dataBinding[0],
            "format": "Time"
        }
        print("timelin", timelin)
        namelist.remove(dataBinding[0])  # remove the time column from the query column labels
        groupColumnlaberList.remove(dataBinding[0])  # remove the time column from the group-by labels
        for i in groupColumnlaberList:  # remove the group-by labels from the query column labels
            namelist.remove(i)
        print("namelist after removal", namelist)
        linlist = []
        for i in namelist:
            lindict = {
                "dataBinding": i,
                "type": "Line Up",
                "format": "Default",
            }
            linlist.append(lindict)
        listdict = {
            "name": testname,
            "datasetId": datasetid,
            "datasetName": "",
            "chartType": chartType[0],
            "dataTop": 0,
            "orderBy": "",
            "orderDesc": 0,
            "drilldownTop": 0,
            "timeline": timelin,
            "line": linlist
        }
        chardict = {"list": listdict}
    elif chartType[0] == "pie" or chartType[0] == "bar":
        xAxisdataBinding = random.sample(groupColumnlaberList, 1)
        xAxisdict = {
            "dataBinding": xAxisdataBinding[0],
            "dataTop": 5,
            "dataType": ""
        }
        for i in groupColumnlaberList:
            namelist.remove(i)
        yAxisBinding = random.sample(namelist, 1)
        yAxisdict = {
            "dataBinding": yAxisBinding[0],
            "format": "Default",
        }
        yAxislist = [yAxisdict]
        listdict = {
            "name": testname,
            "datasetId": datasetid,
            "datasetName": "",
            "chartType": chartType[0],
            "dataTop": 0,
            "orderBy": "",
            "orderDesc": "",
            "xAxis": xAxisdict,
            "yAxis": yAxislist
        }
        chardict = {"list": listdict}
    elif chartType[0] == "table":
        columnslist = []
        for i in namelist:
            dataBindings = {
                "dataType": "",
                "dataBinding": i,
                "format": "Default",
            }
            dataBindingslist = []
            dataBindingslist.append(dataBindings)
            columnsdict = {
                "title": i,
                "width": 0,
                "dataBindings": dataBindingslist
            }
            columnslist.append(columnsdict)
        listdict = {
            "name": testname,
            "datasetId": datasetid,
            "datasetName": "",
            "chartType": "table",
            "dataTop": 5,
            "orderBy": "",
            "orderDesc": "",
            "drilldownTop": 5,
            "tableType": "Regular",
            "columns": columnslist
        }
        chardict = {"list": listdict}
    print("charjson", json.dumps(chardict))
    return json.dumps(chardict)
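
# Hedged sketch of the line-chart payload charjson() assembles; field labels such as
# "Receive Time" mirror the schema labels used above, and the other values are
# illustrative only.
def _example_charjson_shape():
    return {"list": {
        "name": "ReportDemo",
        "datasetId": 1,
        "datasetName": "",
        "chartType": "line",
        "dataTop": 0,
        "orderBy": "",
        "orderDesc": 0,
        "drilldownTop": 0,
        "timeline": {"dataBinding": "Receive Time", "format": "Time"},
        "line": [{"dataBinding": "Sessions", "type": "Line Up", "format": "Default"}],
    }}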
# Assemble the report job request JSON string
def Reportsjson(chartId, testname):
    charlist = []
    chardict = {
        "chartId": chartId,
        "timeGranulartiy": 1,
        "timeUnit": "",
        # "disabled": true
    }
    charlist.append(chardict)
    reportJobList = []
    reportJobdct_1 = {
        "rangeType": "last",
        "rangeInterval": 1,
        "rangeUnit": "week",
        "jobName": testname,
        "scheduleId": "",
        "chartList": charlist,
        "isNotice": 0,
        "noticeMethod": "",
        "startTime": "",
        "endTime": "",
        "filterCondition": None,
        "isDisplayTrafficTrend": 1
    }
    reportJobdct_2 = {"reportJobList": reportJobdct_1}
    print("reportjson", json.dumps(reportJobdct_2))
    return json.dumps(reportJobdct_2)
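
# Hedged usage sketch for Reportsjson(): it only needs a chart id and a job name; the
# id below is an illustrative placeholder.
def _example_reportsjson_call():
    return Reportsjson(chartId=123, testname="ReportDemo")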
def ReportInterfaceTest(schemaurl, token, dataseturl, charurl, repporturl, datasetgeturl, chargeturl, testname, logtype, testpoint, field):
    headers = {"Content-Type": "application/json", "Authorization": token}
    # Build the dataset JSON and send the request
    _datasetjson = datasetjson(schemaurl, token, testname, logtype, testpoint, field)
    response1 = requests.post(url=dataseturl, data=_datasetjson, headers=headers)
    print("response data 1", response1)
    code = response1.json()["code"]
    print("datasetcode:", code)
    assert code == 200
    # Fetch the dataset id
    datasetget = requests.get(url=datasetgeturl, headers=headers)
    dasetget = datasetget.json()
    datesetid = dasetget["data"]["list"][0]["id"]
    Deleteinterfaces(dataseturl, token, datesetid)
    # _datasetjson=json.loads(_datasetjson)
    # queryColumnList=_datasetjson["list"]["queryColumnList"]
    # groupColumnList=_datasetjson["list"]["groupColumnList"]
    # Build the chart libraries JSON and send the request
    # charlibrariesjson=charjson(schemaurl, token,queryColumnList,groupColumnList,datesetid,testname,logtype)
    # response2 = requests.post(url=charurl, data=charlibrariesjson, headers=headers)
    # code = response2.json()["code"]
    # assert code == 200
    #
    # # Fetch the chart library id
    # charget=requests.get(url=chargeturl,headers=headers)
    # charget=charget.json()
    # charid=charget["data"]["list"][0]["id"]
    #
    # # Build the report JSON and send the request
    # reportjson=Reportsjson(charid,testname)
    # response3 = requests.post(url=repporturl, data=reportjson, headers=headers)
    # code = response3.json()["code"]
    # assert code == 200
    #
# # Loop over log types and call ReportInterfaceTest
# def ReportTest(host,token,dataseturl,charurl,repporturl,logtypelist):
#     for logtype in logtypelist:
#         testname="Report"+logtype
#         datasetgeturl=dataseturl+"?pageSize=20&pageNo=1&id=&name="+testname+"&logType=&opStartTime=&opEndTime=&opUser="
#         chargeturl=charurl+"?pageSize=20&pageNo=1&id=&name="+testname+"&opUser="
#         schemaurl="http://"+host+":8080/v1/log/schema?logType="+logtype
#         ReportInterfaceTest(schemaurl,token,dataseturl,charurl,repporturl,datasetgeturl,chargeturl,testname,logtype)
def Deleteinterfaces(url, token, id):
    headers = {"Content-Type": "application/json", "Authorization": token}
    datedict = {"ids": [id]}
    datajson = json.dumps(datedict)
    response1 = requests.delete(url=url, data=datajson, headers=headers)
def ReportPositiveTest(host, port, token, dataseturl, charurl, repporturl, logtypelist):
    testpoint = ["GroupBy", "DataBindings", "Filter", "Having"]
    for logtype in logtypelist:
        schemaurl = "http://" + host + ":" + port + "/v1/log/schema?logType=" + logtype
        schema_new = schema(schemaurl, token, logtype)
        metrics = schema_new["data"]["doc"]["schema_query"]["metrics"]
        schemafilters = schema_new["data"]["doc"]["schema_query"]["filters"]
        dimensions = schema_new["data"]["doc"]["schema_query"]["dimensions"]
        dimensions.append("common_recv_time")
        metrics.append("common_log_id")
        for j in testpoint:
            if j == "GroupBy":
                for field in dimensions:
                    testname = "Report" + logtype + j + field
                    dataset_geturl = dataseturl + "?pageSize=20&pageNo=1&id=&name=" + testname + "&logType=&opStartTime=&opEndTime=&opUser="
                    char_geturl = charurl + "?pageSize=20&pageNo=1&id=&name=" + testname + "&opUser="
                    ReportInterfaceTest(schemaurl, token, dataseturl, charurl, repporturl, dataset_geturl, char_geturl, testname, logtype, j, field)
            if j == "DataBindings":
                for field in metrics:
                    testname = "Report" + logtype + j + field
                    dataset_geturl = dataseturl + "?pageSize=20&pageNo=1&id=&name=" + testname + "&logType=&opStartTime=&opEndTime=&opUser="
                    char_geturl = charurl + "?pageSize=20&pageNo=1&id=&name=" + testname + "&opUser="
                    ReportInterfaceTest(schemaurl, token, dataseturl, charurl, repporturl, dataset_geturl, char_geturl, testname, logtype, j, field)
            if j == "Filter":
                for field in schemafilters:
                    testname = "Report" + logtype + j + field
                    dataset_geturl = dataseturl + "?pageSize=20&pageNo=1&id=&name=" + testname + "&logType=&opStartTime=&opEndTime=&opUser="
                    char_geturl = charurl + "?pageSize=20&pageNo=1&id=&name=" + testname + "&opUser="
                    ReportInterfaceTest(schemaurl, token, dataseturl, charurl, repporturl, dataset_geturl, char_geturl, testname, logtype, j, field)
            if j == "Having":
                for field in metrics:
                    testname = "Report" + logtype + j + field
                    dataset_geturl = dataseturl + "?pageSize=20&pageNo=1&id=&name=" + testname + "&logType=&opStartTime=&opEndTime=&opUser="
                    char_geturl = charurl + "?pageSize=20&pageNo=1&id=&name=" + testname + "&opUser="
                    ReportInterfaceTest(schemaurl, token, dataseturl, charurl, repporturl, dataset_geturl, char_geturl, testname, logtype, j, field)
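
# Hedged end-to-end sketch of driving ReportPositiveTest(); every host, port, token
# and endpoint path here is a placeholder/assumption rather than a value confirmed by
# this module.
def _example_run_positive_tests():
    host = "127.0.0.1"
    port = "8080"
    token = "REPLACE_WITH_REAL_TOKEN"
    dataset_url = "http://%s:%s/v1/report/dataset" % (host, port)  # hypothetical path
    chart_url = "http://%s:%s/v1/report/chart" % (host, port)      # hypothetical path
    report_url = "http://%s:%s/v1/report/job" % (host, port)       # hypothetical path
    logtypes = ["security_event_log", "connection_record_log"]
    ReportPositiveTest(host, port, token, dataset_url, chart_url, report_url, logtypes)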