Merge branch 'main' into multi-tenant

2024-07-30 11:38:06 +08:00
67 changed files with 2259 additions and 2509 deletions

View File

@@ -6,7 +6,6 @@ import (
"be.ems/src/framework/datasource"
"be.ems/src/framework/i18n"
"be.ems/src/framework/utils/ctx"
"be.ems/src/framework/utils/date"
"be.ems/src/framework/vo/result"
"be.ems/src/modules/network_data/model"
neDataService "be.ems/src/modules/network_data/service"
@@ -40,18 +39,6 @@ func (s *PerfKPIController) GoldKPI(c *gin.Context) {
c.JSON(400, result.CodeMsg(400, i18n.TKey(language, "app.common.err400")))
return
}
// Time format validation
startTime := date.ParseStrToDate(querys.StartTime, date.YYYY_MM_DD_HH_MM_SS)
if startTime.IsZero() {
c.JSON(400, result.CodeMsg(400, i18n.TKey(language, "app.common.err400")))
return
}
endTime := date.ParseStrToDate(querys.EndTime, date.YYYY_MM_DD_HH_MM_SS)
if endTime.IsZero() {
c.JSON(400, result.CodeMsg(400, i18n.TKey(language, "app.common.err400")))
return
}
if querys.Interval < 5 || querys.Interval > 3600 {
c.JSON(400, result.CodeMsg(400, i18n.TKey(language, "app.common.err400")))
return
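
Note: the removed check parsed startTime/endTime against a "YYYY-MM-DD HH:MM:SS" layout and rejected zero times. A standard-library sketch of the same validation, with illustrative names (the framework's date helper is not reproduced here):

package main

import (
	"fmt"
	"time"
)

// parseDateTime validates a "YYYY-MM-DD HH:MM:SS" string the way the removed
// check did, returning the zero time on failure.
func parseDateTime(s string) time.Time {
	t, err := time.Parse("2006-01-02 15:04:05", s)
	if err != nil {
		return time.Time{}
	}
	return t
}

func main() {
	for _, s := range []string{"2024-07-30 11:38:06", "not-a-date"} {
		t := parseDateTime(s)
		fmt.Printf("%q -> zero=%v\n", s, t.IsZero())
	}
}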

View File

@@ -15,6 +15,7 @@ import (
"be.ems/src/framework/vo/result"
"be.ems/src/modules/network_data/model"
neDataService "be.ems/src/modules/network_data/service"
neFetchlink "be.ems/src/modules/network_element/fetch_link"
neService "be.ems/src/modules/network_element/service"
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding"
@@ -411,7 +412,11 @@ func (s *UDMAuthController) Export(c *gin.Context) {
data := [][]string{}
data = append(data, []string{"imsi", "ki", "algo", "amf", "opc"})
for _, v := range list {
data = append(data, []string{v.IMSI, v.Ki, v.AlgoIndex, v.Amf, v.Opc})
opc := v.Opc
if opc == "-" {
opc = ""
}
data = append(data, []string{v.IMSI, v.Ki, v.AlgoIndex, v.Amf, opc})
}
// Write to file
err := file.WriterFileCSV(data, filePath)
@@ -425,7 +430,11 @@ func (s *UDMAuthController) Export(c *gin.Context) {
// Convert the data
data := [][]string{}
for _, v := range list {
data = append(data, []string{v.IMSI, v.Ki, v.AlgoIndex, v.Amf, v.Opc})
opc := v.Opc
if opc == "-" {
opc = ""
}
data = append(data, []string{v.IMSI, v.Ki, v.AlgoIndex, v.Amf, opc})
}
// Write to file
err = file.WriterFileTXT(data, ",", filePath)
@@ -446,6 +455,8 @@ func (s *UDMAuthController) Import(c *gin.Context) {
var body struct {
NeId string `json:"neId" binding:"required"`
UploadPath string `json:"uploadPath" binding:"required"`
TypeVal string `json:"typeVal" binding:"required,oneof=default k4"`
TypeData any `json:"typeData"`
}
if err := c.ShouldBindBodyWith(&body, binding.JSON); err != nil {
c.JSON(400, result.CodeMsg(400, i18n.TKey(language, "app.common.err400")))
@@ -497,16 +508,30 @@ func (s *UDMAuthController) Import(c *gin.Context) {
}
defer telnetClient.Close()
// Send MML
cmd := fmt.Sprintf("import authdat:path=%s", neFilePath)
data, err := telnet.ConvertToStr(telnetClient, cmd)
if err != nil {
c.JSON(200, result.ErrMsg(err.Error()))
// Result info
var resultMsg string
var resultErr error
// Default case: send MML
if body.TypeVal == "default" {
cmd := fmt.Sprintf("import authdat:path=%s", neFilePath)
resultMsg, resultErr = telnet.ConvertToStr(telnetClient, cmd)
}
// K4 type: send a dedicated request
if body.TypeVal == "k4" {
resultMsg, resultErr = neFetchlink.UDMImportAuth(neInfo.IP, map[string]any{
"path": neFilePath, "k4": body.TypeData,
})
}
if resultErr != nil {
c.JSON(200, result.ErrMsg(resultErr.Error()))
return
}
// When the command result is ok
if strings.Contains(data, "ok") {
if strings.Contains(resultMsg, "ok") {
if strings.HasSuffix(body.UploadPath, ".csv") {
data := file.ReadFileCSV(localFilePath)
neId := ""
@@ -518,5 +543,5 @@ func (s *UDMAuthController) Import(c *gin.Context) {
go s.udmAuthService.InsertData(neId, "txt", data)
}
}
c.JSON(200, result.OkMsg(data))
c.JSON(200, result.OkMsg(resultMsg))
}
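
Note: a minimal client-side sketch of the new Import request body with the added typeVal/typeData fields; the endpoint URL, neId, and K4 value below are assumptions for illustration only.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// importBody mirrors the Import request fields added above.
type importBody struct {
	NeId       string `json:"neId"`
	UploadPath string `json:"uploadPath"`
	TypeVal    string `json:"typeVal"` // "default" or "k4"
	TypeData   any    `json:"typeData,omitempty"`
}

func main() {
	body := importBody{
		NeId:       "ne-001",                 // assumed NE id
		UploadPath: "/upload/auth_data.csv",  // assumed upload path
		TypeVal:    "k4",
		TypeData:   "1234123412341234",       // K4 key forwarded to UDMImportAuth
	}
	buf, _ := json.Marshal(body)
	// Assumed endpoint; replace with the real route of this controller.
	resp, err := http.Post("http://localhost:8080/udm/auth/import", "application/json", bytes.NewReader(buf))
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}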

View File

@@ -14,15 +14,6 @@ type CDREventSMF struct {
TenantID string `json:"tenantID" gorm:"column:tenant_id"`
TenantName string `json:"tenantName" gorm:"column:tenant_name"`
// ====== Non-database fields ======
// RecordType string `json:"recordType" gorm:"column:record_type"`
// ChargingID string `json:"chargingID" gorm:"column:charging_id"`
// SubscriberID string `json:"subscriberID" gorm:"column:subscriber_id"`
// Duration string `json:"duration" gorm:"column:duration"`
// DataVolumeUplink string `json:"dataVolumeUplink" gorm:"column:data_volume_uplink"`
// DataVolumeDownlink string `json:"dataVolumeDownlink" gorm:"column:data_volume_downlink"`
// DataTotalVolume string `json:"dataTotalVolume" gorm:"column:data_total_volume"`
// PDUAddress string `json:"pduAddress" gorm:"column:pdu_address"`
}
// CDREventSMFQuery query parameters for SMF CDR session records

View File

@@ -17,7 +17,7 @@ type GoldKPIQuery struct {
NeID string `form:"neId"`
StartTime string `form:"startTime" binding:"required"`
EndTime string `form:"endTime" binding:"required"`
Interval int64 `form:"interval" binding:"required"`
Interval int64 `form:"interval" binding:"required,oneof=5 60 300 900 1800 3600"`
RmUID string `form:"rmUID"`
SortField string `form:"sortField" binding:"omitempty,oneof=timeGroup"`
SortOrder string `form:"sortOrder" binding:"omitempty,oneof=asc desc"`
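
Note: with the oneof tag above, gin's validator rejects any interval outside the listed values before the handler runs, which is what allows the controller to drop its manual range check. A minimal standalone sketch of that behavior (handler, route, and port are illustrative, not from this repo):

package main

import (
	"github.com/gin-gonic/gin"
)

// goldKPIQuery mirrors the binding rules above (illustrative only).
type goldKPIQuery struct {
	StartTime string `form:"startTime" binding:"required"`
	EndTime   string `form:"endTime" binding:"required"`
	Interval  int64  `form:"interval" binding:"required,oneof=5 60 300 900 1800 3600"`
}

func main() {
	r := gin.Default()
	r.GET("/goldkpi", func(c *gin.Context) {
		var q goldKPIQuery
		// ShouldBindQuery returns an error for interval values outside the oneof set.
		if err := c.ShouldBindQuery(&q); err != nil {
			c.JSON(400, gin.H{"code": 400, "msg": err.Error()})
			return
		}
		c.JSON(200, gin.H{"interval": q.Interval})
	})
	r.Run(":8080")
}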

View File

@@ -91,14 +91,21 @@ func (r *CDREventIMSImpl) SelectPage(querys model.CDREventIMSQuery) map[string]a
conditions = append(conditions, "JSON_EXTRACT(cdr_json, '$.calledParty') = ?")
params = append(params, querys.CalledParty)
}
// Supported on MySQL 8
// if querys.RecordType != "" {
// recordTypes := strings.Split(querys.RecordType, ",")
// placeholder := repo.KeyPlaceholderByQuery(len(recordTypes))
// conditions = append(conditions, fmt.Sprintf("JSON_EXTRACT(cdr_json, '$.recordType') in (%s)", placeholder))
// for _, recordType := range recordTypes {
// params = append(params, recordType)
// }
// }
// MariaDB does not support IN on JSON_EXTRACT, so use OR instead
if querys.RecordType != "" {
recordTypes := strings.Split(querys.RecordType, ",")
// placeholder := repo.KeyPlaceholderByQuery(len(recordTypes))
// conditions = append(conditions, fmt.Sprintf("JSON_EXTRACT(cdr_json, '$.recordType') in (%s)", placeholder))
var orConds []string
for _, recordType := range recordTypes {
orConds = append(orConds, fmt.Sprintf("JSON_EXTRACT(cdr_json, '$.recordType') = '%s'", recordType))
//params = append(params, recordType)
}
conditions = append(conditions, "("+strings.Join(orConds, " OR ")+")")
}
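
Note: the OR expansion above interpolates recordType values directly into the SQL string; a parameterized variant of the same expansion is sketched below as a standalone illustration (names are not from this repository).

package main

import (
	"fmt"
	"strings"
)

// buildRecordTypeFilter expands a comma-separated recordType list into an OR
// chain of placeholder conditions, keeping the values in the params slice.
func buildRecordTypeFilter(recordType string) (string, []any) {
	var ors []string
	var params []any
	for _, rt := range strings.Split(recordType, ",") {
		ors = append(ors, "JSON_EXTRACT(cdr_json, '$.recordType') = ?")
		params = append(params, rt)
	}
	return "(" + strings.Join(ors, " OR ") + ")", params
}

func main() {
	cond, params := buildRecordTypeFilter("moCall,mtCall")
	fmt.Println(cond)   // (JSON_EXTRACT(...) = ? OR JSON_EXTRACT(...) = ?)
	fmt.Println(params) // [moCall mtCall]
}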

View File

@@ -7,15 +7,9 @@ type IPerfKPI interface {
// SelectGoldKPI query KPI data for a network element
SelectGoldKPI(query model.GoldKPIQuery, kpiIds []string) []map[string]any
// select from new kpi report table, exp. kpi_report_upf
SelectKpiReport(query model.GoldKPIQuery, kpiIds []string) []map[string]any
// SelectGoldKPITitle KPI names for the given NE type
SelectGoldKPITitle(neType string) []model.GoldKPITitle
// SelectUPFTotalFlow query UPF total traffic: N3 uplink, N6 downlink
SelectUPFTotalFlow(neType, rmUID, startDate, endDate string) map[string]any
// select upf throughput from new kpi_report
SelectUPFThroughput(neType, rmUID, startDate, endDate string) map[string]any
}

View File

@@ -17,76 +17,6 @@ type PerfKPIImpl struct{}
// SelectGoldKPI query KPI data for a network element
func (r *PerfKPIImpl) SelectGoldKPI(query model.GoldKPIQuery, kpiIds []string) []map[string]any {
// Build query conditions
var conditions []string
var params []any
if query.RmUID != "" {
conditions = append(conditions, "gk.rm_uid = ?")
params = append(params, query.RmUID)
}
if query.NeType != "" {
conditions = append(conditions, "gk.ne_type = ?")
params = append(params, query.NeType)
}
if query.StartTime != "" {
conditions = append(conditions, "gk.start_time >= ?")
params = append(params, query.StartTime)
}
if query.EndTime != "" {
conditions = append(conditions, "gk.start_time <= ?")
params = append(params, query.EndTime)
}
// Build the WHERE clause
whereSql := ""
if len(conditions) > 0 {
whereSql += " where " + strings.Join(conditions, " and ")
}
// Select columns
timeFormat := "DATE_FORMAT(gk.start_time, '%Y-%m-%d %H:%i:')"
secondGroup := fmt.Sprintf("LPAD(FLOOR(SECOND(gk.start_time) / %d) * %d, 2, '0')", query.Interval, query.Interval)
groupByField := fmt.Sprintf("CONCAT( %s, %s ) AS timeGroup", timeFormat, secondGroup)
if query.Interval > 60 {
minute := query.Interval / 60
timeFormat = "DATE_FORMAT(gk.start_time, '%Y-%m-%d %H:')"
minuteGroup := fmt.Sprintf("LPAD(FLOOR(MINUTE(gk.start_time) / %d) * %d, 2, '0')", minute, minute)
groupByField = fmt.Sprintf("CONCAT( %s, %s ) AS timeGroup", timeFormat, minuteGroup)
}
var fields = []string{
groupByField,
"min(CASE WHEN gk.index != '' THEN gk.index ELSE 0 END) AS startIndex",
"min(CASE WHEN gk.ne_type != '' THEN gk.ne_type ELSE 0 END) AS neType",
"min(CASE WHEN gk.ne_name != '' THEN gk.ne_name ELSE 0 END) AS neName",
}
for _, kid := range kpiIds {
// Special KPIs: take only the last received non-zero value
if kid == "AMF.01" || kid == "UDM.01" || kid == "UDM.02" || kid == "UDM.03" || kid == "SMF.01" {
str := fmt.Sprintf("IFNULL(SUBSTRING_INDEX(GROUP_CONCAT( CASE WHEN gk.kpi_id = '%s' and gk.VALUE != 0 THEN gk.VALUE END ), ',', 1), 0) AS '%s'", kid, kid)
fields = append(fields, str)
} else {
str := fmt.Sprintf("sum(CASE WHEN gk.kpi_id = '%s' THEN gk.value ELSE 0 END) AS '%s'", kid, kid)
fields = append(fields, str)
}
}
fieldsSql := strings.Join(fields, ",")
// Run the query
if query.SortField == "" {
query.SortField = "timeGroup"
}
if query.SortOrder == "" {
query.SortOrder = "desc"
}
orderSql := fmt.Sprintf(" order by %s %s", query.SortField, query.SortOrder)
querySql := fmt.Sprintf("SELECT %s FROM gold_kpi gk %s GROUP BY timeGroup %s", fieldsSql, whereSql, orderSql)
results, err := datasource.RawDB("", querySql, params)
if err != nil {
logger.Errorf("query err => %v", err)
}
return results
}
func (r *PerfKPIImpl) SelectKpiReport(query model.GoldKPIQuery, kpiIds []string) []map[string]any {
// Build query conditions
var conditions []string
var params []any
@@ -100,43 +30,15 @@ func (r *PerfKPIImpl) SelectKpiReport(query model.GoldKPIQuery, kpiIds []string)
// params = append(params, query.NeType)
tableName += strings.ToLower(query.NeType)
}
var dateStr1, dateStr2, timeStr1, timeStr2 string
if query.StartTime != "" {
dateStr1 = query.StartTime[:10]
timeStr1 = query.StartTime[11:]
conditions = append(conditions, "gk.created_at >= ?")
params = append(params, query.StartTime)
}
if query.EndTime != "" {
dateStr2 = query.EndTime[:10]
timeStr2 = query.EndTime[11:]
}
if dateStr1 == dateStr2 && dateStr1 != "" {
conditions = append(conditions, "gk.`date` = ?")
params = append(params, dateStr1)
conditions = append(conditions, "gk.`start_time` >= ?")
params = append(params, timeStr1)
conditions = append(conditions, "gk.`start_time` <= ?")
params = append(params, timeStr2)
} else {
if dateStr1 != "" {
conditions = append(conditions, "(gk.`date` > ? OR (gk.`date` = ? AND gk.`start_time` >= ?))")
params = append(params, dateStr1, dateStr1, timeStr1)
}
if dateStr2 != "" {
conditions = append(conditions, "(gk.`date` < ? OR (gk.`date` = ? AND gk.`start_time` <= ?))")
params = append(params, dateStr2, dateStr2, timeStr2)
}
conditions = append(conditions, "gk.created_at <= ?")
params = append(params, query.EndTime)
}
// var dateTimeStr string = "CONCAT(gk.`date`, \" \", gk.start_time)"
// if query.StartTime != "" {
// conditions = append(conditions, dateTimeStr+" >= ?")
// params = append(params, query.StartTime)
// }
// if query.EndTime != "" {
// conditions = append(conditions, dateTimeStr+" <= ?")
// params = append(params, query.EndTime)
// }
// Build the WHERE clause
whereSql := ""
if len(conditions) > 0 {
@@ -144,18 +46,9 @@ func (r *PerfKPIImpl) SelectKpiReport(query model.GoldKPIQuery, kpiIds []string)
}
// Select columns
var dateTimeStr string = "CONCAT(gk.`date`, \" \", gk.start_time)"
timeFormat := "DATE_FORMAT(" + dateTimeStr + ", '%Y-%m-%d %H:%i:')"
secondGroup := fmt.Sprintf("LPAD(FLOOR(SECOND(gk.start_time) / %d) * %d, 2, '0')", query.Interval, query.Interval)
groupByField := fmt.Sprintf("CONCAT( %s, %s ) AS timeGroup", timeFormat, secondGroup)
if query.Interval > 60 {
minute := query.Interval / 60
timeFormat = "DATE_FORMAT(" + dateTimeStr + ", '%Y-%m-%d %H:')"
minuteGroup := fmt.Sprintf("LPAD(FLOOR(MINUTE(gk.start_time) / %d) * %d, 2, '0')", minute, minute)
groupByField = fmt.Sprintf("CONCAT( %s, %s ) AS timeGroup", timeFormat, minuteGroup)
}
var fields = []string{
groupByField,
// fmt.Sprintf("FROM_UNIXTIME(FLOOR(gk.created_at / (%d * 1000)) * %d) AS timeGroup", query.Interval, query.Interval),
fmt.Sprintf("CONCAT(FLOOR(gk.created_at / (%d * 1000)) * (%d * 1000)) AS timeGroup", query.Interval, query.Interval), // 时间戳毫秒
"min(CASE WHEN gk.index != '' THEN gk.index ELSE 0 END) AS startIndex",
"min(CASE WHEN gk.ne_type != '' THEN gk.ne_type ELSE 0 END) AS neType",
"min(CASE WHEN gk.ne_name != '' THEN gk.ne_name ELSE 0 END) AS neName",
@@ -205,19 +98,19 @@ func (r *PerfKPIImpl) SelectUPFTotalFlow(neType, rmUID, startDate, endDate strin
var conditions []string
var params []any
if neType != "" {
conditions = append(conditions, "gk.ne_type = ?")
conditions = append(conditions, "kupf.ne_type = ?")
params = append(params, neType)
}
if rmUID != "" {
conditions = append(conditions, "gk.rm_uid = ?")
conditions = append(conditions, "kupf.rm_uid = ?")
params = append(params, rmUID)
}
if startDate != "" {
conditions = append(conditions, "gk.date >= ?")
conditions = append(conditions, "kupf.created_at >= ?")
params = append(params, startDate)
}
if endDate != "" {
conditions = append(conditions, "gk.date <= ?")
conditions = append(conditions, "kupf.created_at <= ?")
params = append(params, endDate)
}
// Build the WHERE clause
@@ -227,44 +120,11 @@ func (r *PerfKPIImpl) SelectUPFTotalFlow(neType, rmUID, startDate, endDate strin
}
// Run the query
querySql := fmt.Sprintf("SELECT sum( CASE WHEN gk.kpi_id = 'UPF.03' THEN gk.VALUE ELSE 0 END ) AS 'up', sum( CASE WHEN gk.kpi_id = 'UPF.06' THEN gk.VALUE ELSE 0 END ) AS 'down' FROM gold_kpi gk %s", whereSql)
results, err := datasource.RawDB("", querySql, params)
if err != nil {
logger.Errorf("query err => %v", err)
}
return results[0]
}
// SelectUPFThroughput query UPF throughput (N3 uplink, N6 downlink) from kpi_report
func (r *PerfKPIImpl) SelectUPFThroughput(neType, rmUID, startDate, endDate string) map[string]any {
// Build query conditions
var conditions []string
var params []any
if neType != "" {
conditions = append(conditions, "gk.ne_type = ?")
params = append(params, neType)
}
if rmUID != "" {
conditions = append(conditions, "gk.rm_uid = ?")
params = append(params, rmUID)
}
if startDate != "" {
conditions = append(conditions, "gk.date >= ?")
params = append(params, startDate)
}
if endDate != "" {
conditions = append(conditions, "gk.date <= ?")
params = append(params, endDate)
}
// Build the WHERE clause
whereSql := ""
if len(conditions) > 0 {
whereSql += " where " + strings.Join(conditions, " and ")
}
// Run the query
querySql := fmt.Sprintf("SELECT sum( CASE WHEN JSON_EXTRACT(gk.kpi_values, '$[2].kpi_id') = 'UPF.03' THEN JSON_EXTRACT(gk.kpi_values, '$[2].value') ELSE 0 END ) AS 'up', sum( CASE WHEN JSON_EXTRACT(gk.kpi_values, '$[5].kpi_id') = 'UPF.06' THEN JSON_EXTRACT(gk.kpi_values, '$[5].value') ELSE 0 END ) AS 'down' FROM kpi_report_upf gk %s", whereSql)
results, err := datasource.RawDB("", querySql, params)
querySql := `SELECT
sum( CASE WHEN JSON_EXTRACT(kupf.kpi_values, '$[2].kpi_id') = 'UPF.03' THEN JSON_EXTRACT(kupf.kpi_values, '$[2].value') ELSE 0 END ) AS 'up',
sum( CASE WHEN JSON_EXTRACT(kupf.kpi_values, '$[5].kpi_id') = 'UPF.06' THEN JSON_EXTRACT(kupf.kpi_values, '$[5].value') ELSE 0 END ) AS 'down'
FROM kpi_report_upf kupf`
results, err := datasource.RawDB("", querySql+whereSql, params)
if err != nil {
logger.Errorf("query err => %v", err)
}
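
Note: the new timeGroup expression buckets created_at (a Unix timestamp in milliseconds) by flooring to interval-sized steps. A minimal Go sketch of the same arithmetic, with illustrative names:

package main

import (
	"fmt"
	"time"
)

// bucketMillis floors a millisecond timestamp to the start of its interval,
// matching FLOOR(created_at / (interval * 1000)) * (interval * 1000) in SQL.
func bucketMillis(createdAtMs int64, intervalSec int64) int64 {
	step := intervalSec * 1000
	return createdAtMs / step * step
}

func main() {
	now := time.Now().UnixMilli()
	for _, interval := range []int64{5, 60, 300} {
		b := bucketMillis(now, interval)
		fmt.Printf("interval=%ds bucket=%d (%s)\n", interval, b, time.UnixMilli(b).Format("15:04:05"))
	}
}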

View File

@@ -31,8 +31,7 @@ func (r *PerfKPIImpl) SelectGoldKPI(query model.GoldKPIQuery) []map[string]any {
kpiIds = append(kpiIds, kpiId.KPIID)
}
//data := r.perfKPIRepository.SelectGoldKPI(query, kpiIds)
data := r.perfKPIRepository.SelectKpiReport(query, kpiIds)
data := r.perfKPIRepository.SelectGoldKPI(query, kpiIds)
if data == nil {
return []map[string]any{}
}
@@ -46,12 +45,11 @@ func (r *PerfKPIImpl) SelectGoldKPITitle(neType string) []model.GoldKPITitle {
// SelectUPFTotalFlow query UPF total traffic: N3 uplink, N6 downlink
func (r *PerfKPIImpl) SelectUPFTotalFlow(neType, rmUID string, day int) map[string]any {
// Get the current date
now := time.Now()
endDate := now.Format("2006-01-02")
// Current time as a millisecond timestamp
endDate := fmt.Sprint(now.UnixMilli())
// Go back the given number of days from the current date
afterDays := now.AddDate(0, 0, -day)
startDate := afterDays.Format("2006-01-02")
startDate := fmt.Sprint(now.AddDate(0, 0, -day).Truncate(24 * time.Hour).UnixMilli())
var info map[string]any
@@ -61,14 +59,18 @@ func (r *PerfKPIImpl) SelectUPFTotalFlow(neType, rmUID string, day int) map[stri
if infoStr != "" {
json.Unmarshal([]byte(infoStr), &info)
expireSecond, _ := redis.GetExpire("", key)
expireMinute := (time.Duration(int64(expireSecond)) * time.Second)
if expireMinute > 2*time.Minute {
if expireSecond > 120 {
return info
}
}
//info = r.perfKPIRepository.SelectUPFTotalFlow(neType, rmUID, startDate, endDate)
info = r.perfKPIRepository.SelectUPFThroughput(neType, rmUID, startDate, endDate)
info = r.perfKPIRepository.SelectUPFTotalFlow(neType, rmUID, startDate, endDate)
if v, ok := info["up"]; ok && v == nil {
info["up"] = 0
}
if v, ok := info["down"]; ok && v == nil {
info["down"] = 0
}
// Save to cache
infoJSON, _ := json.Marshal(info)
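
Note: the service now reuses cached totals only while more than 120 seconds of TTL remain, and normalizes missing sums to zero. A standalone sketch of that guard using only the standard library (the repo's redis helpers are not reproduced here):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// cachedOrRefresh returns the cached value while more than 120s of TTL remain,
// otherwise calls refresh and returns its result (the caller re-caches it).
func cachedOrRefresh(cachedJSON string, ttl time.Duration, refresh func() map[string]any) map[string]any {
	if cachedJSON != "" && ttl > 120*time.Second {
		var info map[string]any
		if err := json.Unmarshal([]byte(cachedJSON), &info); err == nil {
			return info
		}
	}
	info := refresh()
	// Normalize missing sums to zero, as the service does for "up"/"down".
	for _, k := range []string{"up", "down"} {
		if v, ok := info[k]; ok && v == nil {
			info[k] = 0
		}
	}
	return info
}

func main() {
	fresh := func() map[string]any { return map[string]any{"up": nil, "down": 123} }
	fmt.Println(cachedOrRefresh(`{"up":10,"down":20}`, 300*time.Second, fresh)) // cached
	fmt.Println(cachedOrRefresh("", 0, fresh))                                  // refreshed, up normalized to 0
}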

View File

@@ -0,0 +1,54 @@
package networkdata
import (
"crypto/des"
"errors"
"testing"
)
// Encrypt with DES in ECB mode
func encrypt(origData, key []byte) ([]byte, error) {
if len(origData) < 1 || len(key) < 1 {
return nil, errors.New("wrong data or key")
}
block, err := des.NewCipher(key)
if err != nil {
return nil, err
}
bs := block.BlockSize()
if len(origData)%bs != 0 {
return nil, errors.New("wrong padding")
}
out := make([]byte, len(origData))
dst := out
for len(origData) > 0 {
block.Encrypt(dst, origData[:bs])
origData = origData[bs:]
dst = dst[bs:]
}
return out, nil
}
func TestEncrypt(t *testing.T) {
// key := []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}
// 0123456789abcdef
// ki := []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}
// 0123456789abcdef0123456789abcdef
// encryption key
key := []byte{0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x12, 0x34}
// 1234123412341234
// ki to be encrypted
ki := []byte{0x80, 0x5D, 0xAD, 0xC6, 0xE8, 0xA5, 0x4A, 0x0D, 0x59, 0xD6, 0x22, 0xC7, 0xA0, 0x4D, 0x08, 0xE0}
// 805DADC6E8A54A0D59D622C7A04D08E0
kis, err := encrypt(ki, key)
// the encrypted ki that goes into the import file
t.Errorf("kis: %x\n", kis)
// 3e479135bb16f45dc874a18831b54d71
t.Errorf("err: %v\n", err)
}
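
Note: a matching ECB-mode decrypt, sketched here only to verify the round trip of the test vector above; it is not part of the commit and reuses the encrypt function from this package.

package networkdata

import (
	"bytes"
	"crypto/des"
	"errors"
	"testing"
)

// decrypt reverses encrypt: DES in ECB mode, block by block.
func decrypt(cipherData, key []byte) ([]byte, error) {
	if len(cipherData) < 1 || len(key) < 1 {
		return nil, errors.New("wrong data or key")
	}
	block, err := des.NewCipher(key)
	if err != nil {
		return nil, err
	}
	bs := block.BlockSize()
	if len(cipherData)%bs != 0 {
		return nil, errors.New("wrong padding")
	}
	out := make([]byte, len(cipherData))
	dst := out
	for len(cipherData) > 0 {
		block.Decrypt(dst, cipherData[:bs])
		cipherData = cipherData[bs:]
		dst = dst[bs:]
	}
	return out, nil
}

func TestDecryptRoundTrip(t *testing.T) {
	key := []byte{0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x12, 0x34}
	ki := []byte{0x80, 0x5D, 0xAD, 0xC6, 0xE8, 0xA5, 0x4A, 0x0D, 0x59, 0xD6, 0x22, 0xC7, 0xA0, 0x4D, 0x08, 0xE0}
	kis, err := encrypt(ki, key)
	if err != nil {
		t.Fatalf("encrypt: %v", err)
	}
	plain, err := decrypt(kis, key)
	if err != nil {
		t.Fatalf("decrypt: %v", err)
	}
	if !bytes.Equal(plain, ki) {
		t.Errorf("round trip mismatch: got %x, want %x", plain, ki)
	}
}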