# PriceForecast/lib/dataread.py
# Imports
from reportlab.lib.units import cm  # unit: cm
from reportlab.graphics.shapes import Drawing  # drawing canvas
from reportlab.graphics.charts.legends import Legend  # chart legend
from reportlab.graphics.charts.barcharts import VerticalBarChart  # bar chart
from reportlab.lib import colors  # color module
from reportlab.lib.styles import getSampleStyleSheet  # text styles
from reportlab.lib.pagesizes import letter  # page size (8.5*inch, 11*inch)
from reportlab.platypus import Table, SimpleDocTemplate, Paragraph, Image  # report building blocks
from reportlab.pdfbase.ttfonts import TTFont  # font class
from reportlab.pdfbase import pdfmetrics  # font registration
from sklearn import metrics
from datetime import timedelta
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import datetime
import string
import base64
import requests
import random
import time
import re
import os
import hmac
import hashlib
import json
import math

import torch
torch.set_float32_matmul_precision("high")

# Configure matplotlib for Chinese text
plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly
# Register the PDF font (prepare the font file in advance; register several
# fonts if one document needs more than one)
pdfmetrics.registerFont(TTFont('SimSun', 'SimSun.ttf'))
# from config_jingbo_pro import *
# from config_jingbo import *
# from config_jingbo_yuedu import *
# from config_yongan import *
# from config_juxiting import *
# from config_juxiting_zhoudu import *
# from config_juxiting_pro import *

# from config_jingbo import logger
global_config = {
    # Core settings
    'logger': None,  # logger instance
    'dataset': None,  # dataset directory
    'y': None,  # target column name
    'is_fivemodels': None,

    # Model parameters
    'data_set': None,  # dataset name
    'input_size': None,  # input dimension
    'horizon': None,  # forecast horizon
    'train_steps': None,  # training steps
    'val_check_steps': None,  # validation interval

    # Feature-engineering switches
    'is_del_corr': None,  # whether to drop weakly correlated features
    'is_del_tow_month': None,  # whether to drop features not updated recently
    'is_eta': None,  # ETA switch
    'is_update_eta': None,  # ETA update switch
    'is_update_eta_data': None,  # ETA data-update switch
    'early_stop_patience_steps': None,  # early-stopping patience
    'is_update_report': None,  # whether to update the report

    # Time parameters
    'start_year': None,  # first year of data to keep
    'end_time': None,  # end-time parameter (added; was missing)
    'freq': [None],  # data frequency (kept as a list)

    # Data upload
    'upload_url': None,  # main upload URL
    'upload_headers': None,  # upload request headers
    'upload_warning_url': None,  # warning upload URL
    'upload_warning_data': None,  # warning payload

    # Query endpoints
    'query_data_list_item_nos_url': None,  # data-item query URL
    'query_data_list_item_nos_data': None,  # data-item query parameters

    # Field mappings
    'offsite_col': None,  # columns adjusted by the fixed offset (see datachuli_juxiting)
    'avg_col': None,  # columns averaged into the target
    'offsite': None,  # offset value subtracted from offsite_col
    'edbcodenamedict': None,  # EDB code-to-name mapping
    'rote': None,  # plotting upper/lower-bound threshold

    # Endpoint settings (pre-existing)
    'login_pushreport_url': None,
    'login_data': None,
    'upload_warning_headers': None,

    # ETA settings
    'APPID': None,
    'SECRET': None,

    # Database settings
    'sqlitedb': None,
}
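
# A minimal sketch of how a config module is expected to populate
# global_config before the functions below run (every value here is
# hypothetical; the real ones live in the commented-out config_* modules):
#
#   import logging
#   global_config.update({
#       'logger': logging.getLogger(__name__),
#       'dataset': 'dataset',        # output directory
#       'y': 'y',                    # target column
#       'start_year': 2018,
#       'end_time': '',              # '' means "use today"
#       'is_del_corr': 0.6,          # drop features with |corr| below this
#       'is_del_tow_month': True,
#   })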
# logger = global_config['logger']
# dataset = global_config['dataset']
# y = global_config['y']
# data_set = global_config['data_set']
# input_size = global_config['input_size']
# horizon = global_config['horizon']
# train_steps = global_config['train_steps']
# val_check_steps = global_config['val_check_steps']
# is_del_corr = global_config['is_del_corr']
# is_del_tow_month = global_config['is_del_tow_month']
# is_eta = global_config['is_eta']
# is_update_eta = global_config['is_update_eta']
# is_update_eta_data = global_config['is_update_eta_data']
# start_year = global_config['start_year']
# end_time = global_config['end_time']
# freq = global_config['freq'][0]
# offsite_col = global_config['offsite_col']
# avg_cols = global_config['avg_col']
# offsite = global_config['offsite']
# edbcodenamedict = global_config['edbcodenamedict']
# query_data_list_item_nos_url = global_config['query_data_list_item_nos_url']
# query_data_list_item_nos_data = global_config['query_data_list_item_nos_data']
# config.login_pushreport_url = global_config['config.login_pushreport_url']
# login_data = global_config['login_data']
# upload_url = global_config['upload_url']
# upload_warning_url = global_config['upload_warning_url']
# upload_warning_data = global_config['upload_warning_data']
# warning_data = global_config['upload_warning_data']
# APPID = global_config['APPID']
# SECRET = global_config['SECRET']

# Function definitions
def loadcsv(filename):
    """
    Read the CSV file with the given filename.
    Tries UTF-8 first and falls back to GBK if decoding fails.

    Args:
        filename (str): path of the CSV file to read.

    Returns:
        pandas.DataFrame: the loaded data.
    """
    try:
        df = pd.read_csv(filename, encoding='utf-8')
    except UnicodeDecodeError:
        df = pd.read_csv(filename, encoding='gbk')
    return df


def dateConvert(df, datecol='ds'):
    """
    Convert column `datecol` of DataFrame df to datetime.

    Args:
        df (pandas.DataFrame): the DataFrame to convert.
        datecol (str): name of the date column, defaults to 'ds'.

    Returns:
        pandas.DataFrame: the converted DataFrame.
    """
    try:
        df[datecol] = pd.to_datetime(df[datecol], format=r'%Y-%m-%d')
    except ValueError:
        df[datecol] = pd.to_datetime(df[datecol], format=r'%Y/%m/%d')
    return df


def calculate_kdj(data, n=9):
    '''
    Add simulated high/low columns and the K, D and J indicator columns
    to the given DataFrame.
    '''
    # Sort by date ascending
    data = data.sort_values(by='ds', ascending=True)
    # Without high/open/low prices, simulate the daily high and low from
    # the daily percentage change
    data['pctchange'] = data['y'].pct_change()
    # Replace zero returns with 0.01
    data['pctchange'] = data['pctchange'].replace(0, 0.01)
    # Drop missing values
    data.dropna(inplace=True)
    # Reset the index
    data.reset_index(drop=True, inplace=True)
    # Simulated high and low prices
    data['high'] = data['y'] * (1 + abs(data['pctchange']) / 2)
    data['low'] = data['y'] * (1 - abs(data['pctchange']) / 2)
    # n-day rolling low
    low_list = data['y'].rolling(window=n, min_periods=1).min()
    # n-day rolling high
    high_list = data['y'].rolling(window=n, min_periods=1).max()
    # Raw stochastic value (RSV)
    rsv = ((data['y'] - low_list) / (high_list - low_list)) * 100
    # Initialize K at 50
    k = pd.Series(50, index=data.index)
    # Initialize D at 50
    d = pd.Series(50, index=data.index)
    # Compute K and D
    for i in range(1, len(data)):
        k[i] = (2/3 * k[i - 1]) + (1/3 * rsv[i])
        d[i] = (2/3 * d[i - 1]) + (1/3 * k[i])
    # Compute J
    j = 3 * k - 2 * d

    # Attach K, D and J to the data
    data['K'] = k
    data['D'] = d
    data['J'] = j
    # Save the data with the KDJ indicator to a new CSV file
    data.to_csv('stock_data_with_kdj.csv', index=False)
    # data = data.dropna()
    return data
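
# The recursion implemented above is the standard KDJ definition:
#   RSV_t = (C_t - L_n) / (H_n - L_n) * 100
#   K_t   = (2/3) * K_{t-1} + (1/3) * RSV_t
#   D_t   = (2/3) * D_{t-1} + (1/3) * K_t
#   J_t   = 3 * K_t - 2 * D_t
# where C_t is the close (here y), L_n/H_n are the n-day rolling low/high
# of y, and K and D are both seeded at 50.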


# Report upload
def get_head_auth_report():
    """
    Log in to the configured URL via POST and read the auth token from
    the response.

    Returns:
        str: the access token if login succeeds, otherwise None.
    """
    config.logger.info("Fetching token...")
    config.logger.info(
        f'url:{config.login_pushreport_url},login_data:{config.login_data}')
    # POST the login payload to the login URL
    login_res = requests.post(url=config.login_pushreport_url,
                              json=config.login_data, timeout=(3, 30))

    # Parse the response body as JSON
    text = json.loads(login_res.text)
    config.logger.info(f'token endpoint response: {text}')
    # If the response reports success
    if text["status"]:
        # Read the access token from the response payload
        token = text["data"]["accessToken"]
        # Return the token
        return token


def upload_report_data(token, upload_data):
    """
    Upload report data to the configured URL.

    Args:
        token (str): auth token.
        upload_data (dict): the report payload to upload.

    Returns:
        dict: the parsed response on success, otherwise None.
    """
    # Build the request headers
    headers = {"Authorization": token}

    # Log the upload attempt
    config.logger.info("Uploading report...")
    config.logger.info(f"token:{token}")
    config.logger.info(f"upload_data:{upload_data}")

    # POST the report payload
    upload_res = requests.post(
        url=config.upload_url, headers=headers, json=upload_data, timeout=(3, 15))

    # Parse the response body as JSON
    upload_res = json.loads(upload_res.text)

    # Log the response
    config.logger.info(upload_res)

    # Return the response on success
    if upload_res:
        return upload_res
    else:
        config.logger.info("Report upload failed")
        return None
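
# A minimal sketch of the report-upload flow, assuming the config module
# has populated login_pushreport_url, login_data and upload_url (the
# payload fields shown here are hypothetical):
#
#   token = get_head_auth_report()
#   if token:
#       upload_report_data(token, {'data': {'REPORT_NAME': '原油价格预测',
#                                           'CONTENT': '<base64 pdf>'}})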


def upload_warning_data(warning_data):
    """
    Upload warning data to the configured URL.

    Args:
        warning_data (dict): the warning payload to upload.

    Returns:
        requests.Response: the response object on success, otherwise None.
    """
    # Fetch the auth token
    token = get_head_auth_report()

    # Build the request headers
    headers = {"Authorization": token}

    # Log the upload attempt
    config.logger.info("Uploading warning...")
    config.logger.info(f"upload_warning_url:{config.upload_warning_url}")
    config.logger.info(f"token:{token}")
    # Use the warning_data argument (the original referenced
    # config.warning_data, which the Config class does not define)
    config.logger.info(f"warning_data:{warning_data}")

    # POST the warning payload
    upload_res = requests.post(
        url=config.upload_warning_url, headers=headers, json=warning_data, timeout=(3, 15))

    # Return the response on success
    if upload_res:
        return upload_res
    else:
        config.logger.info("Warning upload failed")
        return None


def upload_warning_info(df_count):
    """
    Upload a stale-feature warning to the configured URL.

    Args:
        df_count (int): number of features that stopped updating.

    Returns:
        None
    """
    # Log the upload attempt
    config.logger.info('Uploading warning info')
    try:
        # Current date
        warning_date = datetime.datetime.now().strftime('%Y-%m-%d')
        warning_date2 = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        # Build the warning message (payload text stays in Chinese)
        content = f'{warning_date}有{df_count}个停更'

        # Update the date and message in the warning payload
        # (config.warning_data is not defined on Config; use the
        # upload_warning_data property)
        config.upload_warning_data['data']['WARNING_DATE'] = warning_date2
        config.upload_warning_data['data']['WARNING_CONTENT'] = content

        # Upload via upload_warning_data
        upload_warning_data(config.upload_warning_data)

        # Log success
        config.logger.info('Warning info uploaded')
    except Exception as e:
        # Log failure together with the exception
        config.logger.error(f'Failed to upload warning info: {e}')


def create_feature_last_update_time(df):
    """
    Compute per-feature last-update information (used for stale-feature
    detection).

    Args:
        df (DataFrame): DataFrame containing the feature data.

    Returns:
        DataFrame: per-feature last-update information.
        str: warning date of the y column.
    """
    df1 = df.copy()
    # Find the last update time of every column
    df1.set_index('ds', inplace=True)
    last_update_times = df1.apply(lambda x: x.dropna().index.max().strftime(
        '%Y-%m-%d') if not x.dropna().empty else None)

    # Collect every column's last update time into a DataFrame
    last_update_times_df = pd.DataFrame(columns=[
        'feature', 'last_update_time', 'is_value', 'update_period', 'warning_date', 'stop_update_period'])

    # Record every column's last update time
    for column, last_update_time in last_update_times.items():
        values = []
        # Flag constant-valued columns
        if df1[column].tail(20).nunique() == 1:
            values = values + [column, last_update_time, 1]
        else:
            values = values + [column, last_update_time, 0]

        # Compute the feature's update interval
        try:
            # Compute the warning date
            time_diff = (df1[column].dropna().index.to_series().diff().mode()[
                0]).total_seconds() / 3600 / 24

            last_update_time_datetime = datetime.datetime.strptime(
                last_update_time, '%Y-%m-%d')
            last_update_date = config.end_time if config.end_time != '' else datetime.datetime.now(
            ).strftime('%Y-%m-%d')
            end_time_datetime = datetime.datetime.strptime(
                last_update_date, '%Y-%m-%d')

            early_warning_date = last_update_time_datetime + \
                timedelta(days=time_diff)*2 + timedelta(days=1)
            stop_update_period = int(
                math.ceil((end_time_datetime-last_update_time_datetime).days / time_diff))
            early_warning_date = early_warning_date.strftime('%Y-%m-%d')
        except KeyError:
            time_diff = 0
            early_warning_date = config.end_time
            continue

        values = values + [time_diff, early_warning_date, stop_update_period]
        last_update_times_df.loc[len(last_update_times_df)] = values

        config.logger.info(
            f"Column {column} was last updated at {last_update_time}")

    y_last_update_time = last_update_times_df[last_update_times_df['feature']
                                              == 'y']['warning_date'].values[0]
    last_update_times_df.to_csv(os.path.join(
        config.dataset, 'last_update_times.csv'), index=False)
    config.logger.info('Stale-feature info saved to last_update_times.csv')
    return last_update_times_df, y_last_update_time
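
# A minimal usage sketch: given a preprocessed frame with a 'ds' column,
# this returns one row per feature plus the warning date of y itself
# (mirrors the commented-out call in datachuli below):
#
#   last_update_times_df, y_warning_date = create_feature_last_update_time(df)
#   stale = last_update_times_df[
#       last_update_times_df['warning_date'] < y_warning_date]['feature']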


# Feature frequency statistics
def featurePindu(dataset):
    # Load the file
    df = loadcsv(os.path.join(dataset, '未填充的特征数据.csv'))
    df['ds'] = pd.to_datetime(df['ds'])
    # Sort by ds ascending and reset the index
    df = df.sort_values(by='ds', ascending=True).reset_index(drop=True)

    # Compute feature frequencies: sample rows from each column, compute
    # the day gaps between consecutive observations, and keep the most
    # common gap
    columns = df.columns.to_list()
    columns.remove('ds')
    count_dict = {}
    for column in columns:
        # Day gaps for this column
        values = df[[column, 'ds']]
        values.dropna(inplace=True, axis=0)
        values = values.reset_index(drop=True)

        # Sample 20% of the rows
        value = values.sample(frac=0.2)
        index = value.index
        next_index = index + 1
        count = []
        for i, j in zip(index, next_index):
            # Day difference between consecutive rows
            try:
                count.append((values.loc[j, 'ds'] - values.loc[i, 'ds']).days)
            except KeyError:
                pass

        # Treat 31-day gaps as 30-day gaps
        count = [30 if i == 31 else i for i in count]
        # Keep the most frequent gap
        try:
            count = max(set(count), key=count.count)
        except ValueError:
            config.logger.info(f'column {column} is empty')
            continue
        # Store it
        count_dict[column] = count

    df = pd.DataFrame(count_dict, index=['count']).T
    pindu_dfs = pd.DataFrame()
    # Group by gap and label the frequency buckets
    pindudict = {'1': '日度', '3': '日度', '7': '周度',
                 '30': '月度', '90': '季度', '180': '半年度', '365': '年度'}
    for i in df.groupby('count'):
        # Index values of this group
        index = i[1].index
        pindu_df = pd.DataFrame()
        try:
            pindu_df[pindudict[str(i[0])]+f'({len(i[1])})'] = index
        except KeyError:
            pindu_df[str(i[0])+f'天({len(i[1])})'] = index
        # Merge into pindu_dfs
        pindu_dfs = pd.concat([pindu_dfs, pindu_df], axis=1)
    # Replace NaN with ''
    pindu_dfs = pindu_dfs.fillna('')
    pindu_dfs.to_csv(os.path.join(dataset, '特征频度统计.csv'), index=False)
    config.logger.info(pindu_dfs)

    featureInfo = f'特征信息:总共有{len(columns)-2}个'
    for i in pindu_dfs.columns:
        featureInfo += f',{i}'

    featureInfo += ', 详看 附1、特征列表'
    featureInfo += '''
    数据特征工程:
    1. 数据日期排序,新日期在最后
    2. 删除空列,特征数据列没有值,就删除
    3. 删除近两月不再更新值的指标
    4. 非日度数据填充为日度数据,填充规则:
    -- 向后填充,举例:假设周五出现一个周度指标数据,那么在这之前的数据用上周五的数据
    -- 向前填充,举例:采集数据开始日期为2018年1月1日,那么周度数据可能是2018年1月3日,那么3日的数据向前填充,使1日2日都有数值
    数据特征相关性分析:
    '''
    config.logger.info(featureInfo)
    with open(os.path.join(dataset, '特征频度统计.txt'), 'w', encoding='utf-8') as f:
        f.write(featureInfo)
    config.logger.info('*'*200)


def featureAnalysis(df, dataset, y):
    # Feature screening
    import matplotlib.pyplot as plt
    # Split features and label
    X = df.drop(['ds', 'y'], axis=1)  # features, excluding timestamp and label
    yy = df['y']  # label

    # Autocorrelation of the label
    from statsmodels.graphics.tsaplots import plot_acf
    plot_acf(yy, lags=30)
    plt.savefig(os.path.join(dataset, '指标数据自相关图.png'))
    plt.close()

    # Partial autocorrelation of the label
    from statsmodels.graphics.tsaplots import plot_pacf
    plot_pacf(yy, lags=30)
    plt.savefig(os.path.join(dataset, '指标数据偏自相关图.png'))
    plt.close()

    # Scatter plots of features against price
    # Delete all existing *散点图.png files first
    for file in os.listdir(dataset):
        if file.endswith("散点图.png"):
            os.remove(os.path.join(dataset, file))
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    plt.figure(figsize=(10, 10))
    # # Plot a scatter of every column of X against yy
    # for i, col in enumerate(X.columns):
    #     plt.subplot(2, 2, i % 4 + 1)
    #     plt.scatter(X[col], yy)
    #     plt.xlabel(col)
    #     plt.ylabel(y)
    #     plt.title(col)
    #     if i % 4 == 3 or i == len(X.columns)-1:
    #         plt.tight_layout()
    #         plt.savefig(os.path.join(dataset, f'{i}指标数据特征与价格散点图.png'))
    #         plt.close()


def corr_feature(df):
    # Reorder columns so y comes first (assign the result; the original
    # reindex call discarded it)
    df = df.reindex(columns=['y'] + sorted(df.columns.difference(['y'])))
    df_test = df.copy()
    # Keep the last 220 rows
    df_test = df_test.tail(220)
    # Drop the date column
    df_test = df_test.drop(columns=['ds'])
    # Copies excluded from standardization
    df_test_noscaler = df_test.copy()  # backup for lag processing
    df_noscaler = df_test.copy()
    # Data for the correlation heat map
    df_test.to_csv(os.path.join(config.dataset, '同步相关性.csv'))
    corr = df_test.corr()
    # Save the correlation coefficients
    corr.to_csv(os.path.join(config.dataset, '同步相关性系数.csv'))
    # plt.figure(figsize=(10, 10))
    # sns.heatmap(corr, annot=True, cmap='coolwarm')
    # plt.savefig('dataset/同步相关性热力图.png')
    # plt.show()

    # Read the lag-period file and shift features accordingly
    characteristic_period = pd.read_csv('dataset/特征滞后周期.csv', encoding='utf-8')
    # Drop rows whose lag period is 0
    characteristic_period = characteristic_period.drop(
        characteristic_period[characteristic_period['滞后周期'] == 0].index)
    for col in df.columns:
        # Skip the y column
        if col in ['y']:
            continue
        # Shift the feature by its configured lag period
        if col in characteristic_period['特征'].values:
            # Look up this feature's lag period
            period = characteristic_period[characteristic_period['特征']
                                           == col]['滞后周期'].values[0]
            # Apply the lag
            df[col] = df[col].shift(period)
    df.to_csv(os.path.join(config.dataset, '滞后处理后的数据集.csv'))

    # corr_feture_noscaler = {}  # 保存相关性最大的周期
    # # 遍历df_test的每一列,计算相关性
    # for col in df_noscaler.columns:
    #     # 跳过y列
    #     if col in ['y']:
    #         continue
    #     config.logger.info('特征:', col)
    #     # 特征滞后n个周期,计算与y的相关性
    #     corr_dict = {}
    #     try:
    #         for i in range(0, 200):
    #             if i == 0:
    #                 df_noscaler[col+'_'+str(i)] = df_noscaler[col]
    #             else:
    #                 df_noscaler[col+'_'+str(i)] = df_noscaler[col].shift(i)
    #             corr_dict[col+'_'+str(i)] = abs(df_noscaler[col+'_'+str(i)].corr(df_noscaler['y']))
    #     except:
    #         config.logger.info('特征:', col, '滑动错误,请查看')
    #         continue
    #     # 输出相关性最大的特征
    #     config.logger.info(max(corr_dict, key=corr_dict.get), corr_dict[max(corr_dict, key=corr_dict.get)])
    #     corr_feture_noscaler[col] = max(corr_dict, key=corr_dict.get).split('_')[-1]
    #     # 画出相关性最大的特征和y的折线图
    #     plt.figure(figsize=(10, 5))
    #     plt.plot(df_noscaler[max(corr_dict, key=corr_dict.get)], label=max(corr_dict, key=corr_dict.get))
    #     # 设置双坐标轴
    #     ax1 = plt.gca()
    #     ax2 = ax1.twinx()
    #     ax2.plot(df_noscaler['y'], color='r', label='y')
    #     plt.legend()
    #     try:
    #         plt.savefig('dataset/特征与y的折线图_'+max(corr_dict, key=corr_dict.get)+'.png')
    #     except:
    #         # 替换成_
    #         plt.savefig('dataset/特征与y的折线图_'+max(corr_dict, key=corr_dict.get).replace(':', '_').replace('/', '_').replace('(', '_').replace(')', '_')+'.png')
    #     plt.close()

    # # 结果保存到txt文件
    # config.logger.info('不参与标准化的特征滞后相关性写入txt文件')
    # with open('dataset/不参与标准化的特征滞后相关性.txt', 'w') as f:
    #     for key, value in corr_feture_noscaler.items():
    #         f.write('%s:%s\n' % (key, value))

    # # 遍历corr_feture_noscaler,更改df
    # colnames_noscaler = []
    # for col in corr_feture_noscaler:
    #     colname = col+'_'+corr_feture_noscaler[col]
    #     if int(corr_feture_noscaler[col]) == 0:
    #         continue
    #     df_test_noscaler[colname] = df_test_noscaler[col].shift(int(corr_feture_noscaler[col]))
    #     df_test_noscaler = df_test_noscaler.drop(columns=[col])
    #     colnames_noscaler.append(colname)

    # # 去除有空值的行
    # df_test_noscaler = df_test_noscaler.dropna()
    # df_test_noscaler.reindex(['y'] + sorted(df_test_noscaler.columns.difference(['y'])))
    # df_test_noscaler.to_csv('dataset/不参与标准化的特征滞后相关性.csv', index=False)

    # # 画出相关性热力图
    # corr = df_test_noscaler.corr()
    # # 保存相关系数
    # corr.to_csv(os.path.join(dataset, '不参与标准化的特征滞后相关性系数.csv'))
    # plt.figure(figsize=(10, 10))
    # sns.heatmap(corr, annot=True, cmap='coolwarm')
    # plt.savefig('dataset/不参与标准化的特征滞后相关性热力图.png')
    # plt.close()

    # # 标准化每列
    # from sklearn.preprocessing import StandardScaler
    # scaler = StandardScaler()
    # df_test = pd.DataFrame(scaler.fit_transform(df_test), columns=df_test.columns)

    # corr_feture = {}  # 保存相关性最大的周期
    # # 遍历df_test的每一列,计算相关性
    # for col in df_test.columns:
    #     # 跳过y列
    #     if col == 'y':
    #         continue
    #     config.logger.info('特征:', col)
    #     # 特征滞后n个周期,计算与y的相关性
    #     corr_dict = {}
    #     try:
    #         for i in range(0, 200):
    #             if i == 0:
    #                 df_test[col+'_'+str(i)] = df_test[col]
    #             else:
    #                 df_test[col+'_'+str(i)] = df_test[col].shift(i)
    #             corr_dict[col+'_'+str(i)] = abs(df_test[col+'_'+str(i)].corr(df_test['y']))
    #     except:
    #         config.logger.info('特征:', col, '滑动错误,请查看')
    #         continue
    #     # 输出相关性最大的特征
    #     config.logger.info(max(corr_dict, key=corr_dict.get), corr_dict[max(corr_dict, key=corr_dict.get)])
    #     corr_feture[col] = max(corr_dict, key=corr_dict.get).split('_')[-1]

    # # 结果保存到txt文件
    # with open('dataset/标准化的特征滞后相关性.txt', 'w') as f:
    #     for key, value in corr_feture.items():
    #         f.write('%s:%s\n' % (key, value))
    # # 遍历corr_feture,更改df
    # colnames = []
    # for col in corr_feture:
    #     colname = col+'_'+corr_feture[col]
    #     if int(corr_feture[col]) == 0:
    #         continue
    #     df[colname] = df[col].shift(int(corr_feture[col]))
    #     df = df.drop(columns=[col])
    #     colnames.append(colname)
    # # 去除有空值的行
    # df = df.dropna()
    # df.reindex(['y'] + sorted(df.columns.difference(['y'])))
    # df.to_csv('dataset/标准化后的特征滞后相关性.csv', index=False)
    # # 画出相关性热力图
    # ds = df['ds']
    # df = df.drop(columns=['ds'])
    # corr = df.corr()
    # # 保存相关系数
    # corr.to_csv(os.path.join(dataset, '标准化后的特征滞后相关性系数.csv'))
    # plt.figure(figsize=(10, 10))
    # sns.heatmap(corr, annot=True, cmap='coolwarm')
    # plt.savefig('dataset/标准化后的特征滞后相关性热力图.png')
    # plt.show()
    # df['ds'] = ds

    # Drop rows containing NaNs
    df = df.dropna()
    return df


def calculate_kdj(data, n=9):
    '''
    Add simulated high/low columns and the K, D and J indicator columns
    to the given DataFrame.
    NOTE: this redefinition shadows the calculate_kdj defined earlier in
    this module; it differs only in where the CSV is written.
    '''
    data = data.sort_values(by='ds', ascending=True)
    # Without high/open/low prices, simulate the daily high and low from
    # the daily percentage change
    data['pctchange'] = data['y'].pct_change()
    # Replace zero returns with 0.01
    data['pctchange'] = data['pctchange'].replace(0, 0.01)
    data.dropna(inplace=True)
    # Reset the index
    data.reset_index(drop=True, inplace=True)
    data['high'] = data['y'] * (1 + abs(data['pctchange']) / 2)
    data['low'] = data['y'] * (1 - abs(data['pctchange']) / 2)
    low_list = data['y'].rolling(window=n, min_periods=1).min()
    high_list = data['y'].rolling(window=n, min_periods=1).max()
    rsv = ((data['y'] - low_list) / (high_list - low_list)) * 100
    k = pd.Series(50, index=data.index)
    d = pd.Series(50, index=data.index)
    for i in range(1, len(data)):
        k[i] = (2/3 * k[i - 1]) + (1/3 * rsv[i])
        d[i] = (2/3 * d[i - 1]) + (1/3 * k[i])
    j = 3 * k - 2 * d

    data['K'] = k
    data['D'] = d
    data['J'] = j
    # Save the data with the KDJ indicator to a new CSV file
    # (os.path.join replaces the unportable backslash path)
    data.to_csv(os.path.join('dataset', 'stock_data_with_kdj.csv'), index=False)
    # data = data.dropna()
    return data


def check_column(df, col_name, two_months_ago):
    '''
    Decide whether a column should be dropped.
    A column is dropped when it is all-NaN, has been constant since the
    cutoff date, correlates with y below the configured threshold, or has
    not been updated since the cutoff date.

    Args:
        col_name (str): column name.
        df (DataFrame): DataFrame containing the column.
        two_months_ago (datetime): cutoff date for the staleness checks.

    Returns:
        bool: True if the column should be dropped, otherwise False.
    '''
    if 'ds' in col_name or 'y' in col_name:
        return False
    df_check_column = df[['ds', col_name, 'y']]
    df_check_column = df_check_column.dropna()
    if len(df_check_column) == 0:
        print(f'empty column: {col_name}')
        return True
    # Constant since the cutoff date
    if df_check_column[(df_check_column['ds'] >= two_months_ago)].groupby(col_name).ngroups < 2:
        print(f'constant since the cutoff date: {col_name}')
        return True
    # Correlation with y below the configured threshold
    if config.is_del_corr > 0:
        if abs(df_check_column[col_name].corr(df_check_column['y'])) < config.is_del_corr:
            print(f'correlation with y below {config.is_del_corr}: {col_name}')
            return True

    corresponding_date = df_check_column.iloc[-1]['ds']
    return corresponding_date < two_months_ago


def datachuli(df_zhibiaoshuju, df_zhibiaoliebiao, datecol='date', end_time='', y='y', dataset='dataset', delweekenday=False, add_kdj=False, is_timefurture=False):
    '''
    Crude-oil feature preprocessing.
    Takes two DataFrames, one with indicator data and one with the
    indicator list, and returns a DataFrame with ds, y and the feature
    columns.
    '''
    df = df_zhibiaoshuju.copy()

    if end_time == '':
        end_time = datetime.datetime.now().strftime('%Y-%m-%d')

    # Rename the date and target columns
    df.rename(columns={datecol: 'ds'}, inplace=True)
    df.rename(columns={y: 'y'}, inplace=True)
    # Sort chronologically
    df.sort_values(by='ds', inplace=True)
    df['ds'] = pd.to_datetime(df['ds'])
    # Keep data from start_year through end_time
    df = df[df['ds'].dt.year >= config.start_year]
    df = df[df['ds'] <= end_time]

    # last_update_times_df, y_last_update_time = create_feature_last_update_time(df)
    # config.logger.info(f'删除预警的特征前数据量:{df.shape}')
    # columns_to_drop = last_update_times_df[last_update_times_df['warning_date'] < y_last_update_time]['feature'].values.tolist()
    # df = df.drop(columns=columns_to_drop)
    # config.logger.info(f'删除预警的特征后数据量:{df.shape}')
    # if is_update_warning_data:
    #     upload_warning_info(last_update_times_df, y_last_update_time)

    # Drop columns whose latest data predates the cutoff, and columns that
    # have been constant over the recent window
    if config.is_del_tow_month:
        current_date = datetime.datetime.now()
        two_months_ago = current_date - timedelta(days=180)
        config.logger.info(f'rows/cols before dropping stale features: {df.shape}')
        columns_to_drop = []
        for clo in df.columns:
            if check_column(df, clo, two_months_ago):
                columns_to_drop.append(clo)
        df = df.drop(columns=columns_to_drop)
        config.logger.info(f'rows/cols after dropping stale features: {df.shape}')

    # Derived time features
    if is_timefurture:
        df = addtimecharacteristics(df=df, dataset=dataset)

    if config.freq == 'WW':
        # Custom weekly data: group by year-month-week and take the mean
        df = df.groupby(df['yearmonthweeks']).mean()
        # Convert the time column to date strings
        df['ds'] = df['ds'].dt.strftime('%Y-%m-%d')
    elif config.freq == 'W':
        # Resample weekly
        df = df.resample('W', on='ds').mean().reset_index()
    elif config.freq == 'M':
        # Resample monthly
        df = df.resample('M', on='ds').mean().reset_index()

    # Dropping rows with a missing target stays disabled: working days may
    # be missing, and dropping them would skew the accuracy statistics
    # df = df.dropna(subset=['y'])
    config.logger.info(f'rows/cols after dropping rows with missing target: {df.shape}')
    df = df.dropna(axis=1, how='all')
    config.logger.info(f'rows/cols after dropping all-NaN columns: {df.shape}')
    df.to_csv(os.path.join(dataset, '未填充的特征数据.csv'), index=False)

    # Drop rows of the indicator list that refer to dropped columns
    df_zhibiaoliebiao = df_zhibiaoliebiao[df_zhibiaoliebiao['指标名称'].isin(
        df.columns.tolist())]
    df_zhibiaoliebiao.to_csv(os.path.join(
        dataset, '特征处理后的指标名称及分类.csv'), index=False)

    # Frequency analysis
    featurePindu(dataset=dataset)
    # Forward fill
    df = df.ffill()
    # Backward fill
    df = df.bfill()
    # Drop weekend rows
    if delweekenday:
        df = df[df['ds'].dt.weekday < 5]

    # KDJ indicator
    if add_kdj:
        df = calculate_kdj(df)

    # Feature analysis
    featureAnalysis(df, dataset=dataset, y=y)
    return df


def zhoududatachuli(df_zhibiaoshuju, df_zhibiaoliebiao, datecol='date', end_time='', y='y', dataset='dataset', delweekenday=False, add_kdj=False, is_timefurture=False):
    '''
    Crude-oil weekly feature preprocessing.
    Takes two DataFrames, one with indicator data and one with the
    indicator list, and returns a DataFrame with ds, y and the feature
    columns.
    '''
    df = df_zhibiaoshuju.copy()
    if end_time == '':
        end_time = datetime.datetime.now().strftime('%Y-%m-%d')
    # Rename the date and target columns
    df.rename(columns={datecol: 'ds'}, inplace=True)
    df.rename(columns={y: 'y'}, inplace=True)
    # Sort chronologically
    df.sort_values(by='ds', inplace=True)
    df['ds'] = pd.to_datetime(df['ds'])
    # Keep data from start_year through end_time
    df = df[df['ds'].dt.year >= config.start_year]
    df = df[df['ds'] <= end_time]

    # last_update_times_df, y_last_update_time = create_feature_last_update_time(df)
    # config.logger.info(f'删除预警的特征前数据量:{df.shape}')
    # columns_to_drop = last_update_times_df[last_update_times_df['warning_date'] < y_last_update_time]['feature'].values.tolist()
    # df = df.drop(columns=columns_to_drop)
    # config.logger.info(f'删除预警的特征后数据量:{df.shape}')
    # if is_update_warning_data:
    #     upload_warning_info(last_update_times_df, y_last_update_time)

    # Drop columns whose latest data predates the cutoff, and columns that
    # have been constant over the recent window
    if config.is_del_tow_month:
        current_date = datetime.datetime.now()
        two_months_ago = current_date - timedelta(days=180)
        config.logger.info(f'rows/cols before dropping stale features: {df.shape}')
        columns_to_drop = []
        for clo in df.columns:
            if check_column(df, clo, two_months_ago):
                columns_to_drop.append(clo)
        df = df.drop(columns=columns_to_drop)
        config.logger.info(f'rows/cols after dropping stale features: {df.shape}')

    if config.freq == 'W':
        # Resample weekly
        df = df.resample('W', on='ds').mean().reset_index()
    elif config.freq == 'M':
        # Resample monthly
        df = df.resample('M', on='ds').mean().reset_index()

    # Dropping rows with a missing target stays disabled: working days may
    # be missing, and dropping them would skew the accuracy statistics
    # df = df.dropna(subset=['y'])
    config.logger.info(f'rows/cols after dropping rows with missing target: {df.shape}')
    df = df.dropna(axis=1, how='all')
    config.logger.info(f'rows/cols after dropping all-NaN columns: {df.shape}')
    df.to_csv(os.path.join(dataset, '未填充的特征数据.csv'), index=False)

    # Drop rows of the indicator list that refer to dropped columns
    df_zhibiaoliebiao = df_zhibiaoliebiao[df_zhibiaoliebiao['指标名称'].isin(
        df.columns.tolist())]
    df_zhibiaoliebiao.to_csv(os.path.join(
        dataset, '特征处理后的指标名称及分类.csv'), index=False)

    # Frequency analysis
    featurePindu(dataset=dataset)
    # Forward fill
    df = df.ffill()
    # Backward fill
    df = df.bfill()
    # Drop weekend rows
    if delweekenday:
        df = df[df['ds'].dt.weekday < 5]

    # KDJ indicator
    if add_kdj:
        df = calculate_kdj(df)
    # Derived time features
    if is_timefurture:
        df = addtimecharacteristics(df=df, dataset=dataset)

    # Feature analysis
    featureAnalysis(df, dataset=dataset, y=y)
    return df


def datachuli_juxiting(df_zhibiaoshuju, df_zhibiaoliebiao, datecol='date', end_time='', y='y', dataset='dataset', delweekenday=False, add_kdj=False, is_timefurture=False):
    '''
    Polyolefin feature preprocessing.
    Takes two DataFrames, one with indicator data and one with the
    indicator list, and returns a DataFrame with ds, y and the feature
    columns.
    '''
    df = df_zhibiaoshuju.copy()
    if end_time == '':
        end_time = datetime.datetime.now().strftime('%Y-%m-%d')
    # Rename the date column
    df.rename(columns={datecol: 'ds'}, inplace=True)
    # Subtract the fixed offset from the configured columns
    df[config.offsite_col] = df[config.offsite_col] - config.offsite
    # The target is the mean of the avg_col columns
    # (the Config property is avg_col; the original referenced avg_cols)
    df[y] = df[config.avg_col].mean(axis=1)
    # Drop the now-redundant avg_col columns
    df = df.drop(columns=config.avg_col)
    # Rename the target column
    df.rename(columns={y: 'y'}, inplace=True)
    # Sort chronologically
    df.sort_values(by='ds', inplace=True)
    df['ds'] = pd.to_datetime(df['ds'])
    # Keep data from 2018 onward
    df = df[df['ds'].dt.year >= 2018]
    # Keep data up to end_time
    df = df[df['ds'] <= end_time]
    config.logger.info(f'rows/cols before dropping stale features: {df.shape}')

    # Drop columns whose latest data predates the cutoff, and columns that
    # have been constant over the recent window
    current_date = datetime.datetime.now()
    two_months_ago = current_date - timedelta(days=40)

    # Check for columns that stopped updating
    def check_column(col_name):
        if 'ds' in col_name or 'y' in col_name:
            return False
        df_check_column = df[['ds', col_name]]
        df_check_column = df_check_column.dropna()
        if len(df_check_column) == 0:
            return True
        if df_check_column[(df_check_column['ds'] >= two_months_ago)].groupby(col_name).ngroups < 2:
            return True
        corresponding_date = df_check_column.iloc[-1]['ds']
        return corresponding_date < two_months_ago
    columns_to_drop = df.columns[df.columns.map(check_column)].tolist()
    df = df.drop(columns=columns_to_drop)
    config.logger.info(f'rows/cols after dropping stale features: {df.shape}')

    # Drop rows with a missing target
    df = df.dropna(subset=['y'])
    config.logger.info(f'rows/cols after dropping rows with missing target: {df.shape}')
    df = df.dropna(axis=1, how='all')
    config.logger.info(f'rows/cols after dropping all-NaN columns: {df.shape}')
    df.to_csv(os.path.join(dataset, '未填充的特征数据.csv'), index=False)

    # Drop rows of the indicator list that refer to dropped columns
    df_zhibiaoliebiao = df_zhibiaoliebiao[df_zhibiaoliebiao['指标名称'].isin(
        df.columns.tolist())]
    df_zhibiaoliebiao.to_csv(os.path.join(
        dataset, '特征处理后的指标名称及分类.csv'), index=False)
    # Frequency analysis
    featurePindu(dataset=dataset)
    # Forward fill
    df = df.ffill()
    # Backward fill
    df = df.bfill()
    # Drop weekend rows
    if delweekenday:
        df = df[df['ds'].dt.weekday < 5]

    if add_kdj:
        df = calculate_kdj(df)

    if is_timefurture:
        df = addtimecharacteristics(df=df, dataset=dataset)

    featureAnalysis(df, dataset=dataset, y=y)
    return df


def getdata(filename, datecol='date', y='y', dataset='', add_kdj=False, is_timefurture=False, end_time=''):
    config.logger.info('getdata received: ' + filename + ' ' + datecol + ' ' + end_time)
    # Dispatch on the file extension: csv or excel
    # (NOTE: the csv branch leaves df_zhibiaoshuju/df_zhibiaoliebiao unset)
    if filename.endswith('.csv'):
        df = loadcsv(filename)
    else:
        # Read the indicator sheets from the Excel workbook
        df_zhibiaoshuju = pd.read_excel(filename, sheet_name='指标数据')
        df_zhibiaoliebiao = pd.read_excel(filename, sheet_name='指标列表')

    # Convert date strings to datetime and preprocess
    df = datachuli(df_zhibiaoshuju, df_zhibiaoliebiao, datecol, y=y, dataset=dataset,
                   add_kdj=add_kdj, is_timefurture=is_timefurture, end_time=end_time)
    return df, df_zhibiaoliebiao
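
# An illustrative call (the path, target column and dataset directory are
# hypothetical; the real values come from the config module that
# populates global_config):
#
#   df, df_zhibiaoliebiao = getdata('dataset/指标数据.xlsx', datecol='date',
#                                   y='y', dataset='dataset', add_kdj=True,
#                                   is_timefurture=True, end_time='2025-03-01')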


def getzhoududata(filename, datecol='date', y='y', dataset='', add_kdj=False, is_timefurture=False, end_time=''):
    config.logger.info('getdata received: ' + filename + ' ' + datecol + ' ' + end_time)
    # Dispatch on the file extension: csv or excel
    # (NOTE: the csv branch leaves df_zhibiaoshuju/df_zhibiaoliebiao unset)
    if filename.endswith('.csv'):
        df = loadcsv(filename)
    else:
        # Read the indicator sheets from the Excel workbook
        df_zhibiaoshuju = pd.read_excel(filename, sheet_name='指标数据')
        df_zhibiaoliebiao = pd.read_excel(filename, sheet_name='指标列表')

    # Convert date strings to datetime and preprocess
    df = zhoududatachuli(df_zhibiaoshuju, df_zhibiaoliebiao, datecol, y=y, dataset=dataset,
                         add_kdj=add_kdj, is_timefurture=is_timefurture, end_time=end_time)

    return df, df_zhibiaoliebiao


def getdata_juxiting(filename, datecol='date', y='y', dataset='', add_kdj=False, is_timefurture=False, end_time=''):
    config.logger.info('getdata received: ' + filename + ' ' + datecol + ' ' + end_time)
    # Dispatch on the file extension: csv or excel
    # (NOTE: the csv branch leaves df_zhibiaoshuju/df_zhibiaoliebiao unset)
    if filename.endswith('.csv'):
        df = loadcsv(filename)
    else:
        # Read the indicator sheets from the Excel workbook
        df_zhibiaoshuju = pd.read_excel(filename, sheet_name='指标数据')
        df_zhibiaoliebiao = pd.read_excel(filename, sheet_name='指标列表')

    # Convert date strings to datetime and preprocess
    df = datachuli_juxiting(df_zhibiaoshuju, df_zhibiaoliebiao, datecol, y=y, dataset=dataset,
                            add_kdj=add_kdj, is_timefurture=is_timefurture, end_time=end_time)

    return df, df_zhibiaoliebiao


def sanitize_filename(filename):
    # Replace characters that are invalid in file names with underscores
    # (any other replacement character would work as well)
    sanitized = re.sub(r'[\\/*?:"<>|\s]', '_', filename)
    # Strip a leading dot (dot-files are hidden on some systems)
    sanitized = re.sub(r'^\.', '', sanitized)
    # Add further replacement rules here if needed
    return sanitized
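
# Example: characters that are illegal in file names (and whitespace) are
# replaced with underscores, e.g.
#   sanitize_filename('Brent 连1/合约:价格.png')  ->  'Brent_连1_合约_价格.png'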


class Config:
    # Core settings
    @property
    def logger(self): return global_config['logger']
    @property
    def dataset(self): return global_config['dataset']
    @property
    def y(self): return global_config['y']
    @property
    def is_fivemodels(self): return global_config['is_fivemodels']

    # Model parameters
    @property
    def data_set(self): return global_config['data_set']
    @property
    def input_size(self): return global_config['input_size']
    @property
    def horizon(self): return global_config['horizon']
    @property
    def train_steps(self): return global_config['train_steps']
    @property
    def val_check_steps(self): return global_config['val_check_steps']
    @property
    def rote(self): return global_config['rote']

    # Feature-engineering switches
    @property
    def is_del_corr(self): return global_config['is_del_corr']
    @property
    def is_del_tow_month(self): return global_config['is_del_tow_month']
    @property
    def is_eta(self): return global_config['is_eta']
    @property
    def is_update_eta(self): return global_config['is_update_eta']
    @property
    def is_update_eta_data(self): return global_config['is_update_eta_data']
    @property
    def is_update_report(self): return global_config['is_update_report']

    # Time parameters
    @property
    def start_year(self): return global_config['start_year']
    @property
    def end_time(self): return global_config['end_time']
    @property
    def freq(self): return global_config['freq']

    # Endpoint settings
    @property
    def upload_url(self): return global_config['upload_url']
    @property
    def login_pushreport_url(self): return global_config['login_pushreport_url']
    @property
    def login_data(self): return global_config['login_data']
    @property
    def upload_headers(self): return global_config['upload_headers']
    @property
    def upload_warning_url(self): return global_config['upload_warning_url']
    @property
    def upload_warning_data(self): return global_config['upload_warning_data']

    # Query endpoints
    @property
    def query_data_list_item_nos_url(self): return global_config['query_data_list_item_nos_url']
    @property
    def query_data_list_item_nos_data(self): return global_config['query_data_list_item_nos_data']

    # Field mappings
    @property
    def offsite_col(self): return global_config['offsite_col']
    @property
    def avg_col(self): return global_config['avg_col']
    @property
    def offsite(self): return global_config['offsite']
    @property
    def edbcodenamedict(self): return global_config['edbcodenamedict']

    # ETA settings
    @property
    def APPID(self): return global_config['APPID']
    @property
    def SECRET(self): return global_config['SECRET']

    # Database settings
    @property
    def sqlitedb(self): return global_config['sqlitedb']


config = Config()


class BinanceAPI:
    '''
    Build the signed request-header fields for the Binance-style API.
    '''

    def __init__(self, APPID, SECRET):
        self.APPID = APPID
        self.SECRET = SECRET
        self.get_signature()

    # Generate a random string to use as the nonce
    def generate_nonce(self, length=32):
        self.nonce = ''.join(random.choices(
            string.ascii_letters + string.digits, k=length))
        return self.nonce

    # Current timestamp in seconds
    def get_timestamp(self):
        return int(time.time())

    # Build the string to sign
    def build_sign_str(self):
        return f'appid={self.APPID}&nonce={self.nonce}&timestamp={self.timestamp}'

    # Compute the signature with HMAC SHA-256
    def calculate_signature(self, secret, message):
        return base64.urlsafe_b64encode(hmac.new(secret.encode('utf-8'), message.encode('utf-8'), hashlib.sha256).digest()).decode('utf-8')

    def get_signature(self):
        # Assemble the signature from the pieces above
        self.nonce = self.generate_nonce()
        self.timestamp = self.get_timestamp()
        self.sign_str = self.build_sign_str()
        self.signature = self.calculate_signature(self.SECRET, self.sign_str)
        # return self.signature
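
# A minimal usage sketch (APPID/SECRET come from the config module):
#
#   signature = BinanceAPI(config.APPID, config.SECRET)
#   headers = {'nonce': signature.nonce,
#              'timestamp': str(signature.timestamp),
#              'appid': signature.APPID,
#              'signature': signature.signature}
#
# EtaReader below builds exactly these headers from the signature object.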


class Graphs:
    # Draw a title
    @staticmethod
    def draw_title(title: str):
        # Get the sample stylesheet
        style = getSampleStyleSheet()
        # Take the heading style
        ct = style['Heading1']
        # Customize the style attributes
        ct.fontName = 'SimSun'  # font name
        ct.fontSize = 18  # font size
        ct.leading = 50  # line spacing
        ct.textColor = colors.green  # font color
        ct.alignment = 1  # centered
        ct.bold = True
        # Build and return the title paragraph
        return Paragraph(title, ct)

    # Draw a subtitle
    @staticmethod
    def draw_little_title(title: str):
        # Get the sample stylesheet
        style = getSampleStyleSheet()
        # Take the normal style
        ct = style['Normal']
        # Customize the style attributes
        ct.fontName = 'SimSun'  # font name
        ct.fontSize = 15  # font size
        ct.leading = 30  # line spacing
        ct.textColor = colors.red  # font color
        # Build and return the subtitle paragraph
        return Paragraph(title, ct)

    # Draw a regular paragraph
    @staticmethod
    def draw_text(text: str):
        # Get the sample stylesheet
        style = getSampleStyleSheet()
        # Take the normal style
        ct = style['Normal']
        ct.fontName = 'SimSun'
        ct.fontSize = 12
        ct.wordWrap = 'CJK'  # automatic line wrapping for CJK text
        ct.alignment = 0  # left-aligned
        ct.firstLineIndent = 32  # first-line indent
        ct.leading = 25
        return Paragraph(text, ct)

    # Draw a table
    @staticmethod
    def draw_table(*args):
        # Column widths
        col_width = args[0]
        style = [
            ('FONTNAME', (0, 0), (-1, -1), 'SimSun'),  # font
            ('FONTSIZE', (0, 0), (-1, 0), 12),  # font size of the header row
            ('FONTSIZE', (0, 1), (-1, -1), 10),  # font size of the remaining rows
            ('BACKGROUND', (0, 0), (-1, 0), '#d5dae6'),  # header background
            ('ALIGN', (0, 0), (-1, -1), 'CENTER'),  # center horizontally
            ('ALIGN', (0, 1), (-1, -1), 'LEFT'),  # left-align the body rows
            ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),  # center all cells vertically
            ('TEXTCOLOR', (0, 0), (-1, -1), colors.darkslategray),  # text color
            ('GRID', (0, 0), (-1, -1), 0.5, colors.grey),  # grey grid lines, 0.5 wide
            # ('SPAN', (0, 1), (0, 2)),  # merge rows 2-3 of the first column
            # ('SPAN', (0, 3), (0, 4)),  # merge rows 4-5 of the first column
            # ('SPAN', (0, 5), (0, 6)),  # merge rows 6-7 of the first column
            # ('SPAN', (0, 7), (0, 8)),  # merge rows 8-9 of the first column
        ]
        table = Table(args[1:], colWidths=col_width, style=style)
        return table

    # Draw a bar chart
    @staticmethod
    def draw_bar(bar_data: list, ax: list, items: list):
        drawing = Drawing(500, 250)
        bc = VerticalBarChart()
        bc.x = 45  # x position of the chart
        bc.y = 45  # y position of the chart
        bc.height = 200  # chart height
        bc.width = 350  # chart width
        bc.data = bar_data
        bc.strokeColor = colors.black  # color of the top and right axes
        bc.valueAxis.valueMin = 5000  # y-axis minimum
        bc.valueAxis.valueMax = 26000  # y-axis maximum
        bc.valueAxis.valueStep = 2000  # y-axis step
        bc.categoryAxis.labels.dx = 2
        bc.categoryAxis.labels.dy = -8
        bc.categoryAxis.labels.angle = 20
        bc.categoryAxis.categoryNames = ax
        # Legend
        leg = Legend()
        leg.fontName = 'SimSun'
        leg.alignment = 'right'
        leg.boxAnchor = 'ne'
        leg.x = 475  # x position of the legend
        leg.y = 240
        leg.dxTextSpace = 10
        leg.columnMaximum = 3
        leg.colorNamePairs = items
        drawing.add(leg)
        drawing.add(bc)
        return drawing

    # Draw an image
    @staticmethod
    def draw_img(path):
        img = Image(path)  # load the image at the given path
        img.drawWidth = 20*cm  # image width
        img.drawHeight = 10*cm  # image height
        return img
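
# A minimal sketch of assembling a PDF report from the helpers above with
# the reportlab SimpleDocTemplate imported at the top of this module (the
# output file name and texts here are hypothetical):
#
#   content = [Graphs.draw_title('原油价格预测报告'),
#              Graphs.draw_text('正文段落...'),
#              Graphs.draw_img(os.path.join(config.dataset, '指标数据自相关图.png'))]
#   SimpleDocTemplate('report.pdf', pagesize=letter).build(content)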


# Row-style function: highlight daily-frequency rows
# ('日' is an assumption; the original single-character frequency literal
# was lost in extraction)
def style_row(row):
    if '日' in row['频度']:
        return ['background-color: yellow'] * len(row)
    else:
        return ['background-color: gray'] * len(row)
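
# style_row is meant for pandas' Styler, which passes one row at a time
# when axis=1 (a sketch; the frame must contain a '频度' column and the
# output file name is hypothetical):
#
#   styled = df_zhibiaoliebiao.style.apply(style_row, axis=1)
#   styled.to_excel(os.path.join(config.dataset, '指标列表.xlsx'))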


class EtaReader():
    def __init__(self, signature, classifylisturl, classifyidlisturl, edbcodedataurl, edbcodelist, edbdatapushurl, edbdeleteurl, edbbusinessurl):
        '''
        Initialize an EtaReader instance.

        Args:
            signature (str): signature used for API requests.
            classifylisturl (str): URL of the classification list.
            classifyidlisturl (str): URL of the classification ID list.
            edbcodedataurl (str): URL of the EDB code data.
            edbdatapushurl (str): URL for pushing EDB data.
            edbcodelist (list): list of EDB codes.
            edbdeleteurl (str): URL for deleting EDB data.
            edbbusinessurl (str): URL of the EDB business data.

        Returns:
            None
        '''
        self.signature = signature
        self.classifylisturl = classifylisturl
        self.classifyidlisturl = classifyidlisturl
        self.edbcodedataurl = edbcodedataurl
        self.edbdatapushurl = edbdatapushurl
        self.edbcodelist = edbcodelist
        self.edbdeleteurl = edbdeleteurl
        self.edbbusinessurl = edbbusinessurl

    def filter_yuanyou_data(self, ClassifyName, data):
        '''
        Rules for which indicator names to keep.
        '''
        # Drop names containing any of these keywords
        if any(keyword in data for keyword in ['运费', '检修', '波动率', '地缘政治', '股价',
                                               '同比', '环比', '环差', '裂差', '4WMA', '变频', '道琼斯', '标普500', '纳斯达克',
                                               '四周均值', '名占比', '残差', 'DMA',
                                               '连7-连9', '4周平均', '4周均值', '滚动相关性', '日本']):
            return False

        # Keep the features we need
        # Drop anything under the '分析' (analysis) classification
        if ClassifyName == '分析':
            return False

        # Under '库存' (inventory), keep only names with these keywords
        if ClassifyName == '库存':
            if any(keyword in data for keyword in ['原油', '美国', '全球', '中国', '富查伊拉', 'ARA']):
                return True
            else:
                pass
        else:
            pass

        # Under '持仓' (positions), drop non-fund data
        if ClassifyName == '持仓':
            if '基金' not in data:
                return False
            else:
                pass
        else:
            pass

        # Under '需求' (demand), keep flight data only for China and the US
        if ClassifyName == '需求':
            if '航班' in data:
                if '中国' in data or '美国' in data:
                    return True
                else:
                    return False
            else:
                pass
        else:
            pass

        # Under '期货市场' (futures market), keep only the first of each
        # homogeneous series
        if ClassifyName == '期货市场':
            # Drop c1-cN spreads beyond c9
            if 'c1-c' in data:
                try:
                    c = int(data.split('c1-c')[1])
                except ValueError:
                    return False
                if c > 9:
                    return False
                else:
                    pass
            else:
                pass

            # Homogeneous series, matched by string prefix
            strstartdict = {'ICE Brent c': "ICE Brent c14",
                            'NYMWX WTI c': "NYMWX WTI c5",
                            'INE SC c': "INE SC c1",
                            'EFS c': "EFS c",
                            'Dubai Swap c': "Dubai Swap c1",
                            'Oman Swap c': "Oman Swap c1",
                            'DME Oman c': "DME Oman c1",
                            'Murban Futures c': "Murban Futures c1",
                            'Dubai连合约价格': 'Dubai连1合约价格',
                            '美国RBOB期货月份合约价格': '美国RBOB期货2309月份合约价格',
                            'Brent连合约价格': 'Brent连1合约价格',
                            'WTI连合约价格': 'WTI连1合约价格',
                            '布伦特连合约价格': 'Brent连1合约价格',
                            'Brent 连合约价格': 'Brent连1合约价格',
                            'Dubai连合约价格': 'Dubai连1合约价格',  # duplicate key; the later entry wins
                            'Brent连': 'Brent连1合约价格',
                            'brent连': 'Brent连1合约价格',
                            }
            # Check whether the name's prefix is one of strstartdict's keys
            match = re.match(r'([a-zA-Z\s]+)(\d+)', data)
            if match:
                part1 = match.group(1)
                part2 = match.group(2)
                if part1 in [i for i in strstartdict.keys()]:
                    if data == strstartdict[part1]:
                        return True
                    else:
                        return False

            # e.g. data = 'Brent 连7合约价格'
            # Check whether the name with digits removed is one of
            # strstartdict's keys
            match = re.findall(r'\D+', data)
            if match:
                if len(match) == 2:
                    part1 = match[0]
                    part2 = match[1]
                    if part1+part2 in [i for i in strstartdict.keys()]:
                        if data == strstartdict[part1+part2]:
                            return True
                        else:
                            return False
                    else:
                        pass
                elif len(match) == 1:
                    match = re.findall(r'\D+', data)
                    part1 = match[0]
                    if part1 in [i for i in strstartdict.keys()]:
                        if data == strstartdict[part1]:
                            return True
                        else:
                            return False
                    else:
                        pass
                else:
                    pass

        # Drop the Kpler data source
        if 'Kpler' in ClassifyName or 'kpler' in ClassifyName:
            return False

        return True

    def filter_pp_data(self, ClassifyName, data):
        '''
        Rules for which indicator names to keep.
        '''
        # Drop names containing these keywords (disabled)
        # if any(keyword in data for keyword in ['运费', '检修', '波动率', '地缘政治', '股价',
        #                                        '同比', '环比', '环差', '裂差', '4WMA', '变频', '道琼斯', '标普500', '纳斯达克',
        #                                        '四周均值', '名占比', '残差', 'DMA',
        #                                        '连7-连9', '4周平均', '4周均值', '滚动相关性', '日本']):
        #     return False

        # Keep names containing these keywords
        if any(keyword in data for keyword in ['拉丝']):
            return True

        # Keep the features we need
        # Drop anything under the '期货市场' (futures market) classification
        if ClassifyName == '期货市场':
            return False
        else:
            pass

        # Keep everything under '库存' (inventory)
        if ClassifyName == '库存':
            return True
        else:
            pass
        # Keep everything under '进出口' (imports/exports)
        if ClassifyName == '进出口':
            return True
        else:
            pass
        # Keep everything under '价差' (spreads)
        if ClassifyName == '价差':
            return True
        else:
            pass
        # Keep everything under '供应' (supply)
        if ClassifyName == '供应':
            return True
        else:
            pass
        # Keep everything under '需求' (demand)
        if ClassifyName == '需求':
            return True
        else:
            pass

        return True

    # Fetch indicator data by EDB code
    def edbcodegetdata(self, df, EdbCode, EdbName):
        # Build the per-indicator URL
        url = self.edbcodedataurl + str(EdbCode)
        # Send the GET request
        response = requests.get(url, headers=self.headers)

        # Check the status code
        if response.status_code == 200:
            data = response.json()  # the endpoint returns JSON
            all_data_items = data.get('Data')
            # Convert the list to a DataFrame
            df3 = pd.DataFrame(all_data_items, columns=[
                               'DataTime', 'Value', 'UpdateTime'])
            # df3 = pd.read_json(all_data_items, orient='records')
            # Drop the UpdateTime column
            df3 = df3.drop(columns=['UpdateTime'])
            # df3.set_index('DataTime')
            df3.rename(columns={'Value': EdbName}, inplace=True)
            # Merge into df
            df = pd.merge(df, df3, how='outer',
                          on='DataTime', suffixes=('', '_y'))
            # Sort by time
            df = df.sort_values(by='DataTime', ascending=True)
            return df
        else:
            # Request failed: log the error and raise
            config.logger.info(
                f'Error: {response.status_code}, {response.text}')
            raise Exception(f'Error: {response.status_code}, {response.text}')

    def get_eta_api_yuanyou_data(self, data_set, dataset=''):
        '''
        Fetch crude-oil data from the ETA API.

        Args:
            data_set (str): dataset name.
            dataset (str): dataset ID, empty by default.

        Returns:
            tuple: (df_zhibiaoshuju, df_zhibiaoliebiao).
        '''
        today = datetime.date.today().strftime("%Y-%m-%d")

        # Request headers; more parameters can be added here
        self.headers = {
            'nonce': self.signature.nonce,
            'timestamp': str(self.signature.timestamp),
            'appid': self.signature.APPID,
            'signature': self.signature.signature
        }

        # Walk the indicator list; for indicators at the required
        # frequency, fetch the data and collect it into the sheets of the
        # output workbook.
        '''
        df  = sheet 指标列表: 指标分类-指标名称-指标id-频度
        df1 = sheet 指标数据: 时间-指标名称1-指标名称2...
        '''
        # Build the empty DataFrames df and df1
        df = pd.DataFrame(columns=[
                          '指标分类', '指标名称', '指标id', '频度', '指标来源', '来源id', '最后更新时间', '更新周期', '预警日期', '停更周期'])
        df1 = pd.DataFrame(columns=['DataTime'])

        # The API is not reachable from the public internet; confirm this
        # is running on the intranet
        try:
            # GET the classification list
            response = requests.get(self.classifylisturl, headers=self.headers)
        except requests.exceptions.RequestException as e:
            raise Exception(f"Request failed; confirm this is the intranet environment: {e}")

        # Check the status code
        if response.status_code == 200:
            # Success: process the response body
            data = response.json()  # the endpoint returns JSON
            # config.logger.info(data.get('Data'))

            # The fixed parent classification to keep
            fixed_value = 1214

            # Keep only items whose ParentId equals the fixed value
            filtered_data = [item for item in data.get(
                'Data') if item.get('ParentId') == fixed_value]

            # Loop over filtered_data to get the ClassifyIds we need
            n = 0
            for item in filtered_data:
                n += 1
                # if n > 50:
                #     break
                ClassifyId = item["ClassifyId"]  # request parameter of the per-classification indicator list
                ClassifyName = item["ClassifyName"]  # saved into df's 指标分类 column

                # Fetch the indicator list of this classification
                url = self.classifyidlisturl + str(ClassifyId)
                response = requests.get(url, headers=self.headers)
                if response.status_code == 200:
                    # config.logger.info(response.text)
                    data2 = response.json()
                    Data = data2.get('Data')
                    for i in Data:
                        # s += 1
                        EdbCode = i.get('EdbCode')
                        # Indicator name, saved into df2's and df's 指标名称 column
                        EdbName = i.get('EdbName')
                        Frequency = i.get('Frequency')  # saved into df's 频度 column
                        SourceName = i.get('SourceName')  # saved into df's 指标来源 column
                        Source = i.get('Source')  # saved into df's 来源id column
                        Unit = i.get('Unit')  # saved into df's 单位 column

                        # Skip frequencies other than daily or weekly
                        # ('日'/'周' restored; the single-character literals
                        # were lost in the source)
                        if Frequency not in ['日度', '周度', '日', '周']:
                            continue

                        # For manual data (Source 9), keep only names
                        # containing 海运出口 or 海运进口
                        if Source == 9 and not ('海运出口' in EdbName or '海运进口' in EdbName):
                            continue

                        # Skip Wind data
                        if Source == 2:
                            continue

                        # Decide whether to keep this name
                        isSave = self.filter_yuanyou_data(
                            ClassifyName, EdbName)
                        if isSave:
                            # Fetch the data into df1
                            df1 = self.edbcodegetdata(df1, EdbCode, EdbName)
                            # Take the DataTime and value columns of this indicator
                            edbname_df = df1[['DataTime', f'{EdbName}']]
                            edbname_df = edbname_df.dropna()

                            if len(edbname_df) == 0:
                                config.logger.info(
                                    f'indicator {EdbName} has no data')
                                continue
                            try:
                                time_sequence = edbname_df['DataTime'].values.tolist(
                                )[-10:]
                            except IndexError:
                                time_sequence = edbname_df['DataTime'].values.tolist(
                                )
                            # Count weekday occurrences with a Counter
                            from collections import Counter
                            weekday_counter = Counter(datetime.datetime.strptime(
                                time_str, "%Y-%m-%d").strftime('%A') for time_str in time_sequence)
                            # Take the most common weekday
                            try:
                                most_common_weekday = weekday_counter.most_common(1)[
                                    0][0]
                                # Warning date: two weeks after the last observation
                                warning_date = (datetime.datetime.strptime(
                                    time_sequence[-1], "%Y-%m-%d") + datetime.timedelta(weeks=2)).strftime("%Y-%m-%d")
                                stop_update_period = (datetime.datetime.strptime(
                                    today, "%Y-%m-%d") - datetime.datetime.strptime(time_sequence[-1], "%Y-%m-%d")).days // 7
                            except IndexError:
                                most_common_weekday = '其他'
                                stop_update_period = 0
                            if '日' in Frequency:
                                most_common_weekday = '每天'
                                warning_date = (datetime.datetime.strptime(
                                    time_sequence[-1], "%Y-%m-%d") + datetime.timedelta(days=3)).strftime("%Y-%m-%d")
                                stop_update_period = (datetime.datetime.strptime(
                                    today, "%Y-%m-%d") - datetime.datetime.strptime(time_sequence[-1], "%Y-%m-%d")).days

                            # Save frequency, name, classification and id into df
                            df2 = pd.DataFrame({'指标分类': ClassifyName,
                                                '指标名称': EdbName,
                                                '指标id': EdbCode,
                                                '单位': Unit,
                                                '频度': Frequency,
                                                '指标来源': SourceName,
                                                '来源id': Source,
                                                '最后更新时间': edbname_df['DataTime'].values[-1],
                                                '更新周期': most_common_weekday,
                                                '预警日期': warning_date,
                                                '停更周期': stop_update_period}, index=[0],
                                               )

                            # df = pd.merge(df, df2, how='outer')
                            df = pd.concat([df, df2])
                        else:
                            config.logger.info(f'skipping indicator {EdbName}')

        # Codes from edbcodelist that are not yet in the 指标id column
        new_list = [
            item for item in self.edbcodelist if item not in df['指标id'].tolist()]
        config.logger.info(new_list)
        # Fetch data for each remaining code into df1
        for item in new_list:
            config.logger.info(item)
            # Add the code to df['指标id']
            try:
                itemname = config.edbcodenamedict[item]
            except KeyError:
                itemname = item

            df1 = self.edbcodegetdata(df1, item, itemname)
            df = pd.concat([df, pd.DataFrame(
                {'指标分类': '其他', '指标名称': itemname, '指标id': item, '频度': '其他', '指标来源': '其他', '来源id': '其他'}, index=[0])])

        # Sort by time
        df1.sort_values('DataTime', inplace=True, ascending=False)
        df1.rename(columns={'DataTime': 'date'}, inplace=True)
        # df1.dropna(inplace=True)
        # Drop rows dated after today
        df1 = df1[df1['date'] <= datetime.datetime.now().strftime('%Y-%m-%d')]
        config.logger.info(df1.head())
        # config.logger.info(f'{df1.head()}')

        df_zhibiaoshuju = df1.copy()
        df_zhibiaoliebiao = df.copy()
        return df_zhibiaoshuju, df_zhibiaoliebiao
2025-03-05 09:47:02 +08:00
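    # A minimal, self-contained sketch (hypothetical helper, not called anywhere in this
    # module) of the update-cycle detection above: take the most common weekday of the
    # latest observations, derive a warning date two weeks after the last data point,
    # and count the whole weeks elapsed since it.
    def _sketch_update_cycle(self, time_sequence, today):
        """Illustration only; `time_sequence` is a list of 'YYYY-MM-DD' strings, oldest last."""
        from collections import Counter
        weekday_counter = Counter(datetime.datetime.strptime(t, "%Y-%m-%d").strftime('%A')
                                  for t in time_sequence[-10:])
        most_common_weekday = weekday_counter.most_common(1)[0][0]
        last = datetime.datetime.strptime(time_sequence[-1], "%Y-%m-%d")
        warning_date = (last + datetime.timedelta(weeks=2)).strftime("%Y-%m-%d")
        stop_update_period = (datetime.datetime.strptime(today, "%Y-%m-%d") - last).days // 7
        return most_common_weekday, warning_date, stop_update_period

    # e.g. _sketch_update_cycle(['2024-11-01', '2024-11-08', '2024-11-15'], '2024-11-29')
    # -> ('Friday', '2024-11-29', 2)
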
    def get_eta_api_pp_data(self, data_set, dataset=''):
        global ClassifyId
        today = datetime.date.today().strftime("%Y-%m-%d")

        # Signed request headers for the ETA API
        self.headers = {
            'nonce': self.signature.nonce,  # one-time random string
            'timestamp': str(self.signature.timestamp),  # request timestamp
            'appid': self.signature.APPID,  # application id
            'signature': self.signature.signature  # request signature
        }
        # From the classification list, check each indicator's frequency; for daily ones,
        # take the UniqueCode, fetch the series and save it into sheets of an xlsx file.
        '''
        df  = sheet 指标列表: stores 指标分类-指标名称-指标id-频度
        df1 = sheet 指标数据: stores 时间-指标名称1-指标名称2...
        '''
        # Build the empty DataFrames df and df1
        df = pd.DataFrame(columns=['指标分类', '指标名称', '指标id', '频度'])
        df1 = pd.DataFrame(columns=['DataTime'])
        # The API is unreachable from the public internet; confirm you are on the intranet
        try:
            # GET request for the indicator classification list
            response = requests.get(self.classifylisturl, headers=self.headers)
        except requests.exceptions.RequestException as e:
            raise Exception(f"请求失败,请确认是否为内网环境: {e}", "\033[0m")

        # Check the response status code
        if response.status_code == 200:
            data = response.json()  # the endpoint returns JSON
            # Keep only the classification items whose ParentId equals the fixed ClassifyId
            fixed_value = ClassifyId
            filtered_data = [item for item in data.get(
                'Data') if item.get('ParentId') == fixed_value]
            # Loop over filtered_data to obtain each child ClassifyId
            n = 0
            for item in filtered_data:
                n += 1
                # if n > 50:
                #     break
                ClassifyId = item["ClassifyId"]  # request parameter for the indicator-list endpoint
                ClassifyName = item["ClassifyName"]  # stored in the 指标分类 column of df

                # Fetch the indicator list for this classification
                url = self.classifyidlisturl + str(ClassifyId)
                response = requests.get(url, headers=self.headers)
                if response.status_code == 200:
                    data2 = response.json()
                    Data = data2.get('Data')
                    for i in Data:
                        EdbCode = i.get('EdbCode')
                        # Indicator name, stored in the name columns of df2 and df
                        EdbName = i.get('EdbName')
                        Frequency = i.get('Frequency')  # stored in the 频度 column of df
                        # Skip indicators whose frequency is not daily or weekly
                        if Frequency not in ['日度', '周度', '日', '周']:
                            continue

                        # Decide whether this indicator should be kept
                        isSave = self.filter_pp_data(ClassifyName, EdbName)
                        if isSave:
                            # Save frequency, indicator name, classification and indicator id into df
                            df2 = pd.DataFrame(
                                {'指标分类': ClassifyName, '指标名称': EdbName, '指标id': EdbCode, '频度': Frequency}, index=[0])
                            df = pd.concat([df, df2])
                            df1 = self.edbcodegetdata(df1, EdbCode, EdbName)
                        else:
                            config.logger.info(f'跳过指标 {EdbName}')

            # Collect the configured indicator ids that are not yet in the 指标id column
            new_list = [
                item for item in self.edbcodelist if item not in df['指标id'].tolist()]
            config.logger.info(new_list)
            # Fetch data for each remaining indicator and append it to df1
            for item in new_list:
                config.logger.info(item)
                # Resolve a readable name for the indicator id, falling back to the id itself
                try:
                    itemname = config.edbcodenamedict[item]
                except KeyError:
                    itemname = item

                df1 = self.edbcodegetdata(df1, item, itemname)
                df = pd.concat([df, pd.DataFrame(
                    {'指标分类': '其他', '指标名称': itemname, '指标id': item, '频度': '其他'}, index=[0])])

            # Sort by date, newest first
            df1.sort_values('DataTime', inplace=True, ascending=False)
            df1.rename(columns={'DataTime': 'date'}, inplace=True)
            # Drop rows dated later than today
            df1 = df1[df1['date'] <= datetime.datetime.now().strftime('%Y-%m-%d')]
            config.logger.info(df1.head())

            # Save both frames into sheets of the xlsx file
            with pd.ExcelWriter(os.path.join(dataset, data_set)) as file:
                df1.to_excel(file, sheet_name='指标数据', index=False)
                df.to_excel(file, sheet_name='指标列表', index=False)
            df_zhibiaoshuju = df1.copy()
            df_zhibiaoliebiao = df.copy()
            return df_zhibiaoshuju, df_zhibiaoliebiao

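    # Usage sketch (file names are assumptions; requires an initialized instance of this
    # class with valid ETA credentials and an intranet connection):
    # df_zhibiaoshuju, df_zhibiaoliebiao = reader.get_eta_api_pp_data(
    #     data_set='聚烯烃指标数据.xlsx', dataset='dataset')
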
    def push_data(self, data):
        today = datetime.date.today().strftime("%Y-%m-%d")

        # Signed request headers for the ETA API
        self.headers = {
            'nonce': self.signature.nonce,  # one-time random string
            'timestamp': str(self.signature.timestamp),  # request timestamp
            'appid': self.signature.APPID,  # application id
            'signature': self.signature.signature  # request signature
        }
        # POST the data to the upload endpoint
        config.logger.info(f'请求参数:{data}')
        response = requests.post(
            self.edbdatapushurl, headers=self.headers, data=json.dumps(data))

        # Check the response status code
        if response.status_code == 200:
            data = response.json()  # the endpoint returns JSON
            config.logger.info(f'上传成功,响应为:{data}')
        else:
            # Request failed: log the error and raise
            config.logger.info(
                f'Error: {response.status_code}, {response.text}')
            raise Exception(f'Error: {response.status_code}, {response.text}')

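    # Usage sketch: push_data expects `data` to already match the JSON body required by
    # the edbdatapushurl endpoint (its schema is not documented in this module); the dict
    # is serialized with json.dumps and POSTed with the signed headers:
    # reader.push_data(data)
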
    def del_zhibiao(self, IndexCodeList):
        today = datetime.date.today().strftime("%Y-%m-%d")

        # Signed request headers for the ETA API
        self.headers = {
            'nonce': self.signature.nonce,  # one-time random string
            'timestamp': str(self.signature.timestamp),  # request timestamp
            'appid': self.signature.APPID,  # application id
            'signature': self.signature.signature  # request signature
        }
        data = {
            "IndexCodeList": IndexCodeList  # list of indicator codes to delete
        }
        # POST the delete request
        response = requests.post(
            self.edbdeleteurl, headers=self.headers, data=json.dumps(data))

        # Check the response status code
        if response.status_code == 200:
            data = response.json()  # the endpoint returns JSON
            # logger.info takes a single message string, so format the response into it
            config.logger.info(f'删除成功,响应为:{data}')
        else:
            # Request failed: log the error and raise
            config.logger.info(
                f'Error: {response.status_code}, {response.text}')
            raise Exception(f'Error: {response.status_code}, {response.text}')

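    # Usage sketch (codes are hypothetical examples): delete whole indicators by code.
    # reader.del_zhibiao(["W001067", "W001068"])
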
    def del_business(self, data):
        '''
        Endpoint documentation:
        https://console-docs.apipost.cn/preview/fce869601d0be1d9/9a637c2f9ed0c589?target_id=d3cafcbf-a68c-42b3-b105-7bbd0e95a9cd
        Request body:
        {
            "IndexCode": "W001067",     // indicator code
            "StartDate": "2020-04-20",  // first date to delete (>=); if it equals EndDate, only that date is deleted
            "EndDate": "2024-05-28"     // last date to delete (<=); if it equals StartDate, only that date is deleted
        }
        '''
        today = datetime.date.today().strftime("%Y-%m-%d")

        # Signed request headers for the ETA API
        self.headers = {
            'nonce': self.signature.nonce,  # one-time random string
            'timestamp': str(self.signature.timestamp),  # request timestamp
            'appid': self.signature.APPID,  # application id
            'signature': self.signature.signature  # request signature
        }
        # POST the delete request
        response = requests.post(
            self.edbbusinessurl, headers=self.headers, data=json.dumps(data))

        # Check the response status code
        if response.status_code == 200:
            data = response.json()  # the endpoint returns JSON
            config.logger.info(f'删除成功,响应为:{data}')
        else:
            # Request failed: log the error and raise
            config.logger.info(
                f'Error: {response.status_code}, {response.text}')
            raise Exception(f'Error: {response.status_code}, {response.text}')

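    # Usage sketch, mirroring the request body documented in the docstring above:
    # reader.del_business({
    #     "IndexCode": "W001067",
    #     "StartDate": "2020-04-20",
    #     "EndDate": "2024-05-28",
    # })
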
def get_market_data(end_time, df):
    """
    Fetch market data and merge it into df.
    """
    # Get the auth token
    token = get_head_auth_report()
    # Build the request parameters; dateEnd uses the YYYYMMDD form
    config.query_data_list_item_nos_data['data']['dateEnd'] = end_time.replace(
        '-', '')
    # Send the request
    headers = {"Authorization": token}
    config.logger.info('获取数据中...')
    items_res = requests.post(url=config.query_data_list_item_nos_url, headers=headers,
                              json=config.query_data_list_item_nos_data, timeout=(3, 35))
    json_data = json.loads(items_res.text)
    config.logger.info(f"获取到的数据:{json_data}")

    df3 = pd.DataFrame(json_data['data'])
    # Split df3 by dataItemNo into one frame per indicator, then merge them on dataDate
    df2 = pd.DataFrame()
    for i in df3['dataItemNo'].unique():
        df1 = df3[df3['dataItemNo'] == i]
        df1 = df1[['dataDate', 'dataValue']]
        df1 = df1.rename(columns={'dataValue': i})
        if len(df2) == 0:
            df2 = df1
            continue
        df2 = pd.merge(df2, df1, how='left')
    df2 = df2.rename(columns={'dataDate': 'date'})
    # Convert 20240101 to 2024-01-01
    df2['date'] = pd.to_datetime(df2['date'], format='%Y%m%d')
    df2['date'] = df2['date'].dt.strftime('%Y-%m-%d')
    df = pd.merge(df, df2, how='left', on='date')
    return df

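# Usage sketch (assumes config.query_data_list_item_nos_url/_data are configured and the
# report service is reachable); `df` must carry a 'date' column in 'YYYY-MM-DD' form:
# df = get_market_data('2025-03-05', df)
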
def get_high_low_data(df):
    # Read the Excel file, skipping the preamble rows (header on row index 5)
    df1 = pd.read_excel(os.path.join(config.dataset, '数据项下载.xls'), header=5, names=[
        'numid', 'date', 'Brentzdj', 'Brentzgj'])
    # Merge the Brent low/high columns into df
    df = pd.merge(df, df1, how='left', on='date')
    return df


# Time features: year, month, day of year, weekday, week of year, quarter, day of month,
# day of quarter, and flags for month/quarter/year start and end
def addtimecharacteristics(df, dataset):
    """
    Add date-derived feature columns to the input DataFrame.
    Args:
        df (pandas.DataFrame): DataFrame containing the datetime column 'ds'
    Returns:
        pandas.DataFrame: the DataFrame with the feature columns added
    """
    df['year'] = df['ds'].dt.year
    df['month'] = df['ds'].dt.month
    df['day'] = df['ds'].dt.day
    df['dayofweek'] = df['ds'].dt.dayofweek
    df['weekofyear'] = df['ds'].dt.isocalendar().week
    df['dayofyear'] = df['ds'].dt.dayofyear
    df['quarternum'] = df['ds'].dt.quarter
    # Convert the ds column to quarterly Period objects
    df['quarter'] = df['ds'].dt.to_period('Q')
    # Start date of each quarter
    df['quarter_start'] = df['quarter'].dt.to_timestamp('s')
    # Day number within the quarter
    df['dayofquarter'] = (df['ds'] - df['quarter_start']).dt.days + 1
    # Month start flag
    df['is_month_start'] = df['ds'].dt.is_month_start.astype(int)
    # Month end flag
    df['is_month_end'] = df['ds'].dt.is_month_end.astype(int)
    # Quarter start flag
    df['is_quarter_start'] = df['ds'].dt.is_quarter_start.astype(int)
    # Quarter end flag
    df['is_quarter_end'] = df['ds'].dt.is_quarter_end.astype(int)
    # Year start flag
    df['is_year_start'] = df['ds'].dt.is_year_start.astype(int)
    # Year end flag
    df['is_year_end'] = df['ds'].dt.is_year_end.astype(int)
    # Week of month (weeks run Monday-Sunday; the week containing the 1st is week 1):
    # Monday of the week containing the current date
    df['current_monday'] = df['ds'] - \
        pd.to_timedelta(df['ds'].dt.dayofweek, unit='D')
    # Monday of the week containing the first day of the month
    df['first_monday'] = df['ds'].dt.to_period('M').dt.start_time - pd.to_timedelta(
        df['ds'].dt.to_period('M').dt.start_time.dt.dayofweek, unit='D')
    # Week difference + 1 gives the week-of-month number
    df['weekofmonth'] = (
        (df['current_monday'] - df['first_monday']).dt.days // 7) + 1
    df['yearmonthweeks'] = df['year'].astype(
        str) + df['month'].astype(str) + df['weekofmonth'].astype(str)
    df.drop(columns=['current_monday', 'first_monday'], inplace=True)
    # Drop the helper columns quarter_start and quarter
    df.drop(columns=['quarter_start', 'quarter'], inplace=True)
    df.to_csv(os.path.join(dataset, '指标数据添加时间特征.csv'), index=False)
    return df
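
# Usage sketch: build a small frame with a datetime column 'ds' and append the features;
# the function also writes '指标数据添加时间特征.csv' under `dataset` (path is an example):
# demo = pd.DataFrame({'ds': pd.date_range('2024-01-01', periods=5, freq='D')})
# demo = addtimecharacteristics(demo, dataset='dataset')
# demo[['ds', 'weekofmonth']]  # 2024-01-01 is a Monday, so all five days fall in week 1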