123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208 |
- import datetime
- import multiprocessing
- import os
- import sys
- sys.path.insert(0, os.path.abspath(__file__).split("tmp_file")[0])
- import pandas as pd
- from utils.file.trans_methods import read_file_to_df, read_excel_files
- def get_time_space_count(start_time, end_time, time_space=1):
- """
- 获取俩个时间之间的个数
- :return: 查询时间间隔
- """
- if isinstance(start_time, str):
- start_time = datetime.datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')
- if isinstance(end_time, str):
- end_time = datetime.datetime.strptime(end_time, '%Y-%m-%d %H:%M:%S')
- delta = end_time - start_time
- total_seconds = delta.days * 24 * 60 * 60 + delta.seconds
- return abs(int(total_seconds / time_space)) + 1
def save_percent(value, save_decimal=7):
    """Express the ratio *value* as a percentage.

    The ratio is rounded to *save_decimal* decimal places first, then
    scaled by 100.
    """
    rounded = round(value, save_decimal)
    return rounded * 100
def read_and_select(file):
    """Compute per-turbine data-quality metrics from one turbine's CSV export.

    The file name (without ``.csv``) is taken as the turbine id (``wecnum``).
    Completeness is judged against the expected number of 5-second records in
    four fixed measurement-campaign windows.

    :param file: path to a per-turbine CSV with at least the columns
                 ``systime``, ``wecnum``, ``iwinfil`` (wind speed) and
                 ``power``.
    :return: single-row ``pd.DataFrame`` with the quality metrics
             (duplicate rate, missing rate, anomaly counts/rate).
    """
    result_df = pd.DataFrame()
    df = pd.read_csv(file)
    df['systime'] = pd.to_datetime(df['systime'], errors='coerce')
    # All data columns except the timestamp and the turbine id.
    read_cols = list(df.columns)
    read_cols.remove('systime')
    read_cols.remove('wecnum')
    wind_name = os.path.basename(file).replace('.csv', '')
    result_df['wecnum'] = [wind_name]
    # Expected record count: 5-second resolution over the four campaign windows.
    count = sum([
        get_time_space_count('2024-11-12 00:00:00', '2024-11-19 12:15:35', 5),
        get_time_space_count('2024-12-02 00:00:00', '2024-12-31 23:59:55', 5),
        get_time_space_count('2025-01-01 00:00:00', '2025-01-21 23:59:55', 5),
        get_time_space_count('2025-01-31 00:00:00', '2025-02-04 23:59:55', 5),
    ])
    print(df['systime'].min(), df['systime'].max(), count)
    # Rows sharing a timestamp with another row count as duplicates.
    repeat_time_count = df.shape[0] - len(df['systime'].unique())
    print(wind_name, count, repeat_time_count)
    result_df['重复率'] = [save_percent(repeat_time_count / count)]
    result_df['重复次数'] = [repeat_time_count]
    result_df['总记录数'] = [count]
    # Force numeric dtype so non-numeric junk becomes NaN and drops out of the
    # non-null counts below; known version/text columns are left untouched.
    for read_col in read_cols:
        if read_col not in ['systime', 'plcvernew', 'dmsver', 'scadaver', 'collectime']:
            df[read_col] = pd.to_numeric(df[read_col], errors='coerce')
    # groupby(...).count() yields the non-null cell count per column; one group
    # per file is expected, so row 0 carries this turbine's counts.
    group_df = df.groupby(by=['wecnum']).count()
    group_df.reset_index(inplace=True)
    count_df = pd.DataFrame(group_df)
    total_count = count_df[read_cols].values[0].sum()
    print(wind_name, total_count, count * len(read_cols))
    result_df['平均缺失率,单位%'] = [save_percent(1 - total_count / (count * len(read_cols)))]
    del group_df
    # Wind-speed plausibility check: values outside [0, 80] are abnormal.
    fengsu_count = 0
    fengsu_cols = ['iwinfil']
    fengsu_str = ''
    for col in fengsu_cols:
        now_count = df[(df[col] < 0) | (df[col] > 80)].shape[0]
        fengsu_count = fengsu_count + now_count
        # Report the per-column count (the old code used the running total,
        # which is identical while the column list has a single entry).
        fengsu_str = fengsu_str + ',' + col + ':' + str(now_count)
    result_df['风速异常'] = [fengsu_str]
    # Power plausibility check: values outside [-200, 3000] are abnormal.
    gonglv_cols = ['power']
    gonglv_count = 0
    gonglv_str = ''
    for col in gonglv_cols:
        now_count = df[(df[col] < -200) | (df[col] > 3000)].shape[0]
        gonglv_count = gonglv_count + now_count
        gonglv_str = gonglv_str + ',' + col + ':' + str(now_count)
    result_df['功率异常'] = [gonglv_str]
    # BUG FIX: the original summed fengsu_count twice, silently ignoring the
    # power anomalies; the average must combine wind-speed AND power counts.
    result_df['平均异常率'] = [
        save_percent((fengsu_count + gonglv_count) / ((len(fengsu_cols) + len(gonglv_cols)) * count))]
    return result_df
def save_to_csv(df: pd.DataFrame, path):
    """Persist *df* to *path* as a UTF-8 CSV, omitting the index column."""
    df.to_csv(path, index=False, encoding='utf8')
def read_and_select_time(file):
    """Return the (min, max) ``collectime`` observed in each of the four
    fixed measurement-campaign windows of one turbine CSV.

    :param file: path to a CSV containing a ``collectime`` column.
    :return: list of four ``(min, max)`` timestamp tuples, one per window
             (``NaT`` pairs for windows with no data).
    """
    frame = pd.read_csv(file, usecols=['collectime'])
    frame['collectime'] = pd.to_datetime(frame['collectime'])
    windows = [
        ('2024-11-12 00:00:00', '2024-11-19 23:59:59'),
        ('2024-12-02 00:00:00', '2024-12-31 23:59:59'),
        ('2025-01-01 00:00:00', '2025-01-21 23:59:59'),
        ('2025-01-31 00:00:00', '2025-02-04 23:59:59'),
    ]
    spans = []
    for lower, upper in windows:
        in_window = frame.loc[
            (frame['collectime'] >= lower) & (frame['collectime'] <= upper),
            'collectime']
        spans.append((in_window.min(), in_window.max()))
    return spans
if __name__ == '__main__':
    # NOTE(review): the commented-out blocks in this section are earlier
    # pipeline stages (raw ingestion via read_file_to_df, per-turbine CSV
    # export, and a time-coverage probe) kept for reference. The quoted names
    # inside them are the original Chinese SCADA column identifiers and are
    # deliberately left untranslated — they are literal column names.
    # read_cols = ['Time', '设备主要状态', '功率曲线风速', '湍流强度', '实际风速', '有功功率', '桨叶角度A', '桨叶角度B',
    #              '桨叶角度C', '机舱内温度', '机舱外温度', '绝对风向', '机舱绝对位置', '叶轮转速', '发电机转速',
    #              '瞬时风速',
    #              '有功设定反馈', '当前理论可发最大功率', '空气密度', '偏航误差', '发电机扭矩', '瞬时功率', '风向1s',
    #              '偏航压力', '桨叶1速度', '桨叶2速度', '桨叶3速度', '桨叶1角度给定', '桨叶2角度给定', '桨叶3角度给定',
    #              '轴1电机电流', '轴2电机电流', '轴3电机电流', '轴1电机温度', '轴2电机温度', '轴3电机温度', '待机',
    #              '启动',
    #              '偏航', '并网', '限功率', '正常发电', '故障', '计入功率曲线', '运行发电机冷却风扇1',
    #              '运行发电机冷却风扇2',
    #              '激活偏航解缆阀', '激活偏航刹车阀', '激活风轮刹车阀', '激活顺时针偏航', '激活逆时针偏航', '电缆扭角']
    # select_cols = ['wecnum', 'systime', 'power', 'iwinfil', 'hubpos1', 'hubpos2', 'hubpos3', 'windir']
    # read_dir = r'/data/download/collection_data/1进行中/七台河风电场-黑龙江-华电/收资数据/七台河/秒级数据/sec'
    # files = read_excel_files(read_dir)
    # dfs = list()
    # with multiprocessing.Pool(33) as pool:
    #     dfs = pool.map(read_file_to_df, files)
    # df = pd.concat(dfs, ignore_index=True)
    # print(df.columns)
    # df['systime'] = pd.to_datetime(df['systime'], errors='coerce')
    # df['wecnum'] = pd.to_numeric(df['wecnum'], errors='coerce')
    # read_cols = list(df.columns)
    # read_cols.remove('systime')
    # read_cols.remove('wecnum')
    #
    # wind_names = df['wecnum'].unique()

    # Directory of per-turbine CSVs produced by the (commented-out) export
    # stage above; one file per turbine, named <wecnum>.csv.
    tmp_save_dir = r'/home/wzl/test_data/qitaihe/sec'
    # with multiprocessing.Pool(4) as pool:
    #     pool.starmap(save_to_csv,
    #                  [(df[df['wecnum'] == wind_name], os.path.join(tmp_save_dir, str(wind_name) + '.csv')) for wind_name
    #                   in
    #                   wind_names])
    #
    # del df

    # Fan the quality analysis out across 10 worker processes, one file each,
    # then stack the single-row results into one table.
    all_fils = read_excel_files(tmp_save_dir)
    with multiprocessing.Pool(10) as pool:
        dfs = pool.starmap(read_and_select,
                           [(file,) for file in all_fils])
    resu_df = pd.concat(dfs, ignore_index=True)
    print(resu_df.columns)
    # Sort by turbine id so the report is stable across runs.
    resu_df.sort_values(by=['wecnum'], inplace=True)
    resu_df.to_csv("七台河-5秒.csv", encoding='utf8', index=False)

    # Time-coverage probe: per window, collect each file's (min, max)
    # collectime and print the overall envelope. Kept for reference.
    # with multiprocessing.Pool(10) as pool:
    #     datas = pool.map(read_and_select_time, all_fils)
    #
    # min1 = list()
    # max1 = list()
    #
    # min2 = list()
    # max2 = list()
    #
    # min3 = list()
    # max3 = list()
    #
    # min4 = list()
    # max4 = list()
    #
    # for data in datas:
    #     print(data)
    #     data1, data2, data3, data4 = data[0], data[1], data[2], data[3]
    #     min1.append(data1[0])
    #     max1.append(data1[1])
    #
    #     min2.append(data2[0])
    #     max2.append(data2[1])
    #
    #     min3.append(data3[0])
    #     max3.append(data3[1])
    #
    #     min4.append(data4[0])
    #     max4.append(data4[1])
    #
    # print(min(min1), max(max1))
    # print(min(min2), max(max2))
    # print(min(min3), max(max3))
    # print(min(min4), max(max4))
|