# pv_youxiaoxing.py — PV (photovoltaic) inverter data validity/loss statistics script.
import multiprocessing
import os
import matplotlib
import numpy as np
from matplotlib import pyplot as plt
# Headless backend: the script runs batch-style with no display attached.
matplotlib.use('Agg')
matplotlib.rcParams['font.family'] = 'SimHei'  # or 'Microsoft YaHei'
matplotlib.rcParams['font.sans-serif'] = ['SimHei']  # or ['Microsoft YaHei']
import chardet
import warnings
# Silences all warnings globally — notably pandas' SettingWithCopyWarning,
# which several functions below would otherwise trigger.
warnings.filterwarnings("ignore")
import datetime
import pandas as pd
  14. def get_time_space(df, time_str):
  15. """
  16. :return: 查询时间间隔
  17. """
  18. begin = datetime.datetime.now()
  19. df1 = pd.DataFrame(df[time_str])
  20. df1[time_str] = pd.to_datetime(df1[time_str], errors='coerce')
  21. df1.sort_values(by=time_str, inplace=True)
  22. df1['chazhi'] = df1[time_str].shift(-1) - df1[time_str]
  23. result = df1.sample(int(df1.shape[0] / 100))['chazhi'].value_counts().idxmax().seconds
  24. del df1
  25. print(datetime.datetime.now() - begin)
  26. return abs(result)
  27. def get_time_space_count(start_time: datetime.datetime, end_time: datetime.datetime, time_space=1):
  28. """
  29. 获取俩个时间之间的个数
  30. :return: 查询时间间隔
  31. """
  32. delta = end_time - start_time
  33. total_seconds = delta.days * 24 * 60 * 60 + delta.seconds
  34. return abs(int(total_seconds / time_space)) + 1
  35. # 获取文件编码
  36. def detect_file_encoding(filename):
  37. # 读取文件的前1000个字节(足够用于大多数编码检测)
  38. with open(filename, 'rb') as f:
  39. rawdata = f.read(1000)
  40. result = chardet.detect(rawdata)
  41. encoding = result['encoding']
  42. if encoding is None:
  43. encoding = 'gb18030'
  44. if encoding and encoding.lower() == 'gb2312' or encoding.lower().startswith("windows"):
  45. encoding = 'gb18030'
  46. return encoding
  47. def del_blank(df=pd.DataFrame(), cols=list()):
  48. for col in cols:
  49. if df[col].dtype == object:
  50. df[col] = df[col].str.strip()
  51. return df
  52. # 切割数组到多个数组
  53. def split_array(array, num):
  54. return [array[i:i + num] for i in range(0, len(array), num)]
  55. # 读取数据到df
  56. def read_file_to_df(file_path, read_cols=list(), header=0):
  57. try:
  58. df = pd.DataFrame()
  59. if str(file_path).lower().endswith("csv") or str(file_path).lower().endswith("gz"):
  60. encoding = detect_file_encoding(file_path)
  61. end_with_gz = str(file_path).lower().endswith("gz")
  62. if read_cols:
  63. if end_with_gz:
  64. df = pd.read_csv(file_path, encoding=encoding, usecols=read_cols, compression='gzip', header=header)
  65. else:
  66. df = pd.read_csv(file_path, encoding=encoding, usecols=read_cols, header=header,
  67. on_bad_lines='warn')
  68. else:
  69. if end_with_gz:
  70. df = pd.read_csv(file_path, encoding=encoding, compression='gzip', header=header)
  71. else:
  72. df = pd.read_csv(file_path, encoding=encoding, header=header, on_bad_lines='warn')
  73. else:
  74. xls = pd.ExcelFile(file_path)
  75. # 获取所有的sheet名称
  76. sheet_names = xls.sheet_names
  77. for sheet in sheet_names:
  78. if read_cols:
  79. now_df = pd.read_excel(xls, sheet_name=sheet, header=header, usecols=read_cols)
  80. else:
  81. now_df = pd.read_excel(xls, sheet_name=sheet, header=header)
  82. df = pd.concat([df, now_df])
  83. print('文件读取成功', file_path, '文件数量', df.shape)
  84. except Exception as e:
  85. print('读取文件出错', file_path, str(e))
  86. message = '文件:' + os.path.basename(file_path) + ',' + str(e)
  87. raise ValueError(message)
  88. return df
  89. def __build_directory_dict(directory_dict, path, filter_types=None):
  90. # 遍历目录下的所有项
  91. for item in os.listdir(path):
  92. item_path = os.path.join(path, item)
  93. if os.path.isdir(item_path):
  94. __build_directory_dict(directory_dict, item_path, filter_types=filter_types)
  95. elif os.path.isfile(item_path):
  96. if path not in directory_dict:
  97. directory_dict[path] = []
  98. if filter_types is None or len(filter_types) == 0:
  99. directory_dict[path].append(item_path)
  100. elif str(item_path).split(".")[-1] in filter_types:
  101. if str(item_path).count("~$") == 0:
  102. directory_dict[path].append(item_path)
  103. # 读取所有文件
  104. # 读取路径下所有的excel文件
  105. def read_excel_files(read_path):
  106. directory_dict = {}
  107. __build_directory_dict(directory_dict, read_path, filter_types=['xls', 'xlsx', 'csv', 'gz'])
  108. return [path for paths in directory_dict.values() for path in paths if path]
  109. # 创建路径
  110. def create_file_path(path, is_file_path=False):
  111. if is_file_path:
  112. path = os.path.dirname(path)
  113. if not os.path.exists(path):
  114. os.makedirs(path, exist_ok=True)
  115. def time_biaozhun(df):
  116. time_space = get_time_space(df, '时间')
  117. query_df = df[['时间']]
  118. query_df['时间'] = pd.to_datetime(df['时间'], errors="coerce")
  119. query_df = query_df.dropna(subset=['时间'])
  120. total = get_time_space_count(query_df['时间'].min(), query_df['时间'].max(), time_space)
  121. return total, save_percent(1 - query_df.shape[0] / total), save_percent(1 - df.shape[0] / total)
  122. def save_percent(value, save_decimal=7):
  123. return round(value, save_decimal) * 100
def calc(df, file_name):
    """
    Compute per-column abnormal-value and missing-value percentages for one file.

    :param df: raw DataFrame (must contain a '时间' column)
    :param file_name: base name of the source file; its stem labels the '箱变' column
    :return: (error_df, lose_df) — single-row DataFrames of percentages,
             led by the '箱变' label and the overall average column
    :raises Exception: re-raised from any failure, after printing the file name
    """
    error_dict = {}
    lose_dict = {}
    # File-name stem (extension removed) identifies the transformer box ('箱变').
    error_dict['箱变'] = "".join(file_name.split(".")[:-1])
    lose_dict['箱变'] = "".join(file_name.split(".")[:-1])
    # Expected row count plus the time column's own loss/error rates.
    total, lose_time, error_time = time_biaozhun(df)
    error_dict['时间'] = error_time
    lose_dict['时间'] = lose_time
    error_df = pd.DataFrame()
    lose_df = pd.DataFrame()
    try:
        # Normalize column names: anything containing '逆变器' (inverter) is
        # truncated to start at that word, dropping any prefix before it.
        df.columns = ["".join(["逆变器" + "".join(col.split("逆变器")[1:])]) if col.find("逆变器") > -1 else col for col in
                      df.columns]
        for col in df.columns:
            if col == '时间':
                continue
            query_df = df[[col]]
            query_df[col] = pd.to_numeric(query_df[col], errors="coerce")
            query_df = query_df.dropna(subset=[col])
            # Missing rate: share of expected rows whose value failed to parse.
            lose_dict[col] = save_percent(1 - query_df.shape[0] / total)
            # Abnormal rates, keyed on substrings of the column name:
            # 电压 voltage < 0, 电流 current < -0.1, 逆变器效率 inverter
            # efficiency outside (0, 100), 温度 temperature outside [0, 100],
            # 功率因数 power factor outside [0, 1].
            if col.find('电压') > -1:
                error_dict[col] = save_percent(query_df[query_df[col] < 0].shape[0] / total)
            if col.find('电流') > -1:
                error_dict[col] = save_percent(query_df[query_df[col] < -0.1].shape[0] / total)
            if col.find('逆变器效率') > -1:
                error_dict[col] = save_percent(query_df[(query_df[col] <= 0) | (query_df[col] >= 100)].shape[0] / total)
            if col.find('温度') > -1:
                error_dict[col] = save_percent(query_df[(query_df[col] < 0) | (query_df[col] > 100)].shape[0] / total)
            if col.find('功率因数') > -1:
                error_dict[col] = save_percent(query_df[(query_df[col] < 0) | (query_df[col] > 1)].shape[0] / total)
        # NOTE: `total` is reused here as an accumulator, clobbering the
        # expected-row-count above — safe only because the column loop is done.
        total, count = 0, 0
        for k, v in error_dict.items():
            if k != '箱变':
                total = total + error_dict[k]
                count = count + 1
        # Percentages are summed, so divide by 100 before save_percent re-scales.
        error_dict['平均异常率'] = save_percent(total / count / 100)
        total, count = 0, 0
        for k, v in lose_dict.items():
            if k != '箱变':
                total = total + lose_dict[k]
                count = count + 1
        lose_dict['平均缺失率'] = save_percent(total / count / 100)
        error_df = pd.concat([error_df, pd.DataFrame(error_dict, index=[0])])
        lose_df = pd.concat([lose_df, pd.DataFrame(lose_dict, index=[0])])
        # Reorder columns: label and average first, then the rest in原 order.
        error_df_cols = ['箱变', '平均异常率']
        for col in error_df.columns:
            if col not in error_df_cols:
                error_df_cols.append(col)
        lose_df_cols = ['箱变', '平均缺失率']
        for col in lose_df.columns:
            if col not in lose_df_cols:
                lose_df_cols.append(col)
        error_df = error_df[error_df_cols]
        lose_df = lose_df[lose_df_cols]
    except Exception as e:
        print("异常文件", os.path.basename(file_name))
        raise e
    return error_df, lose_df
  182. def run(file_path):
  183. df = read_file_to_df(file_path)
  184. return calc(df, os.path.basename(file_path))
  185. if __name__ == '__main__':
  186. # read_path = r'/data/download/大唐玉湖性能分析离线分析/05整理数据/逆变器数据'
  187. # save_path = r'/data/download/大唐玉湖性能分析离线分析/06整理数据/逆变器数据'
  188. read_path = r'D:\trans_data\大唐玉湖性能分析离线分析\test\yuanshi'
  189. save_path = r'D:\trans_data\大唐玉湖性能分析离线分析\test\zhengli'
  190. all_files = read_excel_files(read_path)
  191. with multiprocessing.Pool(2) as pool:
  192. df_arrys = pool.starmap(run, [(file,) for file in all_files])
  193. error_df = pd.concat([df[0] for df in df_arrys])
  194. lose_df = pd.concat([df[1] for df in df_arrys])
  195. with pd.ExcelWriter(os.path.join(save_path, "玉湖光伏数据统计.xlsx")) as writer:
  196. error_df.to_excel(writer, sheet_name='error_percent', index=False)
  197. lose_df.to_excel(writer, sheet_name='lose_percent', index=False)