# 大唐玉湖数据整理.py
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 9 16:28:48 2024
@author: Administrator
"""
import multiprocessing
import os
from datetime import datetime
from os import *  # noqa: F401,F403 -- kept for backward compatibility; prefer explicit os.*

import chardet
import pandas as pd

pd.options.mode.copy_on_write = True
  12. # 获取文件编码
  13. def detect_file_encoding(filename):
  14. # 读取文件的前1000个字节(足够用于大多数编码检测)
  15. with open(filename, 'rb') as f:
  16. rawdata = f.read(1000)
  17. result = chardet.detect(rawdata)
  18. encoding = result['encoding']
  19. if encoding is None:
  20. encoding = 'gb18030'
  21. if encoding and encoding.lower() == 'gb2312' or encoding.lower().startswith("windows"):
  22. encoding = 'gb18030'
  23. return encoding
  24. # 读取数据到df
  25. def read_file_to_df(file_path, read_cols=list(), header=0):
  26. df = pd.DataFrame()
  27. if str(file_path).lower().endswith("csv") or str(file_path).lower().endswith("gz"):
  28. encoding = detect_file_encoding(file_path)
  29. end_with_gz = str(file_path).lower().endswith("gz")
  30. if read_cols:
  31. if end_with_gz:
  32. df = pd.read_csv(file_path, encoding=encoding, usecols=read_cols, compression='gzip', header=header)
  33. else:
  34. df = pd.read_csv(file_path, encoding=encoding, usecols=read_cols, header=header, on_bad_lines='warn')
  35. else:
  36. if end_with_gz:
  37. df = pd.read_csv(file_path, encoding=encoding, compression='gzip', header=header)
  38. else:
  39. df = pd.read_csv(file_path, encoding=encoding, header=header, on_bad_lines='warn')
  40. else:
  41. xls = pd.ExcelFile(file_path)
  42. # 获取所有的sheet名称
  43. sheet_names = xls.sheet_names
  44. for sheet in sheet_names:
  45. if read_cols:
  46. df = pd.concat([df, pd.read_excel(xls, sheet_name=sheet, header=header, usecols=read_cols)])
  47. else:
  48. df = pd.concat([df, pd.read_excel(xls, sheet_name=sheet, header=header)])
  49. return df
  50. def __build_directory_dict(directory_dict, path, filter_types=None):
  51. # 遍历目录下的所有项
  52. for item in listdir(path):
  53. item_path = path.join(path, item)
  54. if path.isdir(item_path):
  55. __build_directory_dict(directory_dict, item_path, filter_types=filter_types)
  56. elif path.isfile(item_path):
  57. if path not in directory_dict:
  58. directory_dict[path] = []
  59. if filter_types is None or len(filter_types) == 0:
  60. directory_dict[path].append(item_path)
  61. elif str(item_path).split(".")[-1] in filter_types:
  62. if str(item_path).count("~$") == 0:
  63. directory_dict[path].append(item_path)
  64. # 读取所有文件
  65. # 读取路径下所有的excel文件
  66. def read_excel_files(read_path):
  67. directory_dict = {}
  68. __build_directory_dict(directory_dict, read_path, filter_types=['xls', 'xlsx', 'csv', 'gz'])
  69. return [path for paths in directory_dict.values() for path in paths if path]
  70. # 创建路径
  71. def create_file_path(path, is_file_path=False):
  72. if is_file_path:
  73. path = path.dirname(path)
  74. if not path.exists(path):
  75. makedirs(path, exist_ok=True)
  76. def generate_df(pv_df, col):
  77. if col != '时间':
  78. xiangbian = col.split("逆变器")[0].replace("#", "")
  79. nibianqi = col.split("-")[0].split('逆变器')[1]
  80. pv_index = col.split("-")[1].replace("PV", "")
  81. now_df = pv_df[['时间', col + '输入电流()', col + '输入电压()']]
  82. now_df.loc[:, '箱变'] = xiangbian
  83. now_df.loc[:, '逆变器'] = nibianqi
  84. now_df.loc[:, 'PV'] = pv_index
  85. now_df.columns = [df_col.replace(col, "").replace("()", "") for df_col in now_df.columns]
  86. now_df['输入电流'] = now_df['输入电流'].astype(float)
  87. now_df['输入电压'] = now_df['输入电压'].astype(float)
  88. print(xiangbian, nibianqi, pv_index, now_df.shape)
  89. return now_df
  90. return pd.DataFrame()
  91. def read_and_save_csv(file_path, save_path):
  92. begin = datetime.now()
  93. base_name = path.basename(file_path)
  94. print('开始', base_name)
  95. df = read_file_to_df(file_path)
  96. df['时间'] = pd.to_datetime(df['时间'])
  97. # df.set_index(keys='时间', inplace=True)
  98. pv_df_cols = [col for col in df.columns if col.find('输入电') > -1]
  99. pv_df_cols.append('时间')
  100. pv_df = df[pv_df_cols]
  101. shuru_cols = set([col.split("输入电")[0] for col in pv_df.columns])
  102. with multiprocessing.Pool(6) as pool:
  103. dfs = pool.starmap(generate_df, [(pv_df, col) for col in shuru_cols])
  104. saved_pv_df = pd.concat(dfs)
  105. saved_pv_df.sort_values(by=['箱变', '逆变器', 'PV', '时间'], inplace=True)
  106. save_file = path.join(save_path, path.basename(file_path).split(".")[0], 'PV.csv')
  107. create_file_path(save_file, True)
  108. saved_pv_df.to_csv(save_file, encoding='utf-8', index=False)
  109. print('结束', base_name, '耗时:' + str(datetime.now() - begin))
  110. if __name__ == '__main__':
  111. path = r'D:\trans_data\大唐玉湖性能分析离线分析\test\yuanshi'
  112. save_path = r'D:\trans_data\大唐玉湖性能分析离线分析\test\zhengli'
  113. all_files = read_excel_files(path)
  114. all_datas = list(all_files)
  115. all_datas.sort()
  116. print(all_datas)
  117. for file in all_datas:
  118. read_and_save_csv(file, save_path)
  119. # with Pool(1) as pool:
  120. # pool.starmap(read_and_save_csv, [(i, save_path) for i in all_datas])