玉湖光伏-气象标准化.py

# -*- coding: utf-8 -*-
"""
Created on Tue Jul 9 16:28:48 2024
@author: Administrator
"""
import os

import chardet
import pandas as pd

pd.options.mode.copy_on_write = True
# Detect the file encoding
def detect_file_encoding(filename):
    # Read the first 1000 bytes of the file (enough for most encoding detection)
    with open(filename, 'rb') as f:
        rawdata = f.read(1000)
    result = chardet.detect(rawdata)
    encoding = result['encoding']
    if encoding is None:
        encoding = 'gb18030'
    # Treat GB2312 / Windows code pages as the broader GB18030 superset
    if encoding.lower() == 'gb2312' or encoding.lower().startswith("windows"):
        encoding = 'gb18030'
    return encoding
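

# Illustrative usage of detect_file_encoding (a sketch; the relative file name
# below is hypothetical and the returned codec depends on the actual file bytes):
#   enc = detect_file_encoding('风速.csv')
#   df = pd.read_csv('风速.csv', encoding=enc)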
# Read a single data file (csv / csv.gz / Excel) into a DataFrame
def read_file_to_df(file_path, read_cols=None, header=0):
    df = pd.DataFrame()
    if str(file_path).lower().endswith("csv") or str(file_path).lower().endswith("gz"):
        encoding = detect_file_encoding(file_path)
        end_with_gz = str(file_path).lower().endswith("gz")
        if read_cols:
            if end_with_gz:
                df = pd.read_csv(file_path, encoding=encoding, usecols=read_cols, compression='gzip', header=header)
            else:
                df = pd.read_csv(file_path, encoding=encoding, usecols=read_cols, header=header, on_bad_lines='warn')
        else:
            if end_with_gz:
                df = pd.read_csv(file_path, encoding=encoding, compression='gzip', header=header)
            else:
                df = pd.read_csv(file_path, encoding=encoding, header=header, on_bad_lines='warn')
    else:
        xls = pd.ExcelFile(file_path)
        # Get all sheet names and concatenate every sheet
        sheet_names = xls.sheet_names
        for sheet in sheet_names:
            if read_cols:
                df = pd.concat([df, pd.read_excel(xls, sheet_name=sheet, header=header, usecols=read_cols)])
            else:
                df = pd.concat([df, pd.read_excel(xls, sheet_name=sheet, header=header)])
    return df
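

# Illustrative usage of read_file_to_df (a sketch; the file names are hypothetical
# and must actually contain the requested columns):
#   fengsu = read_file_to_df('风速.csv', read_cols=['当前时间', '实际风速'])
#   quanbu = read_file_to_df('风速.xlsx')  # Excel input: all sheets are concatenated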
# Recursively build a mapping from each directory to the files directly inside it
def __build_directory_dict(directory_dict, path, filter_types=None):
    # Walk every entry under the directory
    for item in os.listdir(path):
        item_path = os.path.join(path, item)
        if os.path.isdir(item_path):
            __build_directory_dict(directory_dict, item_path, filter_types=filter_types)
        elif os.path.isfile(item_path):
            if path not in directory_dict:
                directory_dict[path] = []
            if filter_types is None or len(filter_types) == 0:
                directory_dict[path].append(item_path)
            elif str(item_path).split(".")[-1] in filter_types:
                # Skip Office temporary files (names containing '~$')
                if str(item_path).count("~$") == 0:
                    directory_dict[path].append(item_path)
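

# Resulting structure (an illustrative sketch; the directory and file names are hypothetical):
#   {r'Z:\气象站数据': [r'Z:\气象站数据\风速.csv', r'Z:\气象站数据\风向.csv'],
#    r'Z:\气象站数据\2024': [r'Z:\气象站数据\2024\温度.xlsx']}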
# Collect every xls/xlsx/csv/gz file under read_path (recursively) into a flat list
def read_excel_files(read_path):
    directory_dict = {}
    __build_directory_dict(directory_dict, read_path, filter_types=['xls', 'xlsx', 'csv', 'gz'])
    return [path for paths in directory_dict.values() for path in paths if path]
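

# Illustrative usage of read_excel_files (a sketch; the directory is the same one
# the __main__ block reads, and the loop over the returned list is hypothetical):
#   files = read_excel_files(r'Z:\大唐玉湖性能分析离线分析\05整理数据\气象站数据')
#   dfs = [read_file_to_df(f) for f in files]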
# Create a directory; when is_file_path is True, create the parent directory of the given file path
def create_file_path(path, is_file_path=False):
    if is_file_path:
        path = os.path.dirname(path)
    if not os.path.exists(path):
        os.makedirs(path, exist_ok=True)
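

# Illustrative usage of create_file_path (a sketch; the paths are hypothetical):
#   create_file_path(r'Z:\output\气象合并.csv', is_file_path=True)  # ensures Z:\output exists
#   create_file_path(r'Z:\output')                                  # creates Z:\output itself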
if __name__ == '__main__':
    # path = r'/data/download/大唐玉湖性能分析离线分析/05整理数据/气象站数据'
    # save_path = r'/data/download/大唐玉湖性能分析离线分析/06整理数据/气象站数据'
    path = r'Z:\大唐玉湖性能分析离线分析\05整理数据\气象站数据'
    save_path = r'Z:\大唐玉湖性能分析离线分析\06整理数据\气象站数据'

    # Read each measurement file, keeping only the timestamp and value columns
    fengsu_df = read_file_to_df(os.path.join(path, '风速.csv'), read_cols=['当前时间', '实际风速'])
    fengxiang_df = read_file_to_df(os.path.join(path, '风向.csv'), read_cols=['当前时间', '实际风向'])
    fuzhaodu_df = read_file_to_df(os.path.join(path, '辐照度.csv'), read_cols=['时间', '水平总辐照度', '倾斜总辐照度', '散射辐照度'])
    shidu_df = read_file_to_df(os.path.join(path, '湿度.csv'), read_cols=['时间', '实际湿度'])
    wendu_df = read_file_to_df(os.path.join(path, '温度.csv'), read_cols=['时间', '实际温度'])
    yali_df = read_file_to_df(os.path.join(path, '压力.csv'), read_cols=['时间', '实际气压'])

    # Unify the timestamp column name across files
    fengsu_df.rename(columns={'当前时间': '时间'}, inplace=True)
    fengxiang_df.rename(columns={'当前时间': '时间'}, inplace=True)

    # Parse timestamps and index every frame by 时间 so they can be aligned column-wise
    dfs = [fengxiang_df, fengsu_df, fuzhaodu_df, shidu_df, wendu_df, yali_df]
    for df in dfs:
        df['时间'] = pd.to_datetime(df['时间'])
        df.set_index(keys='时间', inplace=True)

    # Merge all measurements on the shared timestamp index and save
    merged_df = pd.concat(dfs, axis=1)
    create_file_path(save_path, is_file_path=False)
    merged_df.to_csv(os.path.join(save_path, '气象合并.csv'), encoding='utf-8')
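
# The merged 气象合并.csv should be indexed by 时间 and carry the selected columns
# (实际风向, 实际风速, 水平总辐照度, 倾斜总辐照度, 散射辐照度, 实际湿度, 实际温度, 实际气压);
# timestamps missing from a given sensor file appear as empty cells in the other columns.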