trans_methods.py

# -*- coding: utf-8 -*-
# @Time : 2024/5/16
# @Author : 魏志亮
import datetime
import os
import re
import shutil
import warnings

import chardet
import pandas as pd

from utils.log.trans_log import trans_print

warnings.filterwarnings("ignore")


# Detect the file encoding
def detect_file_encoding(filename):
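    # Anything chardet does not identify as UTF-8/ASCII is treated as gb18030,
    # a superset of GBK/GB2312, so Chinese-encoded files remain readable.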
    # Read the first 1000 bytes of the file (enough for most encoding detection)
    with open(filename, 'rb') as f:
        rawdata = f.read(1000)
    result = chardet.detect(rawdata)
    encoding = result['encoding']

    trans_print("文件类型:", filename, encoding)

    if encoding is None:
        encoding = 'gb18030'

    if encoding.lower() in ['utf-8', 'ascii', 'utf8']:
        return 'utf-8'

    return 'gb18030'
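

# Strip leading/trailing whitespace from the given string (object-dtype) columns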
def del_blank(df=pd.DataFrame(), cols=list()):
    for col in cols:
        if df[col].dtype == object:
            df[col] = df[col].str.strip()
    return df


# Split a list into chunks of `num` elements
def split_array(array, num):
    return [array[i:i + num] for i in range(0, len(array), num)]
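

# Locate the header row of a raw data file: returns 0 when at least two of trans_cols
# already appear as column names, index + 1 (suitable as pandas' header=) when more than
# two of trans_cols are found inside the first data rows, and None when nothing matches.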
def find_read_header(file_path, trans_cols):
    print(trans_cols)
    df = read_file_to_df(file_path, nrows=20)
    count = 0
    for col in trans_cols:
        if col in df.columns:
            count = count + 1
            if count >= 2:
                return 0

    count = 0
    for index, row in df.iterrows():
        for col in trans_cols:
            if col in row.values:
                count = count + 1
                if count > 2:
                    return index + 1

    return None


# Read a data file into a DataFrame
def read_file_to_df(file_path, read_cols=list(), header=0, trans_cols=None, nrows=None):
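    # read_cols:  optional subset of columns to load
    # header:     row to use as the header (overridden when trans_cols is given)
    # trans_cols: expected column names; when provided, the header row is located
    #             automatically via find_read_header
    # nrows:      optional limit on the number of rows to read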
    begin = datetime.datetime.now()
    trans_print('开始读取文件', file_path)

    if trans_cols:
        header = find_read_header(file_path, trans_cols)
        trans_print(os.path.basename(file_path), "读取第", header, "行")
        if header is None:
            message = '未匹配到开始行,请检查并重新指定'
            trans_print(message)
            raise Exception(message)

    try:
        df = pd.DataFrame()
        if str(file_path).lower().endswith("csv") or str(file_path).lower().endswith("gz"):
            encoding = detect_file_encoding(file_path)
            end_with_gz = str(file_path).lower().endswith("gz")
            if read_cols:
                if end_with_gz:
                    df = pd.read_csv(file_path, encoding=encoding, usecols=read_cols, compression='gzip',
                                     header=header, nrows=nrows)
                else:
                    df = pd.read_csv(file_path, encoding=encoding, usecols=read_cols, header=header,
                                     on_bad_lines='warn', nrows=nrows)
            else:
                if end_with_gz:
                    df = pd.read_csv(file_path, encoding=encoding, compression='gzip', header=header, nrows=nrows)
                else:
                    df = pd.read_csv(file_path, encoding=encoding, header=header, on_bad_lines='warn', nrows=nrows)
        else:
            xls = pd.ExcelFile(file_path)
            # Get all sheet names
            sheet_names = xls.sheet_names
            for sheet_name in sheet_names:
                if read_cols:
                    now_df = pd.read_excel(xls, sheet_name=sheet_name, header=header, usecols=read_cols, nrows=nrows)
                else:
                    now_df = pd.read_excel(xls, sheet_name=sheet_name, header=header, nrows=nrows)

                now_df['sheet_name'] = sheet_name
                df = pd.concat([df, now_df])

        trans_print('文件读取成功', file_path, '文件数量', df.shape, '耗时', datetime.datetime.now() - begin)
    except Exception as e:
        trans_print('读取文件出错', file_path, str(e))
        message = '文件:' + os.path.basename(file_path) + ',' + str(e)
        raise ValueError(message)

    return df
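

# Recursively walk `path` and collect files into directory_dict ({directory: [file paths]}),
# optionally filtered by extension, skipping Office temp files ("~$" in the name)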
def __build_directory_dict(directory_dict, path, filter_types=None):
    # Walk every entry under the directory
    for item in os.listdir(path):
        item_path = os.path.join(path, item)
        if os.path.isdir(item_path):
            __build_directory_dict(directory_dict, item_path, filter_types=filter_types)
        elif os.path.isfile(item_path):
            if path not in directory_dict:
                directory_dict[path] = []

            if filter_types is None or len(filter_types) == 0:
                directory_dict[path].append(item_path)
            elif str(item_path).split(".")[-1] in filter_types:
                if str(item_path).count("~$") == 0:
                    directory_dict[path].append(item_path)


# Collect all Excel-style data files (xls/xlsx/csv/gz) under the path
def read_excel_files(read_path):
    directory_dict = {}
    __build_directory_dict(directory_dict, read_path, filter_types=['xls', 'xlsx', 'csv', 'gz'])

    return [path for paths in directory_dict.values() for path in paths if path]


# Collect all data and archive files (xls/xlsx/csv/gz/zip/rar) under the path
def read_files(read_path):
    directory_dict = {}
    __build_directory_dict(directory_dict, read_path, filter_types=['xls', 'xlsx', 'csv', 'gz', 'zip', 'rar'])

    return [path for paths in directory_dict.values() for path in paths if path]
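

# Copy from_path to to_path, creating the destination directory first; to_path is treated
# as a file path when it contains a '.', otherwise as a directory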
def copy_to_new(from_path, to_path):
    is_file = False
    if to_path.count('.') > 0:
        is_file = True

    create_file_path(to_path, is_file_path=is_file)

    shutil.copy(from_path, to_path)


# Create the directory (for a file path, create its parent directory)
def create_file_path(path, is_file_path=False):
    if is_file_path:
        path = os.path.dirname(path)

    if not os.path.exists(path):
        os.makedirs(path, exist_ok=True)


# Normalize the turbine name
def generate_turbine_name(turbine_name='F0001', prefix='F'):
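    # Keep only the digits and zero-pad to three places, e.g. 'F0001' -> 'F001', '10号' -> 'F010'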
    strinfo = re.compile(r"[\D*]")
    name = strinfo.sub('', str(turbine_name))
    return prefix + str(int(name)).zfill(3)


if __name__ == '__main__':
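    # Manual smoke test: read one sample CSV and auto-locate its header row from the expected columns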
    # files = read_excel_files(r'D:\trans_data\10.xls')
    # for file in files:
    file = r'D:\trans_data\新艾里风电场10号风机.csv'
    read_file_to_df(file, trans_cols=[
        '', '风向', '时间', '设备号', '机舱方向总角度', '$folder[2]', '发电机转速30秒平均值', '机组运行模式', '机舱旋转角度',
        '主轴转速', '变桨角度30秒平均值', '记录时间', '发电机功率30秒平均值', '风速30秒平均值'])