# -*- coding: utf-8 -*-
"""
Created on Tue Jul 9 16:28:48 2024

@author: Administrator

Reshape raw PV power-plant exports: each wide per-string column pair
("<prefix>输入电流()" / "<prefix>输入电压()") is converted to long-format rows
keyed by transformer box (箱变) / inverter (逆变器) / PV string / timestamp,
then written out as one PV.csv per input file.
"""
import multiprocessing
import os
from datetime import datetime, timedelta

import chardet
import pandas as pd

pd.options.mode.copy_on_write = True


def detect_file_encoding(filename):
    """Guess a text file's encoding from its first 1000 bytes.

    GB2312 and Windows code pages are widened to GB18030 (a superset),
    so characters outside the narrower charset still decode; an
    undetectable encoding also falls back to GB18030.

    :param filename: path of the file to probe
    :return: encoding name usable by pandas/open()
    """
    # 1000 bytes is enough for most charset detectors.
    with open(filename, 'rb') as f:
        rawdata = f.read(1000)
    encoding = chardet.detect(rawdata)['encoding']
    if encoding is None:
        encoding = 'gb18030'
    # BUG FIX: the original mixed `and`/`or` with no parentheses, so the
    # intent ("gb2312 OR windows-* -> gb18030") held only by operator
    # precedence. `elif` also makes the None fallback explicit.
    elif encoding.lower() == 'gb2312' or encoding.lower().startswith('windows'):
        encoding = 'gb18030'
    return encoding


def read_file_to_df(file_path, read_cols=None, header=0):
    """Load a csv / gzipped-csv / Excel file into a DataFrame.

    :param file_path: path ending in .csv, .gz (gzip csv) or an Excel format
    :param read_cols: optional subset of columns to read (None/empty = all)
    :param header: row number to use as the column header
    :return: file contents; for Excel workbooks all sheets are concatenated
    """
    # BUG FIX: original used a mutable default argument (read_cols=list()).
    read_cols = read_cols or []
    lower_path = str(file_path).lower()
    if lower_path.endswith(('csv', 'gz')):
        kwargs = {
            'encoding': detect_file_encoding(file_path),
            'header': header,
        }
        if read_cols:
            kwargs['usecols'] = read_cols
        if lower_path.endswith('gz'):
            kwargs['compression'] = 'gzip'
        else:
            # Field exports occasionally contain malformed rows; warn
            # instead of aborting the whole file.
            kwargs['on_bad_lines'] = 'warn'
        df = pd.read_csv(file_path, **kwargs)
    else:
        xls = pd.ExcelFile(file_path)
        frames = []
        for sheet in xls.sheet_names:
            if read_cols:
                frames.append(pd.read_excel(xls, sheet_name=sheet,
                                            header=header, usecols=read_cols))
            else:
                frames.append(pd.read_excel(xls, sheet_name=sheet, header=header))
        # Concatenate once (the original re-concatenated per sheet);
        # guard against a workbook with no sheets.
        df = pd.concat(frames) if frames else pd.DataFrame()
    return df


def __build_directory_dict(directory_dict, path, filter_types=None):
    """Recursively collect files under *path* into *directory_dict*.

    directory_dict maps each directory to the list of matching file paths
    directly inside it.  filter_types, when non-empty, is a list of allowed
    extensions (without the dot); Office lock files ("~$...") are skipped.
    """
    for item in os.listdir(path):
        item_path = os.path.join(path, item)
        if os.path.isdir(item_path):
            __build_directory_dict(directory_dict, item_path,
                                   filter_types=filter_types)
        elif os.path.isfile(item_path):
            if not filter_types:
                directory_dict.setdefault(path, []).append(item_path)
            elif str(item_path).split(".")[-1] in filter_types:
                if "~$" not in str(item_path):
                    directory_dict.setdefault(path, []).append(item_path)


def read_excel_files(read_path):
    """Return every xls/xlsx/csv/gz file found under *read_path*, recursively."""
    directory_dict = {}
    __build_directory_dict(directory_dict, read_path,
                           filter_types=['xls', 'xlsx', 'csv', 'gz'])
    return [path for paths in directory_dict.values() for path in paths if path]


def create_file_path(path, is_file_path=False):
    """Ensure a directory exists.

    :param path: directory to create, or a file path when is_file_path=True
        (then the file's parent directory is created instead)
    """
    if is_file_path:
        path = os.path.dirname(path)
    # exist_ok makes a prior existence check redundant.
    os.makedirs(path, exist_ok=True)


def generate_df(pv_df, col):
    """Convert one PV string's wide column pair into long-format rows.

    *col* is a prefix like "#1箱变逆变器2-PV3"; the matching source columns
    are col + '输入电流()' and col + '输入电压()'.

    :param pv_df: frame holding the '时间' column plus all 输入电 columns
    :param col: column prefix, or the literal '时间' column name
    :return: frame with 时间/输入电流/输入电压/箱变/逆变器/PV columns, or an
        empty frame when col is the timestamp column itself
    """
    if col == '时间':
        return pd.DataFrame()
    # Parse box / inverter / PV-string ids out of the prefix.
    xiangbian = col.split("逆变器")[0].replace("#", "")
    nibianqi = col.split("-")[0].split('逆变器')[1]
    pv_index = col.split("-")[1].replace("PV", "")
    now_df = pv_df[['时间', col + '输入电流()', col + '输入电压()']]
    now_df.loc[:, '箱变'] = xiangbian
    now_df.loc[:, '逆变器'] = nibianqi
    now_df.loc[:, 'PV'] = pv_index
    # Strip the prefix and the trailing "()" so columns become
    # 时间/输入电流/输入电压/...
    now_df.columns = [df_col.replace(col, "").replace("()", "")
                      for df_col in now_df.columns]
    now_df['输入电流'] = now_df['输入电流'].astype(float)
    now_df['输入电压'] = now_df['输入电压'].astype(float)
    print(xiangbian, nibianqi, pv_index, now_df.shape)
    return now_df


def read_and_save_csv(file_path, save_path):
    """Read one raw export, reshape every PV column pair and save the result.

    Output goes to <save_path>/<file stem>/PV.csv, sorted by
    箱变/逆变器/PV/时间.  Uses a 6-worker process pool, so this must only be
    called from under an `if __name__ == '__main__':` guard.
    """
    begin = datetime.now()
    base_name = os.path.basename(file_path)
    print('开始', base_name)
    df = read_file_to_df(file_path)
    df['时间'] = pd.to_datetime(df['时间'])
    pv_df_cols = [col for col in df.columns if col.find('输入电') > -1]
    pv_df_cols.append('时间')
    pv_df = df[pv_df_cols]
    # Distinct prefixes like "#1箱变逆变器2-PV3"; the bare '时间' column also
    # lands in this set and is ignored by generate_df.
    shuru_cols = set(col.split("输入电")[0] for col in pv_df.columns)
    with multiprocessing.Pool(6) as pool:
        dfs = pool.starmap(generate_df, [(pv_df, col) for col in shuru_cols])
    saved_pv_df = pd.concat(dfs)
    saved_pv_df.sort_values(by=['箱变', '逆变器', 'PV', '时间'], inplace=True)
    save_file = os.path.join(save_path,
                             os.path.basename(file_path).split(".")[0],
                             'PV.csv')
    create_file_path(save_file, True)
    saved_pv_df.to_csv(save_file, encoding='utf-8', index=False)
    print('结束', base_name, '耗时:' + str(datetime.now() - begin))


if __name__ == '__main__':
    path = r'D:\trans_data\大唐玉湖性能分析离线分析\test\yuanshi'
    save_path = r'D:\trans_data\大唐玉湖性能分析离线分析\test\zhengli'
    all_datas = sorted(read_excel_files(path))
    print(all_datas)
    # Sequential on purpose: each file already fans out to a 6-worker pool.
    for file in all_datas:
        read_and_save_csv(file, save_path)