123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106 |
- import datetime
- import multiprocessing
- import os
- from concurrent.futures.thread import ThreadPoolExecutor
- import pandas as pd
def __build_directory_dict(directory_dict, path, filter_types=None):
    """Recursively walk *path* and group file paths by parent directory.

    Args:
        directory_dict: dict mapping a directory path to the list of file
            paths found directly inside it; mutated in place.
        path: directory to scan (descends into subdirectories).
        filter_types: iterable of extensions (without the dot) to keep;
            when None or empty, every file is kept.
    """
    # Walk every entry in the directory.
    for item in os.listdir(path):
        item_path = os.path.join(path, item)
        if os.path.isdir(item_path):
            __build_directory_dict(directory_dict, item_path, filter_types=filter_types)
        elif os.path.isfile(item_path):
            if path not in directory_dict:
                directory_dict[path] = []
            if not filter_types:
                directory_dict[path].append(item_path)
            else:
                # Fix: the old `split(".")[-1]` returned the whole file name
                # for extension-less files (e.g. "README"), which could
                # spuriously match a filter entry; splitext yields "" instead.
                ext = os.path.splitext(item_path)[1].lstrip('.')
                # Skip Office lock/temp files such as "~$report.xlsx".
                if ext in filter_types and "~$" not in item_path:
                    directory_dict[path].append(item_path)
# Collect every spreadsheet-style file under a path.
def read_excel_files(read_path, filter_types=None):
    """Return all Excel/CSV-like file paths found under *read_path*.

    A single file path is returned as a one-element list; a directory is
    walked recursively, keeping only the default spreadsheet extensions
    unless *filter_types* overrides them.
    """
    filter_types = ['xls', 'xlsx', 'csv', 'gz'] if filter_types is None else filter_types
    # Shortcut: a plain file needs no directory walk.
    if os.path.isfile(read_path):
        return [read_path]
    grouped = {}
    __build_directory_dict(grouped, read_path, filter_types=filter_types)
    # Flatten the per-directory lists, dropping falsy entries.
    flattened = []
    for file_list in grouped.values():
        for file_path in file_list:
            if file_path:
                flattened.append(file_path)
    return flattened
# Collect every data/archive file under a path.
def read_files(read_path, filter_types=None):
    """Return all data-file paths (including archives) found under *read_path*.

    A single file path is returned as a one-element list; a directory is
    walked recursively, keeping the default data/archive extensions unless
    *filter_types* overrides them.
    """
    filter_types = ['xls', 'xlsx', 'csv', 'gz', 'zip', 'rar'] if filter_types is None else filter_types
    # Shortcut: a plain file needs no directory walk.
    if os.path.isfile(read_path):
        return [read_path]
    grouped = {}
    __build_directory_dict(grouped, read_path, filter_types=filter_types)
    # Flatten the per-directory lists, dropping falsy entries.
    collected = []
    for bucket in grouped.values():
        collected.extend(item for item in bucket if item)
    return collected
def get_line_count(file_path):
    """Return the number of lines in the UTF-8 text file at *file_path*."""
    total = 0
    with open(file_path, 'r', encoding='utf-8') as handle:
        for _ in handle:
            total += 1
    return total
def read_file_and_read_count_exec(file_path):
    """Build one record from a file: its name fields plus its line count.

    The file-name stem (text before the first dot) is split on '_' into
    columns, and the file's line count is appended as the final column.
    """
    stem = os.path.basename(file_path).split('.')[0]
    record = stem.split('_')
    # Count lines directly; the file is assumed to be UTF-8 text.
    with open(file_path, 'r', encoding='utf-8') as handle:
        record.append(sum(1 for _ in handle))
    return record
def read_file_and_read_count(index, file_paths, datas):
    """Process one batch of files in a worker process.

    Reads each file in *file_paths* on a 10-worker thread pool (I/O-bound,
    so threads overlap the waits) and appends the resulting records to the
    cross-process list *datas*.

    Args:
        index: zero-based batch index, used only for progress logging.
        file_paths: list of file paths belonging to this batch.
        datas: shared (Manager) list collecting records across processes.
    """
    pretty_print(f'开始执行:{index + 1}')
    with ThreadPoolExecutor(max_workers=10) as executor:
        colses = list(executor.map(read_file_and_read_count_exec, file_paths))
    datas.extend(colses)
    # Fix: removed a stray ']' from the completion message so it matches
    # the start message format above.
    pretty_print(f'结束执行:{index + 1},数据长度:{len(datas)}')
def get_name(x):
    """Compose the full measuring-point name from a row's columns.

    col3 and col4 contribute only when they are not the placeholder '无'
    ("none"); col2 and col6 are always included, in that order:
    [col3] + col2 + [col4] + col6.
    """
    parts = []
    if x['col3'] != '无':
        parts.append(x['col3'])
    parts.append(x['col2'])
    if x['col4'] != '无':
        parts.append(x['col4'])
    parts.append(x['col6'])
    return ''.join(parts)
def split_array(array, num):
    """Split *array* into consecutive chunks of at most *num* elements."""
    chunks = []
    start = 0
    while start < len(array):
        chunks.append(array[start:start + num])
        start += num
    return chunks
def pretty_print(*args):
    """Print the current timestamp followed by the comma-joined arguments."""
    joined = ",".join(str(arg) for arg in args)
    print(datetime.datetime.now(), joined)
# Script entry point: scan an exported CMS data directory, count the lines in
# every .txt file across 10 worker processes, then aggregate everything into
# one sorted CSV table.
if __name__ == '__main__':
    # Manager list: shared across pool processes so workers can append results.
    datas = multiprocessing.Manager().list()
    all_files = read_files(r'D:\cms数据\张崾先风电场2期-导出\CMSFTPServer\ZYXFDC2', ['txt'])
    # Alternate (test) input path:
    # all_files = read_files(r'D:\cms数据\测试\result\CMSFTPServer\ZYXFDC2', ['txt'])
    pretty_print(f"文件长度{len(all_files)}")
    # Batch the files 5000 at a time; each batch becomes one pool task.
    arrays = split_array(all_files, 5000)
    pretty_print(f"切分个数{len(arrays)}")
    with multiprocessing.Pool(10) as pool:
        pool.starmap(read_file_and_read_count, [(index, file_paths, datas) for index, file_paths in enumerate(arrays)])
    # NOTE(review): assumes every file-name stem splits into exactly 9 '_'
    # fields (9 + line count = 10 columns); a mismatched file would break the
    # DataFrame construction — confirm against the actual data layout.
    df = pd.DataFrame(data=list(datas), columns=[f'col{i}' for i in range(10)])
    # col8 appears to be a yyyymmddHHMMSS timestamp; unparsable values become NaT.
    df['col8'] = pd.to_datetime(df['col8'], format='%Y%m%d%H%M%S', errors='coerce')
    df.sort_values(by=['col1', 'col8'], inplace=True)
    # Derived column: full measuring-point name assembled from col2/col3/col4/col6.
    df['测点完整名称'] = df.apply(get_name, axis=1)
    df.to_csv('d://cms数据//cms_data.csv', index=False, encoding='utf8')
|