import multiprocessing
import os.path
import traceback

from etl.common.PathsAndTable import PathsAndTable
from service.trans_conf_service import update_trans_transfer_progress
from service.trans_service import save_partation_file_to_db, save_file_to_db
from utils.file.trans_methods import split_array
from utils.log.trans_log import trans_print
from utils.systeminfo.sysinfo import get_available_cpu_count_with_percent


class SaveToDb(object):
    """Persist transformed files to the wind-farm database using a multiprocessing pool."""

    def __init__(self, pathsAndTable: PathsAndTable, update_files, batch_count=100000):
        self.pathsAndTable = pathsAndTable
        self.batch_count = batch_count
        self.update_files = update_files

    def mutiprocessing_to_save_db(self):
        # Start saving the transformed files to the database.
        all_saved_files = self.update_files
        # Keep only files whose turbine code (file name without extension) has an
        # entry in the column-translation map.
        all_saved_files = [i for i in all_saved_files if
                           os.path.basename(i).split(".")[0] in self.pathsAndTable.wind_col_trans.keys()]

        self.pathsAndTable.create_wind_farm_db()
        # Use roughly two thirds of the available CPUs, but never more workers than files.
        split_count = get_available_cpu_count_with_percent(percent=2 / 3)
        split_count = split_count if split_count <= len(all_saved_files) else len(all_saved_files)
        all_arrays = split_array(all_saved_files, split_count)
        try:
            for index, arr in enumerate(all_arrays):
                with multiprocessing.Pool(split_count) as pool:
                    if self.pathsAndTable.read_type in ['minute', 'second']:
                        # For minute/second data, also pass the turbine's column translation
                        # and the file's parent directory name.
                        pool.starmap(save_partation_file_to_db,
                                     [(self.pathsAndTable.get_table_name(), file,
                                       self.pathsAndTable.wind_col_trans[os.path.basename(file).split(".")[0]],
                                       os.path.basename(os.path.dirname(file)),
                                       self.batch_count) for file in arr])
                    else:
                        pool.starmap(save_file_to_db,
                                     [(self.pathsAndTable.get_table_name(), file, self.batch_count)
                                      for file in arr])
                # Progress advances from 70% to 99% as the batches complete.
                update_trans_transfer_progress(self.pathsAndTable.id,
                                               round(70 + 29 * (index + 1) / len(all_arrays), 2),
                                               self.pathsAndTable.save_db)
        except Exception as e:
            trans_print(traceback.format_exc())
            message = "Error while saving to the database, system returned: " + str(e)
            raise ValueError(message)

    def run(self):
        if self.pathsAndTable.save_db:
            self.mutiprocessing_to_save_db()
        update_trans_transfer_progress(self.pathsAndTable.id, 99, self.pathsAndTable.save_db)
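

# --- Usage sketch (illustrative only, not invoked by the pipeline) ---
# A minimal example of how SaveToDb might be driven, assuming a PathsAndTable
# instance has already been configured upstream; the variable names and paths
# below are hypothetical placeholders.
#
#   paths_and_table = PathsAndTable(...)  # built earlier in the ETL flow (arguments omitted)
#   updated_files = ["/data/trans_out/WT001/WT001.csv"]  # hypothetical transformed file paths
#   SaveToDb(paths_and_table, updated_files, batch_count=100000).run()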