WaveTrans.py

import datetime
import json
import multiprocessing

import pandas as pd

from service.plt_service import get_all_wind
from service.trans_service import get_wave_conf, save_df_to_db, get_or_create_wave_table, \
    get_wave_data, delete_exist_wave_data
from utils.file.trans_methods import *
from utils.systeminfo.sysinfo import get_available_cpu_count_with_percent
from os.path import *

class WaveTrans(object):
    """Read raw wave (vibration) CSV files for a wind field and load them into the <field_code>_wave table."""

    def __init__(self, field_code, read_path, save_path: str):
        self.field_code = field_code
        self.read_path = read_path
        self.save_path = save_path
        self.begin = datetime.datetime.now()
    def get_data_exec(self, func_code, arg):
        # Execute the user-supplied code to define get_data(), then call it on arg.
        local_env = {}
        exec(func_code, globals(), local_env)
        return local_env['get_data'](arg)
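    # A minimal sketch (an assumption, not part of the original module) of what the
    # user-supplied base_param_exec snippet is expected to define: a function named
    # get_data(file_path) that parses one CSV file and returns a 6-tuple of
    # (wind_turbine_name, time_stamp, sampling_frequency, rotational_speed,
    #  mesure_point_name, mesure_data), matching the unpacking done in run().
    # Because run() rejects snippets containing 'import ', the snippet can only use
    # names already available in this module (e.g. pd from the wildcard imports):
    #
    #     def get_data(file_path):
    #         raw = pd.read_csv(file_path)   # hypothetical file layout
    #         return (raw['turbine_name'][0], raw['time'][0],
    #                 raw['sampling_frequency'][0], raw['rotational_speed'][0],
    #                 raw['point_name'][0], raw['wave'].tolist())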
    def del_exists_data(self, df):
        # Delete rows already stored in the DB that overlap with df on turbine,
        # timestamp, sampling frequency and measuring point, to avoid duplicates.
        min_date, max_date = df['time_stamp'].min(), df['time_stamp'].max()
        db_df = get_wave_data(self.field_code + '_wave', min_date, max_date)
        exists_df = pd.merge(db_df, df,
                             on=['wind_turbine_name', 'time_stamp', 'sampling_frequency', 'mesure_point_name'],
                             how='inner')
        ids = [int(i) for i in exists_df['id'].to_list()]
        if ids:
            delete_exist_wave_data(self.field_code + "_wave", ids)
    def run(self):
        all_files = read_files(self.read_path, ['csv'])
        print(len(all_files))

        # Use at most 1/2 of the system CPUs for the worker pool.
        split_count = get_available_cpu_count_with_percent(1 / 2)

        all_wind, _ = get_all_wind(self.field_code, False)
        get_or_create_wave_table(self.field_code + '_wave')
        wave_conf = get_wave_conf(self.field_code)

        base_param_exec = wave_conf['base_param_exec']
        map_dict = {}
        if base_param_exec:
            base_param_exec = base_param_exec.replace('\r\n', '\n').replace('\t', ' ')
            print(base_param_exec)
            if 'import ' in base_param_exec:
                raise Exception("import statements are not supported in the user-defined function")

        # Map each configured measuring-point name to its 'conf_' key suffix.
        mesure_points = [key for key, value in wave_conf.items() if str(key).startswith('conf_') and value]
        for point in mesure_points:
            map_dict[wave_conf[point]] = point.replace('conf_', '')

        # Parse every CSV file in parallel with the user-supplied get_data function.
        with multiprocessing.Pool(split_count) as pool:
            file_datas = pool.starmap(self.get_data_exec, [(base_param_exec, i) for i in all_files])
        print("File reading took:", datetime.datetime.now() - self.begin)

        result_list = list()
        for file_data in file_datas:
            wind_turbine_name, time_stamp, sampling_frequency, rotational_speed, mesure_point_name, mesure_data = \
                file_data[0], file_data[1], file_data[2], file_data[3], file_data[4], file_data[5]
            if mesure_point_name in map_dict:
                result_list.append(
                    [wind_turbine_name, time_stamp, rotational_speed, sampling_frequency, mesure_point_name,
                     mesure_data])

        df = pd.DataFrame(result_list,
                          columns=['wind_turbine_name', 'time_stamp', 'rotational_speed', 'sampling_frequency',
                                   'mesure_point_name', 'mesure_data'])
        df['time_stamp'] = pd.to_datetime(df['time_stamp'], errors='coerce')
        df['mesure_point_name'] = df['mesure_point_name'].map(map_dict)
        df.dropna(subset=['mesure_point_name'], inplace=True)
        # Map turbine names to turbine numbers; keep the original name if no mapping exists.
        df['wind_turbine_number'] = df['wind_turbine_name'].map(all_wind).fillna(df['wind_turbine_name'])
        df['mesure_data'] = df['mesure_data'].apply(lambda x: json.dumps(x))
        df.sort_values(by=['time_stamp', 'mesure_point_name'], inplace=True)

        # Remove overlapping rows already in the DB, then bulk-insert the new data.
        self.del_exists_data(df)
        save_df_to_db(self.field_code + '_wave', df, batch_count=1000)

        print("Total time:", datetime.datetime.now() - self.begin)
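
# A minimal usage sketch (hypothetical values, not part of the original module).
# It assumes the wave configuration (including base_param_exec and the conf_* point
# mappings) is already stored for the field code and reachable via service.trans_service.
if __name__ == '__main__':
    trans = WaveTrans(field_code='SKF001',          # hypothetical field code
                      read_path=r'/data/wave/in',   # hypothetical directory of raw CSV files
                      save_path=r'/data/wave/out')  # accepted by __init__ but not used by run()
    trans.run()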