# data_analyse_origin.py
  1. import os
  2. import json
  3. import pandas as pd
  4. import numpy as np
  5. import seaborn as sns
  6. import matplotlib.pyplot as plt
  7. from matplotlib.ticker import MaxNLocator
  8. from typing import Tuple, List
  9. import warnings
  10. import time
  11. import sys
  12. import frequency_filter as ff
  13. from datetime import datetime
# Silence FutureWarnings (pandas/matplotlib emit many) so batch output stays readable.
warnings.filterwarnings("ignore", category=FutureWarning)
plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei font so Chinese plot labels render
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly with a CJK font
  17. def result_main():
  18. """
  19. 创建data目录,返回历史分析数据存放的文件路径
  20. """
  21. # 获取当前程序的绝对路径
  22. python_interpreter_path = sys.executable
  23. project_directory = os.path.dirname(python_interpreter_path)
  24. data_folder = os.path.join(project_directory, 'data')
  25. # 检查data文件夹是否存在,如果不存在则创建
  26. if not os.path.exists(data_folder):
  27. os.makedirs(data_folder)
  28. # CSV文件路径
  29. csv_file_path = os.path.join(data_folder, 'history_data.csv')
  30. # 检查CSV文件是否存在,如果不存在则创建一个空的CSV文件
  31. if not os.path.exists(csv_file_path):
  32. pd.DataFrame(columns=['时间', '场站', '风机编号', '采样频率',
  33. '叶片1角度偏差', '叶片2角度偏差', '叶片3角度偏差', '相对角度偏差',
  34. '叶片1净空值', '叶片2净空值', '叶片3净空值',
  35. '叶片1扭转', '叶片2扭转', '叶片3扭转', '平均扭转',
  36. '振动幅值', '振动主频']).to_csv(csv_file_path, index=False)
  37. return csv_file_path
  38. def delete_data(name):
  39. """
  40. 删除历史分析数据
  41. :param name: 删除条件
  42. :return: csv文件路径
  43. """
  44. # 获取当前程序的绝对路径
  45. python_interpreter_path = sys.executable
  46. project_directory = os.path.dirname(python_interpreter_path)
  47. data_folder = os.path.join(project_directory, 'data')
  48. # CSV文件路径
  49. csv_file_path = os.path.join(data_folder, 'history_data.csv')
  50. df = pd.read_csv(csv_file_path)
  51. condition = ((df['时间'].astype(str).str.contains(name[0])) &
  52. (df['场站'].astype(str).str.contains(name[1])) &
  53. (df['风机编号'].astype(str).str.contains(name[2])))
  54. # 删除满足条件的行
  55. df = df[~condition]
  56. # 如果需要,可以将修改后的 DataFrame 保存回 CSV 文件
  57. df.to_csv(csv_file_path, index=False)
  58. return csv_file_path
  59. def history_data(name):
  60. """
  61. 读取历史分析数据
  62. :param name: 接口返回列表
  63. :return:
  64. """
  65. time_code = name[0]
  66. wind_name = name[1]
  67. turbine_code = name[2]
  68. # 获取当前程序的绝对路径
  69. python_interpreter_path = sys.executable
  70. project_directory = os.path.dirname(python_interpreter_path)
  71. data_folder = os.path.join(project_directory, 'data')
  72. time_code_cleaned = time_code.replace("-", "").replace(":", "").replace(" ", "")
  73. json_filename = f"{wind_name}_{turbine_code}_{time_code_cleaned}.json"
  74. json_file_path = os.path.join(data_folder, json_filename)
  75. if not os.path.exists(json_file_path):
  76. raise ValueError("文件不存在")
  77. with open(json_file_path, 'r') as f:
  78. data = json.load(f)
  79. return data
def data_analyse(path: List[str]):
    """
    Run the full blade/tower analysis pipeline for one measurement pair.

    Reads the locating and measuring capture files, computes blade pitch-angle
    deviations, tip clearance, blade twist and tower vibration, appends a
    one-row summary to ``data/history_data.csv``, writes the full result set
    to a JSON file next to it, and returns the JSON payload.

    :param path: [locate_file, measure_file, cone_angle_deg, axial_inclination_deg]
                 (the two angles arrive as strings and are converted here).
    :return: dict with downsampled plot series and the analysis summary table.
    """
    # --- basic configuration -------------------------------------------------
    locate_file = path[0]
    measure_file = path[1]
    angle_cone = float(path[2])  # cone angle (degrees)
    axial_inclination = float(path[3])  # axial inclination (degrees)
    skew_angle = 5  # yaw angle; NOTE(review): never used below — confirm before removing
    noise_reduction = 0.000001  # drop a distance value whose sample share is below this fraction
    min_difference = 1  # adjacent-sample distance jump that flags a possible cycle boundary
    group_length = [10000, 10000, 5000]  # profile slice lengths (original note: mid/root/tip)
    return_list = []
    # Read file metadata: wind farm, turbine id, timestamp, sampling frequency
    # and the two per-channel pitch angles.
    wind_name, turbine_code, time_code, sampling_fq, angle_nan, angle_cen = find_param(locate_file)
    wind_name_1, turbine_code_1, time_code_1, sampling_fq_1, angle_tip, angle_root = find_param(measure_file)
    # x1000 — presumably kHz -> Hz; confirm the units find_param returns.
    sampling_fq_1 = sampling_fq_1 * 1000
    sampling_fq = sampling_fq * 1000
    print(wind_name, turbine_code, time_code, sampling_fq, angle_nan, angle_cen)
    # NOTE(review): prints time_code instead of time_code_1 — likely a typo.
    print(wind_name_1, turbine_code_1, time_code, sampling_fq_1, angle_tip, angle_root)
    # Load each file, repair timer anomalies, split into its two channels.
    data_nan, data_cen = process_data(locate_file)
    data_tip, data_root = process_data(measure_file)
    # Denoise every channel; for tip/root/nan also detect the blade sweep
    # start/end points of each rotation cycle.
    start_tip, end_tip, filtered_data_tip = cycle_calculate(data_tip, noise_reduction, min_difference)
    start_root, end_root, filtered_data_root = cycle_calculate(data_root, noise_reduction, min_difference)
    start_nan, end_nan, filtered_data_nan = cycle_calculate(data_nan, noise_reduction, min_difference)
    # Denoise the hub-centre channel, use its mean as the tower-centre
    # distance, then project the signal to horizontal via the pitch angle.
    filtered_data_cen = tower_filter(data_cen, noise_reduction)
    dist_cen = np.mean(filtered_data_cen.iloc[:, 1].tolist())
    filtered_data_cen.iloc[:, 1] = filtered_data_cen.iloc[:, 1] * np.cos(np.deg2rad(angle_cen + axial_inclination))
    # Align the boundary sequences so tip and root cycles start/end in lockstep.
    if end_tip.iloc[0, 0] < start_root.iloc[0, 0]:
        start_tip = start_tip.drop(start_tip.index[0])
        end_tip = end_tip.drop(end_tip.index[0])
    if start_root.iloc[0, 0] < start_tip.iloc[0, 0] < end_tip.iloc[0, 0] < end_root.iloc[0, 0]:
        pass
    else:
        raise ValueError("The elements are not in the expected order.")
    # Tower distance at root/mid/tip positions; FFT analysis of the hub centre.
    tower_dist_tip = ff.tower_cal(filtered_data_tip, start_tip, end_tip, sampling_fq_1)
    tower_dist_root = ff.tower_cal(filtered_data_root, start_root, end_root, sampling_fq_1)
    tower_dist_nan = ff.tower_cal(filtered_data_nan, start_nan, end_nan, sampling_fq)
    lowpass_data, fft_x, fft_y, tower_freq, tower_max = ff.process_fft(filtered_data_cen, sampling_fq)
    # Normalise each rotation cycle; per blade this yields the scatter table,
    # fitted line table, boundary rows, reference cycle length and mean minimum.
    result_line_tip, result_scatter_tip, border_rows_tip, cycle_len_tip, min_tip \
        = data_normalize(filtered_data_tip, start_tip, end_tip, group_length[0])
    result_line_root, result_scatter_root, border_rows_root, cycle_len_root, min_root \
        = data_normalize(filtered_data_root, start_root, end_root, group_length[1])
    result_line_nan, result_scatter_nan, border_rows_nan, cycle_len_nan, min_nan \
        = data_normalize(filtered_data_nan, start_nan, end_nan, group_length[2])
    # Average blade profile plus blade-to-blade shape differences.
    result_avg_tip, result_diff_tip = blade_shape(result_line_tip)
    result_avg_root, result_diff_root = blade_shape(result_line_root)
    # Coordinate-normalise the tip/nan boundary rows and pitch angles.
    border_rows_tip_new, angle_tip_new = coordinate_normalize(border_rows_tip, angle_tip)
    border_rows_nan_new, angle_nan_new = coordinate_normalize(border_rows_nan, angle_nan)
    # Rotor radius at each measuring position.
    tip_r = radius_cal(border_rows_tip_new, angle_tip_new, dist_cen, angle_cen, axial_inclination, angle_cone)
    root_r = radius_cal(border_rows_root, angle_root, dist_cen, angle_cen, axial_inclination, angle_cone)
    nan_r = radius_cal(border_rows_nan_new, angle_nan_new, dist_cen, angle_cen, axial_inclination, angle_cone)
    # Absolute/relative pitch angle, linear speed, and blade internal centre
    # distance at each measuring position.
    pitch_angle_tip, aero_dist_tip, v_speed_tip, cen_blade_tip = (
        blade_angle_aero_dist(border_rows_tip, tip_r, cycle_len_tip, tower_dist_tip, angle_tip_new))
    pitch_angle_root, aero_dist_root, v_speed_root, cen_blade_root = (
        blade_angle_aero_dist(border_rows_root, root_r, cycle_len_root, tower_dist_root, angle_root))
    pitch_angle_nan, aero_dist_nan, v_speed_nan, cen_blade_nan = (
        blade_angle_aero_dist(border_rows_nan_new, nan_r, cycle_len_nan, tower_dist_nan, angle_nan_new))
    # Convert lists to numpy arrays for vectorised arithmetic.
    cen_blade_tip_array = np.array(cen_blade_tip)
    cen_blade_nan_array = np.array(cen_blade_nan)
    min_tip_array = np.array(min_tip)
    min_nan_array = np.array(min_nan)
    # Distance between the blade's internal centre point and its minimum,
    # projected to horizontal with the normalised pitch angle.
    abs_diff = np.abs(cen_blade_tip_array - min_tip_array)
    abs_diff_nan = np.abs(cen_blade_nan_array - min_nan_array)
    blade_dist_tip = abs_diff * np.cos(np.deg2rad(angle_tip_new))
    blade_dist_nan = abs_diff_nan * np.cos(np.deg2rad(angle_nan_new))
    # NOTE(review): the two .tolist() results below are discarded — dead code.
    blade_dist_tip.tolist()
    blade_dist_nan.tolist()
    # Rotor-speed vs clearance scatter tables.
    dist_distribute = blade_dist_distribute_cal(filtered_data_tip, start_tip, end_tip,
                                                tower_dist_tip, angle_tip_new, blade_dist_tip)
    dist_distribute_nan = blade_dist_distribute_cal(filtered_data_nan, start_nan, end_nan,
                                                    tower_dist_nan, angle_nan_new, blade_dist_nan)
    # dist_distribute = [df.round(5) for df in dist_distribute]
    # NOTE(review): the tip distribution computed above is overwritten here by
    # the "nan" channel — confirm which channel is actually intended.
    dist_distribute = [df.round(5) for df in dist_distribute_nan]
    # Per blade: min/max clearance, the rotor speed at which each occurs,
    # and the midpoint of min and max.
    min_values = []
    min_keys = []
    max_values = []
    max_keys = []
    mean_values = []
    for df in dist_distribute:
        second_col_min = df[df.columns[1]].min()
        second_col_max = df[df.columns[1]].max()
        min_row = df[df[df.columns[1]] == second_col_min]
        max_row = df[df[df.columns[1]] == second_col_max]
        min_values.append(round(second_col_min, 2))
        min_keys.append(round(min_row.iloc[0][df.columns[0]], 2))
        max_values.append(round(second_col_max, 2))
        max_keys.append(round(max_row.iloc[0][df.columns[0]], 2))
    for i in range(3):
        mean_values.append(round((max_values[i] + min_values[i]) / 2, 2))
    # Scale the line/scatter tables: x by linear speed, y by the pitch/cone/
    # inclination projection, giving the real blade cross-section profile.
    for df in result_line_tip:
        first_column = df.iloc[:, 0]
        sec_column = df.iloc[:, 1]
        df.iloc[:, 0] = first_column * v_speed_tip
        df.iloc[:, 1] = sec_column * np.cos(np.deg2rad(angle_tip_new + angle_cone + axial_inclination))
    for df in result_line_root:
        first_column = df.iloc[:, 0]
        sec_column = df.iloc[:, 1]
        df.iloc[:, 0] = first_column * v_speed_root
        df.iloc[:, 1] = sec_column * np.cos(np.deg2rad(angle_root + angle_cone + axial_inclination))
    for df in result_scatter_tip:
        first_column = df.iloc[:, 0]
        sec_column = df.iloc[:, 1]
        df.iloc[:, 0] = first_column * v_speed_tip
        df.iloc[:, 1] = sec_column * np.cos(np.deg2rad(angle_tip_new + angle_cone + axial_inclination))
    for df in result_scatter_root:
        first_column = df.iloc[:, 0]
        sec_column = df.iloc[:, 1]
        df.iloc[:, 0] = first_column * v_speed_root
        df.iloc[:, 1] = sec_column * np.cos(np.deg2rad(angle_root + angle_cone + axial_inclination))
    # Scale the average-profile x axis by linear speed to get real blade length.
    avg_tip = result_avg_tip.iloc[:, 0]
    result_avg_tip.iloc[:, 0] = avg_tip * v_speed_tip
    avg_root = result_avg_root.iloc[:, 0]
    result_avg_root.iloc[:, 0] = avg_root * v_speed_root
    # Blade twist: |root pitch - tip pitch| per blade, plus the average.
    twist_1 = round(np.abs(pitch_angle_root[0] - pitch_angle_tip[0]), 2)
    twist_2 = round(np.abs(pitch_angle_root[1] - pitch_angle_tip[1]), 2)
    twist_3 = round(np.abs(pitch_angle_root[2] - pitch_angle_tip[2]), 2)
    twist_avg = round((twist_1 + twist_2 + twist_3) / 3, 2)
    # Downsampling stride for the JSON payload (keeps the API response small).
    sampling_num = int(0.015 * sampling_fq_1)
    # Convert the time column from timer ticks to seconds.
    # NOTE(review): divisor 5000000 implies a 5 MHz clock — confirm against
    # the acquisition hardware.
    data_tip.iloc[:, 0] = data_tip.iloc[:, 0] / 5000000
    data_root.iloc[:, 0] = data_root.iloc[:, 0] / 5000000
    lowpass_data.iloc[:, 0] = lowpass_data.iloc[:, 0] / 5000000
    # Collect the summary values destined for the history CSV (order must
    # match the column list below).
    return_list.append(str(time_code))
    return_list.append(str(wind_name))
    return_list.append(str(turbine_code))
    return_list.append(sampling_fq_1)
    return_list.append(pitch_angle_root[0])
    return_list.append(pitch_angle_root[1])
    return_list.append(pitch_angle_root[2])
    return_list.append(pitch_angle_root[3])
    return_list.append(mean_values[0])
    return_list.append(mean_values[1])
    return_list.append(mean_values[2])
    return_list.append(twist_1)
    return_list.append(twist_2)
    return_list.append(twist_3)
    return_list.append(twist_avg)
    return_list.append(tower_max)
    return_list.append(tower_freq)
    # Turn the summary into a single-row DataFrame to append to the CSV.
    df_new_row = pd.DataFrame([return_list],
                              columns=['时间', '场站', '风机编号', '采样频率',
                                       '叶片1角度偏差', '叶片2角度偏差', '叶片3角度偏差', '相对角度偏差',
                                       '叶片1净空值', '叶片2净空值', '叶片3净空值',
                                       '叶片1扭转', '叶片2扭转', '叶片3扭转', '平均扭转',
                                       '振动幅值', '振动主频'])
    # Full result payload: downsampled raw traces, FFT plot data, per-blade
    # profiles, clearance distributions and the summary table.
    json_output = {
        'original_plot': {
            'blade_tip': {
                'xdata': data_tip.iloc[:, 0].tolist()[::sampling_num],
                'ydata': data_tip.iloc[:, 1].tolist()[::sampling_num]
            },
            'blade_root': {
                'xdata': data_root.iloc[:, 0].tolist()[::sampling_num],
                'ydata': data_root.iloc[:, 1].tolist()[::sampling_num]
            }
        },
        'fft_plot': {
            'lowpass': {
                'xdata': lowpass_data['time'].tolist()[::sampling_num],
                'ydata': lowpass_data['distance_filtered'].tolist()[::sampling_num],
                'xmax': max(lowpass_data['time'].tolist()),
                'xmin': min(lowpass_data['time'].tolist()),
                'ymax': max(lowpass_data['distance_filtered'].tolist()) + 0.02,
                'ymin': min(lowpass_data['distance_filtered'].tolist()) - 0.02
            },
            'fft': {
                'xdata': fft_x,
                'ydata': fft_y,
                'xmax': max(fft_x),
                'xmin': min(fft_x),
                'ymax': max(fft_y) + 0.02,
                'ymin': 0
            }
        },
        'blade_tip': {
            'first_blade': {
                'xdata': result_line_tip[0].iloc[:, 0].tolist(),
                'ydata': result_line_tip[0].iloc[:, 1].tolist()
            },
            'second_blade': {
                'xdata': result_line_tip[1].iloc[:, 0].tolist(),
                'ydata': result_line_tip[1].iloc[:, 1].tolist()
            },
            'third_blade': {
                'xdata': result_line_tip[2].iloc[:, 0].tolist(),
                'ydata': result_line_tip[2].iloc[:, 1].tolist()
            },
            'avg_blade': {
                'xdata': result_avg_tip.iloc[:, 0].tolist(),
                'ydata': result_avg_tip.iloc[:, 1].tolist()
            }
        },
        'blade_root': {
            'first_blade': {
                'xdata': result_line_root[0].iloc[:, 0].tolist(),
                'ydata': result_line_root[0].iloc[:, 1].tolist()
            },
            'second_blade': {
                'xdata': result_line_root[1].iloc[:, 0].tolist(),
                'ydata': result_line_root[1].iloc[:, 1].tolist()
            },
            'third_blade': {
                'xdata': result_line_root[2].iloc[:, 0].tolist(),
                'ydata': result_line_root[2].iloc[:, 1].tolist()
            },
            'avg_blade': {
                'xdata': result_avg_root.iloc[:, 0].tolist(),
                'ydata': result_avg_root.iloc[:, 1].tolist()
            }
        },
        'dist_distribution': {
            'first_blade': {
                'xdata': dist_distribute[0].iloc[:, 0].tolist(),
                'ydata': dist_distribute[0].iloc[:, 1].tolist()
            },
            'second_blade': {
                'xdata': dist_distribute[1].iloc[:, 0].tolist(),
                'ydata': dist_distribute[1].iloc[:, 1].tolist()
            },
            'third_blade': {
                'xdata': dist_distribute[2].iloc[:, 0].tolist(),
                'ydata': dist_distribute[2].iloc[:, 1].tolist()
            }
        },
        'analyse_table': {
            'pitch_angle_diff': {
                'blade_1': pitch_angle_root[0],
                'blade_2': pitch_angle_root[1],
                'blade_3': pitch_angle_root[2],
                'blade_relate': pitch_angle_root[3]
            },
            'aero_dist': {
                'first_blade': {
                    'x_min': min_keys[0],
                    'y_min': min_values[0],
                    'x_max': max_keys[0],
                    'y_max': max_values[0],
                    'y_diff': np.abs(max_values[0] - min_values[0]),
                    'y_ava': mean_values[0]
                },
                'second_blade': {
                    'x_min': min_keys[1],
                    'y_min': min_values[1],
                    'x_max': max_keys[1],
                    'y_max': max_values[1],
                    'y_diff': np.abs(max_values[1] - min_values[1]),
                    'y_ava': mean_values[1]
                },
                'third_blade': {
                    'x_min': min_keys[2],
                    'y_min': min_values[2],
                    'x_max': max_keys[2],
                    'y_max': max_values[2],
                    'y_diff': np.abs(max_values[2] - min_values[2]),
                    'y_ava': mean_values[2]
                }
            },
            'blade_twist': {
                'blade_1': twist_1,
                'blade_2': twist_2,
                'blade_3': twist_3,
                'blade_avg': twist_avg
            },
            'tower_vibration': {
                'max_vibration': tower_max,
                'main_vibration_freq': tower_freq
            }
        }
    }
    # The data directory lives next to the Python interpreter (same
    # convention as result_main).
    python_interpreter_path = sys.executable
    project_directory = os.path.dirname(python_interpreter_path)
    data_folder = os.path.join(project_directory, 'data')
    # Create the data folder if it does not exist.
    if not os.path.exists(data_folder):
        os.makedirs(data_folder)
    # History CSV path.
    csv_file_path = os.path.join(data_folder, 'history_data.csv')
    # On first run, create an empty CSV carrying only the header row.
    if not os.path.exists(csv_file_path):
        pd.DataFrame(columns=['时间', '场站', '风机编号', '采样频率',
                              '叶片1角度偏差', '叶片2角度偏差', '叶片3角度偏差', '相对角度偏差',
                              '叶片1净空值', '叶片2净空值', '叶片3净空值',
                              '叶片1扭转', '叶片2扭转', '叶片3扭转', '平均扭转',
                              '振动幅值', '振动主频']).to_csv(csv_file_path, index=False)
    df_new_row.to_csv(csv_file_path, mode='a', header=False, index=False)
    # Strip separators so the timestamp is file-name safe.
    time_code_cleaned = time_code.replace("-", "").replace(":", "").replace(" ", "")
    json_filename = f"{wind_name}_{turbine_code}_{time_code_cleaned}.json"
    json_file_path = os.path.join(data_folder, json_filename)
    # No explicit encoding needed only because json.dump escapes non-ASCII
    # by default (ensure_ascii=True), so the file content is pure ASCII.
    with open(json_file_path, 'w') as json_file:
        json.dump(json_output, json_file, indent=4)
    print('csv文件路径' + str(csv_file_path))
    # print(result_line_tip[0].iloc[:, 0])
    # print(result_line_root[0].iloc[:, 0])
    print('振动主频' + str(tower_freq))
    print('振动幅值' + str(tower_max))
    print('净空最小值', min_values)
    print('最小值对应的键', min_keys)
    print('净空最大值', max_values)
    print('最大值对应的键', max_keys)
    print('叶尖速度' + str(v_speed_tip), '叶根速度' + str(v_speed_root))
    print('新俯仰角' + str(angle_tip_new))
    print('轮毂中心距离' + str(dist_cen))
    print('叶根原始数据采样时间长度' + str(data_root.iloc[-1, 0]))
    print('-' * 50)
    print(json.dumps(json_output, indent=4, ensure_ascii=False))
    # plot_data(result_line_tip, 'line', 'data1')
    # plot_data(result_diff_tip, 'line', 'data_diff_1')
    # plot_data(result_scatter_tip, 'scatter', 'data1')
    plot_data(result_line_root, 'line', 'data2')
    # plot_data(result_diff_root, 'line', 'data_diff_2')
    plot_data(result_scatter_root, 'scatter', 'data2')
    # plot_data(dist_distribute, 'scatter', 'dist_distribute')
    return json_output
  416. def process_data(file_path):
  417. """
  418. 打开、解决时间重置、按时间清洗异常值、分列数据
  419. """
  420. # 读取第2、4、9列的数据
  421. data = pd.read_csv(file_path, usecols=[1, 3, 4, 8, 9], header=None, engine='c')
  422. data = data.head(int(len(data) * 0.95))
  423. print('原始数据长度' + str(len(data)))
  424. '''
  425. # 绘制原始数据图
  426. # 只取前1%的数据
  427. # data = data.head(int(len(data)* 0.01))
  428. data.columns = ['time', 'distance1', 'distance2']
  429. plt.figure(figsize=(300, 150))
  430. sns.scatterplot(data=data, x='time', y='distance1', s=50, color='green')
  431. sns.scatterplot(data=data, x='time', y='distance2', s=50, color='red')
  432. abxy = plt.gca() # 获取当前坐标轴对象
  433. plt.grid(linewidth=2) # 设置网格线宽度为2
  434. abxy.xaxis.set_major_locator(MaxNLocator(nbins=100)) # 设置x轴主刻度的最大数量为10
  435. plt.xlabel('时间', fontsize=16, fontweight='bold') # 添加x轴标签
  436. plt.ylabel('距离(m)', fontsize=16, fontweight='bold') # 添加y轴标签
  437. abxy.tick_params(axis='x', labelsize=14, labelcolor='black', width=2) # 设置x轴刻度标签
  438. abxy.tick_params(axis='y', labelsize=14, labelcolor='black', width=2) # 设置y轴刻度标签
  439. plt.savefig(f"{"original"}.png", dpi=100, pil_kwargs={"icc_profile": False})
  440. plt.close()
  441. '''
  442. # 找到第一列中最大值和最小值的位置
  443. max_value = data.iloc[:, 0].max()
  444. max_index = data.iloc[:, 0].idxmax()
  445. min_index = data.iloc[:, 0].idxmin()
  446. # 检查最小值的位置是否是最大值位置的下一个
  447. if min_index == max_index + 1:
  448. # 将最小值及其之后的所有值都加上最大值
  449. data.iloc[min_index:, 0] += max_value
  450. # 按时间列筛选清洗异常值
  451. last_time = data.iloc[-1, 0]
  452. first_time = data.iloc[0, 0]
  453. filtered_data = data[(data.iloc[:, 0] > last_time) & (data.iloc[:, 0] < first_time)]
  454. print(f'时间列异常数据: {filtered_data}')
  455. print(f'起止时间: {first_time}, {last_time}')
  456. data = data[data.iloc[:, 0] >= first_time]
  457. data = data[data.iloc[:, 0] <= last_time]
  458. data.reset_index(drop=True, inplace=True)
  459. # 计算最小值
  460. min_time = data.iloc[:, 0].min()
  461. data.iloc[:, 0] -= min_time
  462. # 分为两组数据
  463. data_1 = data.iloc[:, [0, 1, 2]]
  464. data_2 = data.iloc[:, [0, 3, 4]]
  465. # 分别命名列
  466. data_1.columns = ['time', 'distance', 'grey']
  467. data_2.columns = ['time', 'distance', 'grey']
  468. return data_1, data_2
  469. def tower_filter(data_group: pd.DataFrame, noise_threshold: float):
  470. """
  471. 对轮毂中心数据进行降噪,和前项填充
  472. :param data_group: process_data计算完成后轮毂中心的数据。
  473. :param noise_threshold: 去掉占比小于noise_threshold的数据。
  474. :return: filtered_data:降噪后的数据
  475. """
  476. print('正在进行数据清洗......')
  477. time.sleep(1)
  478. # 计算distance的分布
  479. distance_counts = data_group['distance'].value_counts(normalize=True)
  480. noise_distance_threshold = distance_counts[distance_counts < noise_threshold].index
  481. noise_indices = data_group[data_group['distance'].isin(noise_distance_threshold)].index
  482. data_group.loc[noise_indices, 'distance'] = np.nan
  483. # 选择频率最大的5个值
  484. top_5_distances = distance_counts.head(5).index
  485. mean_values = data_group[data_group['distance'].isin(top_5_distances)]['distance'].mean()
  486. data_group.loc[(data_group['distance'] < mean_values-20) | (
  487. data_group['distance'] > mean_values*1.1), 'distance'] = np.nan
  488. nan_count = data_group['distance'].isna().sum()
  489. all_count = data_group.shape[0]
  490. print(f"中值是:{mean_values},替换为NaN的异常distance值的数量是: {nan_count}, 总数量是: {all_count},"
  491. f"占比: {nan_count / all_count * 100:.2f}%")
  492. # 前向填充
  493. data_group['distance'] = data_group['distance'].fillna(method='ffill')
  494. filtered_data = data_group
  495. return filtered_data
def cycle_calculate(data_group: pd.DataFrame, noise_threshold: float, min_distance: float):
    """
    Denoise one channel and locate the cycle boundary points — the samples
    where a blade enters (leading edge) and leaves (trailing edge) the beam.

    :param data_group: one channel as produced by process_data
                       (columns: time, distance, grey); mutated in place.
    :param noise_threshold: drop distance values whose sample share is below this.
    :param min_distance: distance jump that separates blade and tower readings.
    :return: (start_points, end_points, filtered_data) — cycle start rows,
             cycle end rows, and the denoised data.
    """
    print('正在计算周期节点......')
    time.sleep(1)  # NOTE(review): cosmetic pause only — confirm it can be removed
    # Relative frequency of each distance value; very rare values are noise.
    distance_counts = data_group['distance'].value_counts(normalize=True)
    noise_distance_threshold = distance_counts[distance_counts < noise_threshold].index
    noise_indices = data_group[data_group['distance'].isin(noise_distance_threshold)].index
    data_group.loc[noise_indices, 'distance'] = np.nan
    # Estimate the typical distance from the five most frequent values.
    top_5_distances = distance_counts.head(5).index
    mean_values = data_group[data_group['distance'].isin(top_5_distances)]['distance'].mean()
    # Outlier band: more than 30 below, or more than 10% above, the typical
    # value (tower_filter uses 20 below — the bands differ intentionally?
    # NOTE(review): confirm).
    data_group.loc[(data_group['distance'] < mean_values - 30) | (
            data_group['distance'] > mean_values * 1.1), 'distance'] = np.nan
    nan_count = data_group['distance'].isna().sum()
    all_count = data_group.shape[0]
    print(f"中值是:{mean_values},替换为NaN的distance异常值的数量是: {nan_count}, 总数量是: {all_count},"
          f"占比: {nan_count / all_count * 100:.2f}%")
    # Forward-fill the gaps.  NOTE(review): fillna(method=...) is deprecated
    # in modern pandas (removed in 3.0) — prefer .ffill() when touching this.
    data_group['distance'] = data_group['distance'].fillna(method='ffill')
    filtered_data = data_group
    # Sample-to-sample distance jumps: a large positive jump is a candidate
    # trailing-edge transition, a large negative jump a leading-edge one.
    filtered_data['distance_diff'] = filtered_data['distance'].diff()
    large_diff_indices = filtered_data[filtered_data['distance_diff'] > min_distance].index
    small_diff_indices = filtered_data[filtered_data['distance_diff'] < -min_distance].index
    filtered_data = filtered_data.drop(columns=['distance_diff'])
    start_points = pd.DataFrame()
    end_points = pd.DataFrame()
    # Upward jumps: accept as a cycle end only if all of the 200 rows before
    # the jump sit at least min_distance below the post-jump level.
    for idx in large_diff_indices:
        current_distance = filtered_data.loc[idx, 'distance']
        next_rows_large = filtered_data.loc[idx - 200: idx - 1]
        if next_rows_large['distance'].le(current_distance - min_distance).all():
            # The row just before the jump is the last blade sample — the end point.
            end_points = pd.concat([end_points, filtered_data.loc[[idx - 1]]])
    # Downward jumps: accept as a cycle start only if all of the 200 rows
    # after the jump sit at least min_distance below the pre-jump level.
    # (iloc is safe here because labels equal positions after reset_index
    # in process_data — NOTE(review): confirm for all call paths.)
    for idx in small_diff_indices:
        current_distance = filtered_data.loc[idx - 1, 'distance']
        next_rows_small = filtered_data.iloc[idx: idx + 200]
        if next_rows_small['distance'].le(current_distance - min_distance).all():
            start_points = pd.concat([start_points, filtered_data.loc[[idx]]])
    # Trim so the sequence starts with a start point and ends with an end point.
    if end_points.iloc[0, 0] < start_points.iloc[0, 0]:
        end_points = end_points.drop(end_points.index[0])
    if end_points.iloc[-1, 0] < start_points.iloc[-1, 0]:
        start_points = start_points.drop(start_points.index[-1])
    else:
        pass
    return start_points, end_points, filtered_data
  554. def data_normalize(data_group: pd.DataFrame, start_points: pd.DataFrame, end_points: pd.DataFrame, group_len: int) \
  555. -> Tuple[List[pd.DataFrame], List[pd.DataFrame], List[pd.DataFrame], int, list]:
  556. """
  557. 提取每个叶片的数据并归一化,输出散点图和拟合图
  558. :param data_group: cycle_calculate计算完成后的数据。
  559. :param start_points: 所有每个周期开始点,叶片前缘突变点。
  560. :param end_points: 叶片后缘突变点。
  561. :param group_len: 每个分组的长度。
  562. :return: turbines_processed: 每个叶片的拟合数据,
  563. turbines_scattered: 每个叶片的散点数据,
  564. border_rows: 每个叶片的2个边缘数据,
  565. normalize_cycle: 周期长度
  566. """
  567. print('正在进行各周期归一化......')
  568. time.sleep(1)
  569. combined_df_sorted = pd.concat([start_points, end_points]).sort_values(by='time')
  570. # 检查排序后的数据从start开始,end结束
  571. if combined_df_sorted.iloc[0].equals(end_points.iloc[0]):
  572. combined_df_sorted = combined_df_sorted.iloc[1:]
  573. if combined_df_sorted.iloc[-1].equals(start_points.iloc[-1]):
  574. combined_df_sorted = combined_df_sorted.iloc[:-1]
  575. combined_df_sorted.reset_index(drop=True, inplace=True)
  576. # 将 start_points 中的时间点转换为列表
  577. start_times = combined_df_sorted['time'].tolist()
  578. print('本次测量风机完整旋转圈数:'+ str(len(start_times) / 2))
  579. time.sleep(1)
  580. normalize_cycle = start_times[1] - start_times[0]
  581. full_cycle = int((start_times[2] - start_times[0]) * 3)
  582. turbines = [pd.DataFrame() for _ in range(3)]
  583. # 遍历所有起始时间点
  584. for i in range(0, len(start_times), 2):
  585. # 获取当前起始和结束时间点
  586. start_time = start_times[i]
  587. end_time = start_times[i + 1]
  588. # 根据当前起始时间点和结束时间点对数据进行分段
  589. segment = data_group[(data_group['time'] > start_time) & (data_group['time'] <= end_time)]
  590. if segment is None:
  591. pass
  592. else:
  593. # 周期归一化
  594. ratio = (end_time - start_time) / normalize_cycle
  595. segment.loc[:, 'time'] = (segment['time'] - start_time) / ratio
  596. # segment.loc[:, 'distance'] = ff.butter_lowpass_filter(segment['distance'], cutoff_low, fs)
  597. # 将结果添加到相应的 turbine 数据框中
  598. turbines[i % 3] = pd.concat([turbines[i % 3], segment])
  599. # 数据分组清洗、求平均
  600. turbines_processed = []
  601. turbines_scattered = []
  602. min_list = []
  603. plot_points = []
  604. diff_line = []
  605. sd_time = [-1, -1]
  606. time_list = list(range(0, normalize_cycle, group_len))
  607. # time_list = [(i + 1) * normalize_cycle / fs * 100 for i in range(fs * 100)] # 生成时间序列
  608. for turbine in turbines:
  609. # 按时间排序
  610. turbine_sorted = turbine.sort_values(by='time').reset_index(drop=True)
  611. grey_start_index = int(len(turbine_sorted) * 0.1)
  612. grey_end_index = int(len(turbine_sorted) * 0.9)
  613. subset_grey = turbine_sorted[grey_start_index:grey_end_index]
  614. mean_grey = subset_grey['grey'].mean() * 0.8 # 0.8
  615. turbine_sorted = turbine_sorted[turbine_sorted['grey'] > mean_grey]
  616. # 找到time列的第一个值
  617. first_time = turbine_sorted['time'].iloc[0]
  618. # 分组,时间列每1000为一组(每40个时间点一组)
  619. bins = list(range(int(first_time), int(turbine_sorted['time'].max()), group_len))
  620. # 原始代码
  621. # bins = list(range(int(first_time), int(turbine_sorted['time'].max()) + len(start_times), int(fs / 50)))
  622. grouped = turbine_sorted.groupby(pd.cut(turbine_sorted['time'], bins=bins, right=False))
  623. # 初始化一个空的 DataFrame 用于存储处理后的数据
  624. processed_df = pd.DataFrame()
  625. scattered_df = pd.DataFrame()
  626. mean_points = []
  627. diff_points = []
  628. # 对每个组进行处理
  629. for _, group in grouped:
  630. # 去除 distance 最大和最小的前5%
  631. quantile_5 = group['distance'].quantile(0.05)
  632. quantile_95 = group['distance'].quantile(0.95)
  633. filtered_group = group[(group['distance'] > quantile_5) & (group['distance'] < quantile_95)]
  634. # 计算均值
  635. mean_point = filtered_group['distance'].mean()
  636. mean_points.append(mean_point)
  637. # 遍历 mean_points 列表,计算每个元素与其下一个元素的差值
  638. for i in range(len(mean_points) - 1):
  639. diff = abs(mean_points[i + 1] - mean_points[i])
  640. diff_points.append(diff)
  641. start_index = int(len(diff_points) * 0.05)
  642. end_index = int(len(diff_points) * 0.95)
  643. subset1 = diff_points[start_index:end_index]
  644. sdr_diff = np.max(subset1) * 1.1 # 1.1
  645. min_list.append(min(mean_points))
  646. # 找到第一个和最后一个小于 sdr_diff 的序号
  647. first_index = np.where(diff_points < sdr_diff)[0][0]
  648. last_index = np.where(diff_points < sdr_diff)[0][-1]
  649. plot_points.append(diff_points)
  650. diff_line.append(sdr_diff)
  651. for index, (bin, group) in enumerate(grouped):
  652. # 去除 distance 最大和最小的前5%
  653. quantile_5 = group['distance'].quantile(0.05)
  654. quantile_95 = group['distance'].quantile(0.95)
  655. filtered_group = group[(group['distance'] > quantile_5) & (group['distance'] < quantile_95)]
  656. if first_index <= index < last_index: # 如果斜率小于,则认为该组数据不是突变点
  657. # 计算中点
  658. mid_point = filtered_group.mean()
  659. # 将中点转换为 DataFrame 并添加到处理后的 DataFrame 中
  660. mid_point_df = pd.DataFrame([mid_point])
  661. mid_point_df.iloc[0, 0] = time_list[index]
  662. processed_df = pd.concat([processed_df, mid_point_df], ignore_index=True)
  663. scattered_df = pd.concat([scattered_df, filtered_group], ignore_index=True)
  664. else: pass
  665. # 找到time列的最小值和最大值
  666. min_time = processed_df['time'].min()
  667. max_time = processed_df['time'].max()
  668. if sd_time == [-1, -1]:
  669. sd_time = [min_time, max_time]
  670. elif sd_time[0] < min_time:
  671. sd_time[0] = min_time
  672. elif sd_time[1] > max_time:
  673. sd_time[1] = max_time
  674. # 将处理后的 DataFrame 添加到列表中
  675. turbines_processed.append(processed_df)
  676. turbines_scattered.append(scattered_df)
  677. """# 创建一个总图中有3个分图的形式
  678. fig, axs = plt.subplots(1, 3, figsize=(15, 9))
  679. plt.subplots_adjust(wspace=0.3) # 调整子图之间的水平间距
  680. # 绘制第一张图
  681. axs[0].plot(plot_points[0], label='Diff Points', color='blue', marker='x', markersize=5)
  682. axs[0].axhline(y=diff_line[0], color='red', linestyle='--')
  683. axs[0].legend()
  684. axs[0].set_title('Diff Points (Index 1)')
  685. axs[0].set_xlabel('Index')
  686. axs[0].set_ylabel('Value')
  687. # 绘制第二张图
  688. axs[1].plot(plot_points[1], label='Diff Points', color='blue', marker='x', markersize=5)
  689. axs[1].axhline(y=diff_line[1], color='red', linestyle='--')
  690. axs[1].legend()
  691. axs[1].set_title('Diff Points (Index 2)')
  692. axs[1].set_xlabel('Index')
  693. axs[1].set_ylabel('Value')
  694. # 绘制第三张图
  695. axs[2].plot(plot_points[2], label='Diff Points', color='blue', marker='x', markersize=5)
  696. axs[2].axhline(y=diff_line[2], color='red', linestyle='--')
  697. axs[2].legend()
  698. axs[2].set_title('Diff Points (Index 3)')
  699. axs[2].set_xlabel('Index')
  700. axs[2].set_ylabel('Value')
  701. # 显示图形
  702. plt.tight_layout()
  703. plt.show()"""
  704. # 把三组叶片数据按sd_time进行筛选,并把每个的边界数据保存
  705. border_rows = []
  706. for i, turbine in enumerate(turbines_processed):
  707. # 找到离 sd_time[0] 最近的行的索引
  708. closest_index_0 = (turbine['time'] - sd_time[0]).abs().idxmin()
  709. turbine.at[closest_index_0, 'time'] = sd_time[0]
  710. sd_time_row_0 = turbine.loc[closest_index_0]
  711. # 找到离 sd_time[1] 最近的行的索引
  712. closest_index_1 = (turbine['time'] - sd_time[1]).abs().idxmin()
  713. turbine.at[closest_index_1, 'time'] = sd_time[1]
  714. sd_time_row_1 = turbine.loc[closest_index_1]
  715. # 切片 turbine,从 closest_index_0 到 closest_index_1
  716. turbines_processed[i] = turbine.iloc[closest_index_0:closest_index_1 + 1].reset_index(drop=True)
  717. sd_time_rows_turbine = pd.concat([pd.DataFrame([sd_time_row_0]), pd.DataFrame([sd_time_row_1])]
  718. , ignore_index=True)
  719. border_rows.append(sd_time_rows_turbine)
  720. return turbines_processed, turbines_scattered, border_rows, full_cycle, min_list
  721. def blade_shape(turbines_processed: List[pd.DataFrame]):
  722. """
  723. 计算叶片平均形状、叶片形状偏差。
  724. :param turbines_processed:叶片拟合曲线数据,来自data_normalize
  725. :return: 叶片平均形状、叶片形状偏差
  726. """
  727. print('正在进行叶片外形偏差计算......')
  728. row_counts = [df.shape[0] for df in turbines_processed]
  729. num_rows = min(row_counts)
  730. # 创建一个新的data.frame用于保存结果
  731. turbine_avg = pd.DataFrame(index=range(num_rows), columns=['time', 'distance'])
  732. turbine_diff = [pd.DataFrame(index=range(num_rows), columns=['time', 'distance']) for _ in turbines_processed]
  733. # 遍历每一行
  734. for i in range(num_rows):
  735. distances = [df.loc[i, 'distance'] for df in turbines_processed] # 获取每个data.frame的distance列的值
  736. avg_distance = sum(distances) / len(distances) # 计算distance列的平均值
  737. time_value = turbines_processed[0].loc[i, 'time'] # 获取time列的值
  738. turbine_avg.loc[i, 'time'] = time_value
  739. turbine_avg.loc[i, 'distance'] = avg_distance
  740. for j in range(len(distances)):
  741. distances[j] = distances[j] - avg_distance
  742. turbine_diff[j].loc[i, 'time'] = time_value
  743. turbine_diff[j].loc[i, 'distance'] = distances[j]
  744. return turbine_avg, turbine_diff
  745. def coordinate_normalize(tip_border_rows: List[pd.DataFrame], tip_angle):
  746. """
  747. 将叶尖测量数据和叶根、轮毂中心的测量原点归一化。
  748. :param tip_border_rows: 3个叶尖边缘数据
  749. :param tip_angle: 叶尖测量俯仰角
  750. :return: 归一化后叶尖数据,叶尖俯仰角
  751. """
  752. tip_angle1 = np.deg2rad(tip_angle)
  753. tip_angle_list = []
  754. for turbine in tip_border_rows:
  755. tip_angle_cal0 = ((np.sin(tip_angle1) * turbine['distance'] - 0.07608) /
  756. (np.cos(tip_angle1) * turbine['distance']))
  757. tip_angle_cal = np.arctan(tip_angle_cal0)
  758. turbine['distance'] = (turbine['distance']**2 + 0.0057881664 -
  759. 0.15216*turbine['distance']*np.sin(tip_angle1)) ** 0.5
  760. tip_angle_list.append(tip_angle_cal)
  761. tip_angle_new = float(np.mean(tip_angle_list))
  762. tip_angle_new1 = np.rad2deg(tip_angle_new)
  763. print('坐标转换后的新叶尖俯仰角: ' + str(tip_angle_new1))
  764. return tip_border_rows, tip_angle_new1
  765. def radius_cal(border_rows, meas_angle, cen_dist, cen_angle, angle_main, angle_rotate):
  766. """
  767. 计算测量点处的旋转半径。
  768. :param border_rows: 三个叶片的边界
  769. :param meas_angle: 回波俯仰角
  770. :param cen_dist: 轮毂中心距离
  771. :param cen_angle: 轮毂中心俯仰角
  772. :param angle_main: 主轴倾角
  773. :param angle_rotate: 锥角
  774. :return: 旋转半径
  775. """
  776. aero_dist = (pd.concat([df['distance'] for df in border_rows]).mean())
  777. cen_x = np.cos(np.deg2rad(cen_angle)) * cen_dist
  778. cen_y = np.sin(np.deg2rad(cen_angle)) * cen_dist
  779. aero_x = np.cos(np.deg2rad(meas_angle)) * aero_dist
  780. aero_y = np.sin(np.deg2rad(meas_angle)) * aero_dist
  781. theta_4 = np.tan(np.pi - np.deg2rad(angle_main))
  782. theta_5 = np.tan(np.pi/2 - np.deg2rad(angle_main) - np.deg2rad(angle_rotate))
  783. if theta_5 > 1000:
  784. radius = np.abs((cen_y - aero_y) - theta_4 * (cen_x - aero_x))
  785. print("轴向倾角与锥角相近,叶片垂直于地面")
  786. else:
  787. radius = (np.abs((theta_4 * (cen_x - aero_x) - (cen_y - aero_y))/(theta_4 - theta_5))
  788. * ((1 + theta_5 ** 2) ** 0.5))
  789. print('测量点旋转半径:' + str(radius))
  790. return radius
  791. def blade_angle_aero_dist(border_rows: List[pd.DataFrame], radius: float, full_cycle: int,
  792. tower_dist: float, v_angle: float):
  793. """
  794. 计算叶片相对桨距角和叶片净空距离。
  795. :param border_rows: 三个叶片的边界
  796. :param radius: 旋转半径
  797. :param full_cycle: 全周期
  798. :param tower_dist: 塔筒距离
  799. :param v_angle: 俯仰角度
  800. :return: 绝对桨距角,净空距离,叶片线速度
  801. """
  802. print('正在进行相对桨距角和叶片净空距离计算......')
  803. v_speed = 2 * np.pi * radius / full_cycle # 叶片线速度m/(1计时器单位)
  804. pitch_angle_list = []
  805. aero_dist_list = []
  806. cen_blade = []
  807. for turbine in border_rows:
  808. diff_time = turbine.iloc[1, 0] - turbine.iloc[0, 0]
  809. diff_len = (turbine.iloc[1, 1] - turbine.iloc[0, 1]) * np.cos(np.deg2rad(v_angle))
  810. mean_col2 = (turbine.iloc[1, 1] + turbine.iloc[0, 1]) / 2
  811. aero_dist = abs(mean_col2 - tower_dist) * np.cos(np.deg2rad(v_angle))
  812. pitch_angle = np.degrees(np.arctan(diff_len / (diff_time * v_speed)))
  813. print('单个叶片绝对桨距角' + str(pitch_angle))
  814. pitch_angle_list.append(pitch_angle)
  815. aero_dist_list.append(aero_dist)
  816. cen_blade.append(mean_col2)
  817. pitch_mean = np.mean(pitch_angle_list)
  818. pitch_angle_list = [angle - pitch_mean for angle in pitch_angle_list]
  819. pitch_angle_list.append(max(pitch_angle_list) - min(pitch_angle_list))
  820. aero_dist_list.append(np.mean(aero_dist_list))
  821. pitch_angle_list = [round(num, 2) for num in pitch_angle_list]
  822. aero_dist_list = [round(num, 2) for num in aero_dist_list]
  823. return pitch_angle_list, aero_dist_list, v_speed, cen_blade
  824. def plot_data(data, plot_type: str, data_name: str):
  825. """
  826. 绘制数据图表并保存为文件。
  827. :param data: 数据列表,每个元素是一个 DataFrame。
  828. :param plot_type: 图表类型,'line' 或 'scatter'。
  829. :param data_name: 数据名称,用于生成文件名。
  830. """
  831. print('正在画图......')
  832. time.sleep(1)
  833. save_path = "C:/Users/laiwe/Desktop/"
  834. save_name = fr"{data_name}_{plot_type}.png" # 生成文件名
  835. plt.figure(figsize=(300, 150))
  836. if plot_type == 'line':
  837. for df, color in zip(data, ['blue', 'green', 'red']):
  838. sns.lineplot(data=df, x=df.iloc[:, 0], y=df.iloc[:, 1], color=color, linewidth=8)
  839. elif plot_type == 'scatter':
  840. for df, (size, color) in zip(data, [(50, 'blue'), (25, 'green'), (10, 'red')]):
  841. sns.scatterplot(data=df, x=df.iloc[:, 0], y=df.iloc[:, 1], s=size, color=color)
  842. else:
  843. raise ValueError("plot_type must be either 'line' or 'scatter'")
  844. axy = plt.gca() # 获取当前坐标轴对象
  845. plt.grid(which='both', linewidth=2) # 设置网格线宽度为2
  846. axy.xaxis.set_major_locator(MaxNLocator(nbins=200)) # 设置x轴主刻度的最大数量为10
  847. axy.yaxis.set_major_locator(MaxNLocator(nbins=100)) # 设置y轴主刻度的最大数量为10
  848. plt.xlabel('时间', fontsize=100, fontweight='bold') # 添加x轴标签
  849. plt.ylabel('距离(m)', fontsize=100, fontweight='bold') # 添加y轴标签
  850. axy.tick_params(axis='x', labelsize=10, labelcolor='black', width=2) # 设置x轴刻度标签
  851. axy.tick_params(axis='y', labelsize=60, labelcolor='black', width=10) # 设置y轴刻度标签
  852. plt.savefig(save_path + save_name)
  853. plt.close()
  854. abs_path = os.path.abspath(save_name)
  855. print(f" {save_name} 已完成")
  856. return abs_path
  857. def find_param(path: str):
  858. """
  859. 根据文件路径获取参数
  860. """
  861. path = path.replace('\\', '/')
  862. last_slash_index = path.rfind('/')
  863. result = path[last_slash_index + 1:]
  864. underscore_indices = []
  865. start = 0
  866. while True:
  867. index = result.find('_', start)
  868. if index == -1:
  869. break
  870. underscore_indices.append(index)
  871. start = index + 1
  872. wind_name = result[: underscore_indices[0]]
  873. turbine_code = result[underscore_indices[0] + 1: underscore_indices[1]]
  874. time_code = result[underscore_indices[1] + 1: underscore_indices[2]]
  875. sampling_fq = int(result[underscore_indices[2] + 1: underscore_indices[3]])
  876. tunnel_1 = float(result[underscore_indices[3] + 1: underscore_indices[4]])
  877. tunnel_2 = float(result[underscore_indices[4] + 1: -4])
  878. dt = datetime.strptime(time_code, "%Y%m%d%H%M%S")
  879. standard_time_str = dt.strftime("%Y-%m-%d %H:%M:%S")
  880. return wind_name, turbine_code, standard_time_str, sampling_fq, tunnel_1, tunnel_2
  881. def blade_dist_distribute_cal(data_group: pd.DataFrame, start_points: pd.DataFrame, end_points: pd.DataFrame,
  882. tower_dist: float, v_angle: float, blade_cen_dist: list):
  883. """
  884. 计算每个叶片每个周期的转速和净空距离
  885. :param data_group: cycle_calculate计算完成后的数据。
  886. :param start_points: 所有每个周期开始点,叶片前缘突变点。
  887. :param end_points: 叶片后缘突变点。
  888. :param tower_dist: 塔筒距离。
  889. :param v_angle: 测量俯仰角度。
  890. :param blade_cen_dist: 叶片内部距离。
  891. """
  892. print('正在进行各周期净空距离分布计算......')
  893. time.sleep(1)
  894. combined_df_sorted = pd.concat([start_points, end_points]).sort_values(by='time')
  895. # 检查排序后的数据从start开始,end结束
  896. if combined_df_sorted.iloc[0].equals(end_points.iloc[0]):
  897. combined_df_sorted = combined_df_sorted.iloc[1:]
  898. if combined_df_sorted.iloc[-1].equals(start_points.iloc[-1]):
  899. combined_df_sorted = combined_df_sorted.iloc[:-1]
  900. combined_df_sorted.reset_index(drop=True, inplace=True)
  901. # 将 start_points 中的时间点转换为列表
  902. start_times = combined_df_sorted['time'].tolist()
  903. normalize_cycle = start_times[1] - start_times[0]
  904. tower_clearance = [pd.DataFrame() for _ in range(3)]
  905. # 遍历所有起始时间点
  906. for i in range(0, len(start_times) - 2, 2):
  907. # 获取当前起始和结束时间点
  908. start_time = start_times[i]
  909. end_time = start_times[i + 1]
  910. # 根据当前起始时间点和结束时间点对数据进行分段
  911. segment = data_group[(data_group['time'] > start_time) & (data_group['time'] <= end_time)]
  912. min_distance = segment['distance'].min()
  913. clearance = np.abs(tower_dist - min_distance - blade_cen_dist[i % 3]) * np.cos(np.deg2rad(v_angle))
  914. r_speed = round(60 / ((start_times[i + 2] - start_times[i]) * 3 / 5000000), 2)
  915. new_df = pd.DataFrame({
  916. 'r_speed': [r_speed],
  917. 'clearance': [clearance]
  918. })
  919. # 将结果添加到相应的 turbine 数据框中
  920. tower_clearance[i % 3] = pd.concat([tower_clearance[i % 3], new_df])
  921. tower_clearance = [df.sort_values(by='r_speed') for df in tower_clearance]
  922. return tower_clearance
  923. # locate_path = "C:/Users/laiwe/Desktop/风电/激光测量/测试数据/20250728/gy_10-tf-20_20250630223600_20_13.03_23.32.csv"
  924. # measure_path= "C:/Users/laiwe/Desktop/风电/激光测量/测试数据/20250728/gy_10-tf-20_20250630223849_20_17.89_21.07.csv"
  925. # locate_path = "C:/Users/laiwe/Desktop/风电/激光测量/测试数据/20250728/gy_10-tf-50_20250630223358_50_13.03_23.32.csv"
  926. # measure_path= "C:/Users/laiwe/Desktop/风电/激光测量/测试数据/20250728/gy_10-tf-50_20250630224408_50_17.89_21.07.csv"
  927. # locate_path = "C:/Users/laiwe/Desktop/风电/激光测量/测试数据/20250728/gy_10-tf-100_20250630222752_100_13.03_23.32.csv"
  928. # measure_path= "C:/Users/laiwe/Desktop/风电/激光测量/测试数据/20250728/gy_10-tf-100_20250630225119_100_17.89_21.07.csv"
  929. # locate_path = "C:/Users/laiwe/Desktop/风电/激光测量/测试数据/20250728/gy_10-tff-20_20250630231223_20_12.51_20.06.csv"
  930. # measure_path= "C:/Users/laiwe/Desktop/风电/激光测量/测试数据/20250728/gy_10-tff-20_20250630232052_20_15.36_18.17.csv"
  931. # locate_path = "C:/Users/laiwe/Desktop/风电/激光测量/测试数据/20250728/gy_10-tff-50_20250630231417_50_12.51_20.06.csv"
  932. # measure_path= "C:/Users/laiwe/Desktop/风电/激光测量/测试数据/20250728/gy_10-tff-50_20250630233420_50_15.35_18.16.csv"
  933. # locate_path = "C:/Users/laiwe/Desktop/风电/激光测量/测试数据/20250728/gy_10-tff-100_20250630231610_100_12.51_20.06.csv"
  934. # measure_path= "C:/Users/laiwe/Desktop/风电/激光测量/测试数据/20250728/gy_10-tff-100_20250630234012_100_15.35_18.16.csv"
  935. locate_path = "C:/Users/laiwe/Desktop/风电/激光测量/测试数据/gytest/测试数据/gy_18-RF-1_20250701154647_50_23.70_40.01.csv"
  936. measure_path= "C:/Users/laiwe/Desktop/风电/激光测量/测试数据/gytest/测试数据/gy_18-RF-2_20250701155057_50_29.30_36.78.csv"
  937. start_t = time.time() # 记录开始时间
  938. data_path = [locate_path, measure_path, 5, 6]
  939. list_1 = data_analyse(data_path)
  940. # print(list_1)
  941. print(f"耗时: {time.time() - start_t:.2f} 秒")