# tsrAnalyst.py

import os
import json
import math

import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import seaborn as sns
from matplotlib.ticker import MultipleLocator
from plotly.subplots import make_subplots

from behavior.analystWithGoodPoint import AnalystWithGoodPoint
from utils.directoryUtil import DirectoryUtil as dir
from algorithmContract.confBusiness import *
from algorithmContract.contract import Contract


class TSRAnalyst(AnalystWithGoodPoint):
    """
    Tip speed ratio (TSR) analysis for wind turbines.
    """

    def typeAnalyst(self):
        return "tsr"
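
    # Reference sketch (assumption, not part of the original pipeline): the Field_TSR
    # column consumed below is expected to be supplied upstream (see the commented
    # self.tsr(...) call). Tip speed ratio is conventionally blade tip speed over wind
    # speed, lambda = (pi * n * D / 60) / v, with n the rotor speed in rpm, D the rotor
    # diameter in metres and v the wind speed in m/s. The helper below is a minimal,
    # hypothetical illustration of that formula only; the real rotor diameter would
    # have to come from the turbine model metadata.
    @staticmethod
    def tipSpeedRatioSketch(rotorSpeedRpm: float, rotorDiameterM: float, windSpeed: float) -> float:
        if windSpeed <= 0:
            return float("nan")
        tipSpeed = math.pi * rotorSpeedRpm * rotorDiameterM / 60.0  # blade tip speed in m/s
        return tipSpeed / windSpeed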

    def turbinesAnalysis(self, outputAnalysisDir, conf: Contract, turbineCodes):
        dictionary = self.processTurbineData(turbineCodes, conf, [
            Field_DeviceCode, Field_Time, Field_WindSpeed, Field_ActiverPower,
            Field_RotorSpeed, Field_GeneratorSpeed])
        dataFrameOfTurbines = self.userDataFrame(
            dictionary, conf.dataContract.configAnalysis, self)

        # Check that the required columns are present
        required_columns = {Field_WindSpeed, Field_RotorSpeed, Field_PowerFloor, Field_GeneratorSpeed}
        if not required_columns.issubset(dataFrameOfTurbines.columns):
            raise ValueError(f"DataFrame缺少必要的列。需要的列有: {required_columns}")

        turbineInfos = self.common.getTurbineInfos(
            conf.dataContract.dataFilter.powerFarmID, turbineCodes, self.turbineInfo)
        groupedOfTurbineModel = turbineInfos.groupby(Field_MillTypeCode)

        returnDatas = []
        for turbineModelCode, group in groupedOfTurbineModel:
            currTurbineCodes = group[Field_CodeOfTurbine].unique().tolist()
            currTurbineModeInfo = self.common.getTurbineModelByCode(
                turbineModelCode, self.turbineModelInfo)
            currDataFrameOfTurbines = dataFrameOfTurbines[dataFrameOfTurbines[Field_CodeOfTurbine].isin(
                currTurbineCodes)]

            # Work on a copy of currDataFrameOfTurbines
            dataFrameMerge = currDataFrameOfTurbines.copy()
            # return self.plot_tsr_distribution(self.tsr(dataFrameMerge), outputAnalysisDir, conf)
            dataFrameMerge[Field_PowerFarmName] = self.currPowerFarmInfo.loc[Field_PowerFarmName]

            # Calculate 'power_floor'
            dataFrameMerge[Field_PowerFloor] = (
                dataFrameMerge[Field_ActiverPower] / 10).astype(int) * 10

            # Ensure the necessary columns are of float type
            dataFrameMerge[Field_WindSpeed] = dataFrameMerge[Field_WindSpeed].astype(float)
            dataFrameMerge[Field_RotorSpeed] = dataFrameMerge[Field_RotorSpeed].astype(float)
            dataFrameMerge[Field_GeneratorSpeed] = dataFrameMerge[Field_GeneratorSpeed].astype(float)

            # Group by 'power_floor' and calculate median, max, and min of TSR
            grouped = dataFrameMerge.groupby([Field_PowerFloor, Field_CodeOfTurbine, Field_NameOfTurbine]).agg({
                Field_WindSpeed: 'median',
                Field_RotorSpeed: 'median',
                Field_GeneratorSpeed: 'median',
                Field_TSR: ['median', 'max', 'min'],
                Field_PowerFarmName: 'max'
            }).reset_index()

            # Rename columns for clarity after aggregation
            grouped.columns = [Field_PowerFloor, Field_CodeOfTurbine, Field_NameOfTurbine, Field_WindSpeed,
                               Field_RotorSpeed, Field_GeneratorSpeed, Field_TSR, Field_TSRMax, Field_TSRMin,
                               Field_PowerFarmName]

            # Sort by turbine code and 'power_floor'
            grouped = grouped.sort_values(by=[Field_CodeOfTurbine, Field_PowerFloor])

            returnData = self.plot_tsr_distribution(
                grouped, outputAnalysisDir, conf, currTurbineModeInfo)
            returnDatas.append(returnData)

        returnResult = pd.concat(returnDatas, ignore_index=True)
        return returnResult
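
    # For reference, the power-floor binning above maps active power onto 10 kW bins
    # (e.g. 523.7 kW and 529.9 kW both fall into bin 520), and the groupby then reports,
    # per bin and per turbine, the median wind/rotor/generator speeds and the
    # median/max/min TSR. A minimal standalone sketch of that step, using hypothetical
    # column names and plain pandas:
    #
    #     df = pd.DataFrame({"power": [523.7, 529.9, 812.0], "tsr": [8.1, 8.3, 7.6]})
    #     df["power_floor"] = (df["power"] / 10).astype(int) * 10
    #     binned = df.groupby("power_floor")["tsr"].agg(["median", "max", "min"])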

    def plot_tsr_distribution(self, dataFrameMerge: pd.DataFrame, outputAnalysisDir, conf: Contract,
                              turbineModelInfo: pd.Series):
        """
        Generates TSR distribution plots for the turbines of a wind farm.

        Parameters:
        - dataFrameMerge: pd.DataFrame, TSR data aggregated by power floor and turbine.
        - outputAnalysisDir: str, directory the output files are written to.
        - conf: Contract, analysis configuration and data filter.
        - turbineModelInfo: pd.Series, metadata of the current turbine model.
        """
        x_name = Field_PowerFloor
        y_name = Field_TSR
        upLimitOfTSR = 20

        # Create a list to store each turbine's data
        turbine_data_list = []

        # Plot the farm-wide TSR distribution
        fig = go.Figure()
        # colors = px.colors.sequential.Turbo
        # Add one line per turbine
        for turbine in dataFrameMerge[Field_NameOfTurbine].unique():
            turbine_data = dataFrameMerge[dataFrameMerge[Field_NameOfTurbine] == turbine]
            fig.add_trace(go.Scatter(x=turbine_data[x_name], y=turbine_data[y_name],
                                     mode='lines',
                                     # line=dict(color=colors[idx % len(colors)]),
                                     name=turbine))

            # Extract this turbine's data
            turbine_data_total = {
                "engineName": turbine,
                "engineCode": turbine_data[Field_CodeOfTurbine].iloc[0],
                "xData": turbine_data[x_name].tolist(),
                "yData": turbine_data[y_name].tolist(),
            }
            turbine_data_list.append(turbine_data_total)

        fig.update_layout(
            title={
                "text": f'叶尖速比分布-{turbineModelInfo[Field_MachineTypeCode]}',
                'x': 0.5
            },
            xaxis=dict(
                title='最小功率',
                dtick=200,
                tickangle=-45,
                range=[0, 1800]),
            yaxis=dict(
                title='叶尖速比',
                dtick=self.axisStepTSR,
                range=[self.axisLowerLimitTSR,
                       self.axisUpperLimitTSR]
            ),
            legend=dict(
                orientation="h",   # Horizontal orientation
                xanchor="center",  # Anchor the legend to the center
                x=0.5,             # Position the legend at the center of the x-axis
                y=-0.2,            # Position the legend below the x-axis
                # itemsizing='constant',  # Keep the size of the legend entries constant
                # itemwidth=50
            )
        )

        # Rotate the x-axis tick labels
        fig.update_xaxes(tickangle=-45)

        engineTypeCode = turbineModelInfo.get(Field_MillTypeCode, "")
        if isinstance(engineTypeCode, pd.Series):
            engineTypeCode = engineTypeCode.iloc[0]
        engineTypeName = turbineModelInfo.get(Field_MachineTypeCode, "")
        if isinstance(engineTypeName, pd.Series):
            engineTypeName = engineTypeName.iloc[0]

        # Build the final JSON object
        json_output = {
            "analysisTypeCode": "风电机组叶尖速比分析",
            "typecode": turbineModelInfo[Field_MillTypeCode],
            "engineCode": engineTypeCode,
            "engineTypeName": engineTypeName,
            "title": f'叶尖速比分布-{turbineModelInfo[Field_MachineTypeCode]}',
            "xaixs": "最小功率(kW)",
            "yaixs": "叶尖速比",
            "data": turbine_data_list
        }

        # Save the figure as an image
        # fig.write_image(csvFileDirOfCp + r"/{}-TSR-Distribute.png".format(confData.farm_name), format='png', width=800, height=500, scale=3)
        # fig.show()
        # Save as HTML
        # htmlFileName = f"{dataFrameMerge[Field_PowerFarmName].iloc[0]}-TSR-Distribution-{turbineModelInfo[Field_MillTypeCode]}.html"
        # htmlFilePath = os.path.join(outputAnalysisDir, htmlFileName)
        # fig.write_html(htmlFilePath)

        result_rows = []
        # result_rows.append({
        #     Field_Return_TypeAnalyst: self.typeAnalyst(),
        #     Field_PowerFarmCode: conf.dataContract.dataFilter.powerFarmID,
        #     Field_Return_BatchCode: conf.dataContract.dataFilter.dataBatchNum,
        #     Field_CodeOfTurbine: 'total',
        #     Field_Return_FilePath: htmlFilePath,
        #     Field_Return_IsSaveDatabase: True
        # })

        # Write the JSON object to file
        output_json_path = os.path.join(
            outputAnalysisDir,
            f"{dataFrameMerge[Field_PowerFarmName].iloc[0]}-TSR-Distribution-{turbineModelInfo[Field_MillTypeCode]}.json")
        with open(output_json_path, 'w', encoding='utf-8') as f:
            json.dump(json_output, f, ensure_ascii=False, indent=4)

        # If a DataFrame is to be returned, include the file path
        result_rows.append({
            Field_Return_TypeAnalyst: self.typeAnalyst(),
            Field_PowerFarmCode: conf.dataContract.dataFilter.powerFarmID,
            Field_Return_BatchCode: conf.dataContract.dataFilter.dataBatchNum,
            Field_CodeOfTurbine: 'total',
            Field_MillTypeCode: turbineModelInfo[Field_MillTypeCode],
            Field_Return_FilePath: output_json_path,
            Field_Return_IsSaveDatabase: True
        })
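
        # For orientation: each dict appended to result_rows becomes one row of the
        # DataFrame returned at the end, keyed by the Field_Return_* / Field_*
        # constants, e.g. analysis type "tsr", farm and batch identifiers, the turbine
        # code ("total" for the farm-wide artefact), the produced file path, and
        # whether the artefact should be persisted to the database.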

        # Plot the TSR distribution for each turbine
        for name, group in dataFrameMerge.groupby([Field_NameOfTurbine, Field_CodeOfTurbine]):
            fig = go.Figure()
            # Create a list to store each turbine's data
            turbine_data_list_each = []

            # Loop over the turbines and draw their lines
            for turbine in dataFrameMerge[Field_NameOfTurbine].unique():
                turbine_data = dataFrameMerge[dataFrameMerge[Field_NameOfTurbine] == turbine]
                fig.add_trace(go.Scatter(x=turbine_data[x_name],
                                         y=turbine_data[y_name],
                                         mode='lines',
                                         line=dict(color='lightgrey'),
                                         showlegend=False))

                # Extract this turbine's data
                turbine_data_each = {
                    "engineName": turbine,
                    "engineCode": turbine_data[Field_CodeOfTurbine].iloc[0],
                    "xData": turbine_data[x_name].tolist(),
                    "yData": turbine_data[y_name].tolist(),
                }
                turbine_data_list_each.append(turbine_data_each)

            fig.add_trace(go.Scatter(x=group[x_name],
                                     y=group[y_name],
                                     mode='lines',
                                     line=dict(color='darkblue'),
                                     showlegend=False))

            fig.update_layout(
                title={"text": '机组: {}'.format(name[0])},
                # margin=dict(
                #     t=35,  # top margin; a smaller value moves the title closer to the plot
                #     l=60,  # left margin
                #     r=60,  # right margin
                #     b=40,  # bottom margin
                # ),
                xaxis=dict(
                    title='功率',
                    dtick=200,
                    tickangle=-45,
                    range=[0, 1800]),
                yaxis=dict(
                    title='叶尖速比',
                    dtick=self.axisStepTSR,
                    range=[self.axisLowerLimitTSR,
                           self.axisUpperLimitTSR]
                )
            )
            fig.update_xaxes(tickangle=-45)

            engineTypeCode = turbineModelInfo.get(Field_MillTypeCode, "")
            if isinstance(engineTypeCode, pd.Series):
                engineTypeCode = engineTypeCode.iloc[0]
            engineTypeName = turbineModelInfo.get(Field_MachineTypeCode, "")
            if isinstance(engineTypeName, pd.Series):
                engineTypeName = engineTypeName.iloc[0]

            # Build the final JSON object for this turbine
            json_output = {
                "analysisTypeCode": "风电机组叶尖速比分析",
                "typecode": turbineModelInfo[Field_MillTypeCode],
                "engineCode": engineTypeCode,
                "engineTypeName": engineTypeName,
                "title": f'机组: {name[0]}',
                "xaixs": "功率(kW)",
                "yaixs": "叶尖速比",
                "data": turbine_data_list_each
            }

            # Save the figure as an image
            pngFileName = f"{name[0]}.png"
            pngFilePath = os.path.join(outputAnalysisDir, pngFileName)
            fig.write_image(pngFilePath, scale=3)
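            # Note (environment assumption): plotly's static image export via
            # write_image requires the "kaleido" package (or a comparable image
            # export engine) to be installed in the runtime environment.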

            # Save as HTML
            # htmlFileName = f"{name[0]}.html"
            # htmlFilePath = os.path.join(outputAnalysisDir, htmlFileName)
            # fig.write_html(htmlFilePath)

            result_rows.append({
                Field_Return_TypeAnalyst: self.typeAnalyst(),
                Field_PowerFarmCode: conf.dataContract.dataFilter.powerFarmID,
                Field_Return_BatchCode: conf.dataContract.dataFilter.dataBatchNum,
                Field_CodeOfTurbine: name[1],
                Field_Return_FilePath: pngFilePath,
                Field_Return_IsSaveDatabase: False
            })

            # Write the JSON object to file
            output_json_path_each = os.path.join(outputAnalysisDir, f"{name[0]}.json")
            with open(output_json_path_each, 'w', encoding='utf-8') as f:
                json.dump(json_output, f, ensure_ascii=False, indent=4)

            # If a DataFrame is to be returned, include the file path
            result_rows.append({
                Field_Return_TypeAnalyst: self.typeAnalyst(),
                Field_PowerFarmCode: conf.dataContract.dataFilter.powerFarmID,
                Field_Return_BatchCode: conf.dataContract.dataFilter.dataBatchNum,
                Field_CodeOfTurbine: name[1],
                Field_Return_FilePath: output_json_path_each,
                Field_Return_IsSaveDatabase: True
            })

            # result_rows.append({
            #     Field_Return_TypeAnalyst: self.typeAnalyst(),
            #     Field_PowerFarmCode: conf.dataContract.dataFilter.powerFarmID,
            #     Field_Return_BatchCode: conf.dataContract.dataFilter.dataBatchNum,
            #     Field_CodeOfTurbine: name[1],
            #     Field_Return_FilePath: htmlFilePath,
            #     Field_Return_IsSaveDatabase: True
            # })

        result_df = pd.DataFrame(result_rows)
        return result_df