tsrCpPowerAnalyst.py

import os
import json
import math
import numpy as np
import pandas as pd
from plotly.subplots import make_subplots
import plotly.express as px
import plotly.graph_objects as go
import seaborn as sns
from matplotlib.ticker import MultipleLocator

from behavior.analystWithGoodPoint import AnalystWithGoodPoint
from utils.directoryUtil import DirectoryUtil as dir
from algorithmContract.confBusiness import *
from algorithmContract.contract import Contract


class TSRCpPowerAnalyst(AnalystWithGoodPoint):
    """
    Tip-speed-ratio (TSR) / Cp / power analysis for wind turbines.
    """

    def typeAnalyst(self):
        return "tsr_cp_power"

    def turbinesAnalysis(self, outputAnalysisDir, conf: Contract, turbineCodes):
        dictionary = self.processTurbineData(
            turbineCodes, conf,
            [Field_DeviceCode, Field_Time, Field_WindSpeed, Field_ActiverPower,
             Field_RotorSpeed, Field_GeneratorSpeed])
        dataFrameOfTurbines = self.userDataFrame(
            dictionary, conf.dataContract.configAnalysis, self)

        # Check that the required columns are present
        required_columns = {Field_WindSpeed, Field_RotorSpeed, Field_PowerFloor,
                            Field_GeneratorSpeed, Field_Cp}
        if not required_columns.issubset(dataFrameOfTurbines.columns):
            raise ValueError(f"DataFrame缺少必要的列。需要的列有: {required_columns}")
        turbineInfos = self.common.getTurbineInfos(
            conf.dataContract.dataFilter.powerFarmID, turbineCodes, self.turbineInfo)
        groupedOfTurbineModel = turbineInfos.groupby(Field_MillTypeCode)

        returnDatas = []
        for turbineModelCode, group in groupedOfTurbineModel:
            currTurbineCodes = group[Field_CodeOfTurbine].unique().tolist()
            currTurbineModeInfo = self.common.getTurbineModelByCode(
                turbineModelCode, self.turbineModelInfo)
            currDataFrameOfTurbines = dataFrameOfTurbines[
                dataFrameOfTurbines[Field_CodeOfTurbine].isin(currTurbineCodes)]

            # Work on a copy so the per-model slice of dataFrameOfTurbines stays untouched
            dataFrameMerge = currDataFrameOfTurbines.copy()
            # return self.plot_tsr_distribution(self.tsr(dataFrameMerge), outputAnalysisDir, conf)
            dataFrameMerge[Field_PowerFarmName] = self.currPowerFarmInfo.loc[Field_PowerFarmName]

            # Calculate 'power_floor'
            dataFrameMerge[Field_PowerFloor] = (
                dataFrameMerge[Field_ActiverPower] / 10).astype(int) * 10
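            # The truncating division above buckets active power into 10 kW bins
            # (e.g. 376.4 kW -> 370 kW); these bins are the x-axis of the
            # TSR/Cp^(1/3) distribution curves built below.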
            # Ensure the necessary columns are of float type
            dataFrameMerge[Field_WindSpeed] = dataFrameMerge[Field_WindSpeed].astype(float)
            dataFrameMerge[Field_RotorSpeed] = dataFrameMerge[Field_RotorSpeed].astype(float)
            dataFrameMerge[Field_Cp] = dataFrameMerge[Field_Cp].astype(float)
            dataFrameMerge[Field_GeneratorSpeed] = dataFrameMerge[Field_GeneratorSpeed].astype(float)

            # Keep only samples between the fleet's max cut-in and min rated wind speed (partial-load region)
            max_cutin = self.turbineModelInfo[Field_CutInWS].max()
            min_rated = self.turbineModelInfo[Field_RatedWindSpeed].min()
            dataFrameMerge = dataFrameMerge[
                (dataFrameMerge[Field_WindSpeed] > max_cutin)
                & (dataFrameMerge[Field_WindSpeed] < min_rated)]

            # Normalized tip speed ratio: TSR / Cp^(1/3)
            dataFrameMerge[Field_TSRModified] = dataFrameMerge[Field_TSR] / (dataFrameMerge[Field_Cp] ** (1 / 3))
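            # Physical interpretation (assumption, added note): with TSR = omega*R / v
            # and P = 0.5*rho*A*Cp*v^3, substituting v = (P / (0.5*rho*A*Cp))^(1/3) gives
            #   TSR / Cp^(1/3) = omega*R * (0.5*rho*A / P)^(1/3),
            # so the plotted quantity depends only on rotor speed and power, which
            # makes the curves of different turbines comparable within each power bin.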

            # Group by 'power_floor' and take per-turbine medians within each power bin
            grouped = dataFrameMerge.groupby([Field_PowerFloor, Field_CodeOfTurbine, Field_NameOfTurbine]).agg({
                Field_WindSpeed: 'median',
                Field_RotorSpeed: 'median',
                Field_GeneratorSpeed: 'median',
                Field_TSRModified: 'median',
                Field_Cp: 'median',
                Field_PowerFarmName: 'max'
            }).reset_index()

            # Rename columns for clarity post aggregation
            grouped.columns = [Field_PowerFloor, Field_CodeOfTurbine, Field_NameOfTurbine, Field_WindSpeed,
                               Field_RotorSpeed, Field_GeneratorSpeed, Field_TSRModified, Field_Cp, Field_PowerFarmName]

            # Sort by turbine code and 'power_floor'
            grouped = grouped.sort_values(by=[Field_CodeOfTurbine, Field_PowerFloor])

            returnData = self.plot_tsrcp_distribution(
                grouped, outputAnalysisDir, conf, currTurbineModeInfo)
            returnDatas.append(returnData)

        returnResult = pd.concat(returnDatas, ignore_index=True)
        return returnResult

    def plot_tsrcp_distribution(self, dataFrameMerge: pd.DataFrame, outputAnalysisDir, conf: Contract, turbineModelInfo: pd.Series):
        """
        Generates TSR/Cp^(1/3)-vs-power distribution plots for the turbines of a wind farm.

        Parameters:
        - dataFrameMerge: pd.DataFrame, data aggregated per power floor and turbine.
        - outputAnalysisDir: str, directory the output files are written to.
        - conf: Contract, analysis configuration and data filter.
        - turbineModelInfo: pd.Series, information about the current turbine model.
        """
        x_name = Field_PowerFloor
        y_name = Field_TSRModified
        upLimitOfTSR = 20

        # Farm-wide TSR/Cp^(1/3) distribution plot
        fig = go.Figure()
        # colors = px.colors.sequential.Turbo

        # Collect the per-turbine curves for the JSON output
        turbine_data_list = []

        # Add one line per turbine
        for turbine in dataFrameMerge[Field_NameOfTurbine].unique():
            turbine_data = dataFrameMerge[dataFrameMerge[Field_NameOfTurbine] == turbine]
            fig.add_trace(go.Scatter(x=turbine_data[x_name], y=turbine_data[y_name],
                                     mode='lines',
                                     line=dict(width=1.2),
                                     # line=dict(color=colors[idx % len(colors)]),
                                     name=turbine))

            # Extract this turbine's curve data
            turbine_data_total = {
                "engineName": turbine,
                "engineCode": turbine_data[Field_CodeOfTurbine].iloc[0],
                "xData": turbine_data[x_name].tolist(),
                "yData": turbine_data[y_name].tolist(),
            }
            turbine_data_list.append(turbine_data_total)

        fig.update_layout(
            title={
                "text": f'叶尖速比-风能利用系数分析-功率分布图-{turbineModelInfo[Field_MachineTypeCode]}',
                'x': 0.5
            },
            xaxis=dict(
                title='功率',
                dtick=200,
                tickangle=-45,
                range=[0, 1800]),
            yaxis=dict(
                title='叶尖速比/风能利用系数^(1/3)',  # r"$\frac{TSR}{Cp^{1/3}}$" renders correctly only in .png output
                dtick=self.axisStepTSR,
                range=[self.axisLowerLimitTSR,
                       self.axisUpperLimitTSR]
            ),
            legend=dict(
                orientation="h",   # Horizontal orientation
                xanchor="center",  # Anchor the legend to the center
                x=0.5,             # Position legend at the center of the x-axis
                y=-0.2,            # Position legend below the x-axis
                # itemsizing='constant',  # Keep the size of the legend entries constant
                # itemwidth=50
            )
        )
        # Rotate the x-axis tick labels
        fig.update_xaxes(tickangle=-45)

        # Save/show the farm-wide figure (disabled)
        # fig.write_image(outputAnalysisDir + r"/{}-TSR-Distibute.png".format(confData.farm_name), format='png', width=800, height=500, scale=3)
        # fig.show()

        engineTypeCode = turbineModelInfo.get(Field_MillTypeCode, "")
        if isinstance(engineTypeCode, pd.Series):
            engineTypeCode = engineTypeCode.iloc[0]
        engineTypeName = turbineModelInfo.get(Field_MachineTypeCode, "")
        if isinstance(engineTypeName, pd.Series):
            engineTypeName = engineTypeName.iloc[0]

        # Build the farm-wide JSON payload
        json_output = {
            "analysisTypeCode": "叶尖速比-风能利用系数分析-功率分布",
            "typecode": turbineModelInfo[Field_MillTypeCode],
            "engineCode": engineTypeCode,
            "engineTypeName": engineTypeName,
            "title": f'叶尖速比-风能利用系数分析-功率分布图-{turbineModelInfo[Field_MachineTypeCode]}',
            "xaixs": "功率(kW)",
            "yaixs": "叶尖速比/风能利用系数^(1/3)",
            "data": turbine_data_list
        }

        # Save as HTML (disabled)
        # htmlFileName = f"{dataFrameMerge[Field_PowerFarmName].iloc[0]}-TSR-Cp-Power-Distribution.html"
        # htmlFileName = f"{turbineModelInfo[Field_MillTypeCode]}-TSR-Cp-Power-Distribution.html"
        # htmlFilePath = os.path.join(outputAnalysisDir, htmlFileName)
        # fig.write_html(htmlFilePath)

        result_rows = []
        # result_rows.append({
        #     Field_Return_TypeAnalyst: self.typeAnalyst(),
        #     Field_PowerFarmCode: conf.dataContract.dataFilter.powerFarmID,
        #     Field_Return_BatchCode: conf.dataContract.dataFilter.dataBatchNum,
        #     Field_CodeOfTurbine: 'total',
        #     Field_Return_FilePath: htmlFilePath,
        #     Field_Return_IsSaveDatabase: True
        # })

        # Write the farm-wide JSON payload to file
        output_json_path = os.path.join(outputAnalysisDir, f"{turbineModelInfo[Field_MillTypeCode]}.json")
        with open(output_json_path, 'w', encoding='utf-8') as f:
            json.dump(json_output, f, ensure_ascii=False, indent=4)

        # Record the JSON file path in the returned DataFrame
        result_rows.append({
            Field_Return_TypeAnalyst: self.typeAnalyst(),
            Field_PowerFarmCode: conf.dataContract.dataFilter.powerFarmID,
            Field_Return_BatchCode: conf.dataContract.dataFilter.dataBatchNum,
            Field_CodeOfTurbine: 'total',
            Field_MillTypeCode: turbineModelInfo[Field_MillTypeCode],
            Field_Return_FilePath: output_json_path,
            Field_Return_IsSaveDatabase: True
        })

        # Per-turbine TSR/Cp^(1/3) distribution plots
        for name, group in dataFrameMerge.groupby([Field_NameOfTurbine, Field_CodeOfTurbine]):
            fig = go.Figure()
            # Collect the per-turbine curves for this plot's JSON output
            turbine_data_list_each = []

            # Draw every turbine as a light grey background line
            for turbine in dataFrameMerge[Field_NameOfTurbine].unique():
                turbine_data = dataFrameMerge[dataFrameMerge[Field_NameOfTurbine] == turbine]
                fig.add_trace(go.Scatter(x=turbine_data[x_name],
                                         y=turbine_data[y_name],
                                         mode='lines',
                                         line=dict(color='lightgrey', width=1.2),
                                         showlegend=False))

                # Extract this turbine's curve data
                turbine_data_each = {
                    "engineName": turbine,
                    "engineCode": turbine_data[Field_CodeOfTurbine].iloc[0],
                    "xData": turbine_data[x_name].tolist(),
                    "yData": turbine_data[y_name].tolist(),
                }
                turbine_data_list_each.append(turbine_data_each)

            # Highlight the current turbine in dark blue
            fig.add_trace(go.Scatter(x=group[x_name],
                                     y=group[y_name],
                                     mode='lines',
                                     line=dict(color='darkblue', width=1.5),
                                     showlegend=False))

            fig.update_layout(
                title={"text": '机组: {}'.format(name[0])},
                # margin=dict(
                #     t=35,  # top margin; a smaller value moves the title closer to the plot
                #     l=60,  # left margin
                #     r=60,  # right margin
                #     b=40,  # bottom margin
                # ),
                xaxis=dict(
                    title='功率',
                    dtick=200,
                    tickangle=-45,
                    range=[0, 1800]),
                yaxis=dict(
                    title='叶尖速比/风能利用系数^(1/3)',  # r"$\frac{TSR}{Cp^{1/3}}$" renders correctly only in .png output
                    dtick=self.axisStepTSR,
                    range=[self.axisLowerLimitTSR,
                           self.axisUpperLimitTSR]
                )
            )
            fig.update_xaxes(tickangle=-45)

            # Save the per-turbine image
            pngFileName = f"{name[0]}.png"
            pngFilePath = os.path.join(outputAnalysisDir, pngFileName)
            fig.write_image(pngFilePath, scale=3)

            engineTypeCode = turbineModelInfo.get(Field_MillTypeCode, "")
            if isinstance(engineTypeCode, pd.Series):
                engineTypeCode = engineTypeCode.iloc[0]
            engineTypeName = turbineModelInfo.get(Field_MachineTypeCode, "")
            if isinstance(engineTypeName, pd.Series):
                engineTypeName = engineTypeName.iloc[0]

            # Build the per-turbine JSON payload
            json_output = {
                "analysisTypeCode": "叶尖速比-风能利用系数分析-功率分布",
                "typecode": turbineModelInfo[Field_MillTypeCode],
                "engineCode": engineTypeCode,
                "engineTypeName": engineTypeName,
                "title": f'机组: {name[0]}',
                "xaixs": "功率(kW)",
                "yaixs": "叶尖速比/风能利用系数^(1/3)",
                "data": turbine_data_list_each
            }

            # Save as HTML (disabled)
            # htmlFileName = f"{name[0]}.html"
            # htmlFilePath = os.path.join(outputAnalysisDir, htmlFileName)
            # fig.write_html(htmlFilePath)

            # Record the PNG file path in the returned DataFrame
            result_rows.append({
                Field_Return_TypeAnalyst: self.typeAnalyst(),
                Field_PowerFarmCode: conf.dataContract.dataFilter.powerFarmID,
                Field_Return_BatchCode: conf.dataContract.dataFilter.dataBatchNum,
                Field_CodeOfTurbine: name[1],
                Field_Return_FilePath: pngFilePath,
                Field_Return_IsSaveDatabase: False
            })

            # Write the per-turbine JSON payload to file
            output_json_path_each = os.path.join(outputAnalysisDir, f"{name[0]}.json")
            with open(output_json_path_each, 'w', encoding='utf-8') as f:
                json.dump(json_output, f, ensure_ascii=False, indent=4)

            # Record the JSON file path in the returned DataFrame
            result_rows.append({
                Field_Return_TypeAnalyst: self.typeAnalyst(),
                Field_PowerFarmCode: conf.dataContract.dataFilter.powerFarmID,
                Field_Return_BatchCode: conf.dataContract.dataFilter.dataBatchNum,
                Field_CodeOfTurbine: name[1],
                Field_Return_FilePath: output_json_path_each,
                Field_Return_IsSaveDatabase: True
            })

            # result_rows.append({
            #     Field_Return_TypeAnalyst: self.typeAnalyst(),
            #     Field_PowerFarmCode: conf.dataContract.dataFilter.powerFarmID,
            #     Field_Return_BatchCode: conf.dataContract.dataFilter.dataBatchNum,
            #     Field_CodeOfTurbine: name[1],
            #     Field_Return_FilePath: htmlFilePath,
            #     Field_Return_IsSaveDatabase: True
            # })

        result_df = pd.DataFrame(result_rows)
        return result_df
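

# ---------------------------------------------------------------------------
# Minimal standalone sketch (illustration only, not part of the analysis
# pipeline): on synthetic data, and with plain string column names and the
# hypothetical turbine names "WT01"/"WT02" instead of the framework's Field_*
# constants, it reproduces the power-floor binning and TSR/Cp^(1/3) median
# aggregation performed in turbinesAnalysis above.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    n = 1000
    demo = pd.DataFrame({
        "turbine": rng.choice(["WT01", "WT02"], size=n),   # hypothetical turbine names
        "power": rng.uniform(100.0, 1700.0, size=n),       # active power in kW
        "tsr": rng.normal(9.0, 0.5, size=n),               # tip speed ratio
        "cp": rng.uniform(0.35, 0.45, size=n),             # power coefficient
    })

    # Bin active power into 10 kW buckets and normalize TSR by Cp^(1/3)
    demo["power_floor"] = (demo["power"] / 10).astype(int) * 10
    demo["tsr_modified"] = demo["tsr"] / (demo["cp"] ** (1 / 3))

    # Median of the normalized TSR per turbine and power bin
    curves = (demo.groupby(["turbine", "power_floor"])["tsr_modified"]
              .median()
              .reset_index()
              .sort_values(["turbine", "power_floor"]))

    # One line per turbine, mirroring the farm-wide figure built above
    demo_fig = go.Figure()
    for turbine, curve in curves.groupby("turbine"):
        demo_fig.add_trace(go.Scatter(x=curve["power_floor"], y=curve["tsr_modified"],
                                      mode='lines', name=turbine))
    demo_fig.update_layout(xaxis_title="功率(kW)", yaxis_title="叶尖速比/风能利用系数^(1/3)")
    print(curves.head())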