# Analyzing and Predicting Optimal Equipment Efficiency
# 1. Description
- Load the most recently trained model weights for use in the subsequent analysis and prediction (see the sketch after this list).
- Based on the analysis range provided by the user, write the prediction results to a file.
- Upload the prediction result file to FastWeb and update the prediction result record.
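As a minimal sketch of the first step, and assuming checkpoints are saved as `.pt` files under a `model/` directory as in the full program in Section 2, the newest checkpoint can be located and loaded like this:

```python
import os
import torch

# Pick the most recently created .pt checkpoint under model/
# (the directory layout used by the full program in Section 2).
model_dir = "model/"
latest_file = max(
    (os.path.join(model_dir, f) for f in os.listdir(model_dir) if f.endswith(".pt")),
    key=os.path.getctime,
    default=None,
)
if latest_file is None:
    raise FileNotFoundError("no .pt checkpoint found under model/")

# Load the saved weights; map_location lets a GPU-trained checkpoint
# be loaded on a CPU-only host.
state_dict = torch.load(latest_file, map_location="cpu")
```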
# 2. Designing the Python Program
The example Python program is shown below:
```python
# Analysis and prediction
import torch
import torch.nn as nn
#import torch.optim as optim
import json
#import websockets
import requests
#from aiohttp import web
#from pydantic import BaseModel
import csv
import random
import datetime
#import asyncio
import os
import logging
from logging.handlers import TimedRotatingFileHandler
import sys
#import gc

fastweb_url = 'http://192.168.0.201:8803'

# Create the directory if it does not exist
def create_directory_if_not_exists(directory_path):
    if not os.path.exists(directory_path):
        os.makedirs(directory_path)
        #logger.info(f"Directory '{directory_path}' created")

# Define the neural network model: 4 input features (kp, ki, kd, setpoint),
# 10 Linear+ReLU hidden blocks of 100 units, and one output (the predicted sumweight)
class NeuralNetwork(nn.Module):
    def __init__(self):
        super(NeuralNetwork, self).__init__()
        self.hidden_layers = nn.Sequential(
            nn.Linear(4, 100),
            nn.ReLU()
        )
        for i in range(9):
            self.hidden_layers.add_module(f'hidden_{i+1}', nn.Linear(100, 100))
            self.hidden_layers.add_module(f'relu_{i+1}', nn.ReLU())
        self.output_layer = nn.Linear(100, 1)

    def forward(self, x):
        x = self.hidden_layers(x)
        x = self.output_layer(x)
        return x.squeeze(-1)

# Split a dataset into a training set and a validation set
def split_dataset(data, split_ratio):
    random.shuffle(data)
    split_index = int(len(data) * split_ratio)
    train_data = data[:split_index]
    validate_data = data[split_index:]
    return train_data, validate_data

# Save a dataset to a CSV file
def save_dataset_to_csv(filename, data):
    with open(filename, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(['kp', 'ki', 'kd', 'setpoint', 'sumWeight'])
        writer.writerows(data)

# Load a dataset from a CSV file
def load_dataset_from_csv(filename):
    data = []
    with open(filename, 'r') as csvfile:
        reader = csv.reader(csvfile)
        next(reader)  # skip the header row
        for row in reader:
            data.append([float(value) for value in row])
    return data

# Run the analysis and prediction
def main():
    # Make sure the working directories exist before the log handler opens its file
    create_directory_if_not_exists('log/')
    create_directory_if_not_exists('data/')
    create_directory_if_not_exists('model/')
    # Configure logging: rotate daily, keep 7 days
    logger = logging.getLogger('__dcc_pid_predict__')
    if logger.hasHandlers():
        logger.handlers.clear()
    log_filename = 'log/dcc_pid_predict.log'
    log_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    log_handler = TimedRotatingFileHandler(log_filename, when="D", interval=1, backupCount=7)
    log_handler.suffix = "%Y-%m-%d.log"
    log_handler.encoding = "utf-8"
    log_handler.setFormatter(log_formatter)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(log_handler)
    # Generate an automatic run number from the current date and time,
    # e.g. 20230515120000
    current_datetime = datetime.datetime.now()
    auto_number = current_datetime.strftime("%Y%m%d%H%M%S")
    # Use GPU acceleration when available
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Find the most recently created .pt checkpoint under model/
    model_dir = "model/"
    latest_file = max(
        (os.path.join(model_dir, f) for f in os.listdir(model_dir) if f.endswith(".pt")),
        key=os.path.getctime,
        default=None
    )
    if latest_file:
        model_path = os.path.join(model_dir, os.path.basename(latest_file))
        logger.info(model_path)
    else:
        logger.info("没有找到 .pt 文件")
        return  # nothing to load
    # Load the trained model
    model = NeuralNetwork().to(device)
    logger.info("加载模型1")
    model.load_state_dict(torch.load(model_path, map_location=device))
    logger.info("加载模型2")
    model.eval()
    logger.info(f"模型已加载:{model_path}")
    try:
        # input_value is injected by the FastWeb taskrunner; its value is the
        # JSON string describing the analysis range, for example:
        # params = {"username": "admin","tag": "0","guid": "45156B2E-8EDC-41E5-BE2F-030A10A2ECE4","eqname": "esp32","periodid": 1,"kp_min": 5,"kp_step": 0.1,"kp_max": 30,"ki_min": 5,"ki_max": 25,"ki_step": 0.1,"kd_min": 0,"kd_max": 0,"kd_step": 0.1,"setpoint_min": 30.7,"setpoint_max": 30.7,"setpoint_step": 0.1}
        params = json.loads(input_value.value)
        guid = params["guid"]
        # Ranges and step sizes for the values to predict
        kp_min = params["kp_min"]
        kp_max = params["kp_max"]
        kp_step = params["kp_step"]
        ki_min = params["ki_min"]
        ki_max = params["ki_max"]
        ki_step = params["ki_step"]
        kd_min = params["kd_min"]
        kd_max = params["kd_max"]
        kd_step = params["kd_step"]
        setpoint_min = params["setpoint_min"]
        setpoint_max = params["setpoint_max"]
        setpoint_step = params["setpoint_step"]
        # Generate the candidate values to predict
        if kp_min == kp_max:
            kp_values = torch.tensor([kp_min], dtype=torch.float32)
        else:
            kp_values = torch.arange(kp_min, kp_max + kp_step, kp_step, dtype=torch.float32)
        if ki_min == ki_max:
            ki_values = torch.tensor([ki_min], dtype=torch.float32)
        else:
            ki_values = torch.arange(ki_min, ki_max + ki_step, ki_step, dtype=torch.float32)
        if kd_min == kd_max:
            kd_values = torch.tensor([kd_min], dtype=torch.float32)
        else:
            kd_values = torch.arange(kd_min, kd_max + kd_step, kd_step, dtype=torch.float32)
        if setpoint_min == setpoint_max:
            setpoint_values = torch.tensor([setpoint_min], dtype=torch.float32)
        else:
            setpoint_values = torch.arange(setpoint_min, setpoint_max + setpoint_step, setpoint_step, dtype=torch.float32)
        # Build the input tensor: every combination of kp, ki, kd and setpoint
        input_data = torch.cartesian_prod(kp_values, ki_values, kd_values, setpoint_values).to(device)
        # Run the model to predict sumweight for every combination
        with torch.inference_mode():
            predictions = model(input_data)
        # Flatten the predictions to a 1-D tensor and append them as a fifth column
        sumweight_values = predictions.flatten()
        result_tensor = torch.cat((input_data, sumweight_values.unsqueeze(1)), dim=1)
        # Save the prediction results to a CSV file named <eqname>_<auto_number>.csv
        csv_file = f"data/{params['eqname']}_{auto_number}.csv"
        with open(csv_file, 'w', newline='') as file:
            writer = csv.writer(file)
            writer.writerow(['kp', 'ki', 'kd', 'setpoint', 'sumweight'])  # CSV header
            writer.writerows(result_tensor.tolist())  # data rows
        logger.info(f"数据集已保存到 {csv_file}")
        csv_filename = f"{params['eqname']}_{auto_number}.csv"
        # Find the minimum sumweight and the kp, ki, kd and setpoint that produce it
        min_sumweight, min_index = torch.min(result_tensor[:, 4], dim=0)
        min_kp = result_tensor[min_index, 0]
        min_ki = result_tensor[min_index, 1]
        min_kd = result_tensor[min_index, 2]
        min_setpoint = result_tensor[min_index, 3]
        # Update the prediction-log record in FastWeb
        isfinish = True
        url = fastweb_url + "/?restapi=pid_update_predictlog"
        data = {"guid": guid, "isfinish": isfinish, "min_kp": min_kp.item(), "min_ki": min_ki.item(),
                "min_kd": min_kd.item(), "min_setpoint": min_setpoint.item(),
                "min_sumweight": min_sumweight.item(), "csv_file": csv_filename, "model_path": ""}
        data = json.dumps(data)
        logger.info(data)
        response = requests.post(url, data=data)
        if response.status_code == 200:
            logger.info("请求成功")
        # Upload the result file to FastWeb's temp/ directory
        with open(csv_file, 'rb') as f:
            files = {
                'fileName': (csv_filename, f, 'application/octet-stream'),
                'filePath': (None, 'temp/')
            }
            file_params = {
                'restapi': 'uploadfiles'
            }
            response = requests.post(fastweb_url, params=file_params, files=files)
        os.remove(csv_file)
        # Send an HTTP request to notify the client that the prediction has finished
        data = json.dumps({"username": params['username'], "action": "callback", "tag": params['tag'],
                           "data": {"callbackcomponent": "WebHomeFrame", "callbackeventname": "update",
                                    "callbackparams": [{"paramname": "messagetype", "paramvalue": "success"},
                                                       {"paramname": "title", "paramvalue": "success"},
                                                       {"paramname": "message", "paramvalue": "设备最佳运转效益模型预测分析已完成"}]}})
        input_value.value = '预测分析已完成'
        url = fastweb_url + "/?restapi=sendwsmsg"
        response = requests.post(url, data=data)
        if response.status_code == 200:
            logger.info("请求成功")
        # System names to keep (such as __name__)
        keep = {'__name__', '__doc__', '__package__', '__loader__', '__spec__', '__builtins__'}
        # Delete all other global variables to free memory between taskrunner runs
        for name in list(globals().keys()):
            if name not in keep:
                #print(name)
                del globals()[name]
        # gc.collect()  # force garbage collection
    except Exception as e:
        logger.error(e)
        #sys.exit(0)

if __name__ == "__main__":
    main()
```
Save the above program as preset data, in the format shown below.

The parameter defined in the above program is described as follows:
- Parameter name: `input_value` — the analysis-range settings passed in by the caller as a JSON string; on completion the script also writes its status message back to this variable. A representative payload is shown below.
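The following example, taken from the commented sample inside the program above, shows what `json.loads(input_value.value)` is expected to parse into:

```python
# Example analysis-range parameters, as produced by
# json.loads(input_value.value) in the program above.
params = {
    "username": "admin", "tag": "0",
    "guid": "45156B2E-8EDC-41E5-BE2F-030A10A2ECE4",
    "eqname": "esp32", "periodid": 1,
    "kp_min": 5, "kp_max": 30, "kp_step": 0.1,
    "ki_min": 5, "ki_max": 25, "ki_step": 0.1,
    "kd_min": 0, "kd_max": 0, "kd_step": 0.1,
    "setpoint_min": 30.7, "setpoint_max": 30.7, "setpoint_step": 0.1,
}
```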
# 3. Invocation
You can use the FastWeb 数控中心-设备最佳运转效益-PID智能分析助手 (CNC Center - Optimal Equipment Efficiency - PID Intelligent Analysis Assistant) to invoke the Python script that runs the model-based analysis. Configure the address of the taskrunner to call, then click [分析预测] (Analyze & Predict) on the analysis page to start the model prediction process. After the prediction completes, you can see the record of this run together with the corresponding prediction results. Click a record to view its chart information.
