Update app.py
app.py
CHANGED
@@ -8,30 +8,21 @@ from scipy.optimize import minimize
import plotly.express as px
from scipy.stats import t, f
import gradio as gr
-import io
-import os
-from zipfile import ZipFile
-import warnings
-
-# Suppress specific warnings
-warnings.filterwarnings('ignore', category=UserWarning)
-warnings.filterwarnings('ignore', category=RuntimeWarning)

class RSM_BoxBehnken:
    def __init__(self, data, x1_name, x2_name, x3_name, y_name, x1_levels, x2_levels, x3_levels):
        """
-        Levels of each independent variable
        """
        self.data = data.copy()
        self.model = None
@@ -39,338 +30,484 @@ class RSM_BoxBehnken:
        self.optimized_results = None
        self.optimal_levels = None

-        # Variable names
        self.x1_name = x1_name
        self.x2_name = x2_name
        self.x3_name = x3_name
        self.y_name = y_name

        self.x1_levels = x1_levels
        self.x2_levels = x2_levels
        self.x3_levels = x3_levels

        """
-        variable_name : str
-            Name of the variable

        Returns:
-        list
-            Levels of the variable
        """
-            raise ValueError(f"
-        return level_map[variable_name]

-    def fit_model(self
        """
-        Parameters:
-        -----------
-        simplified : bool, optional
-            Whether to fit a simplified model, by default False
-
-        Returns:
-        --------
-        tuple
-            Fitted model and Pareto chart
        """

    def optimize(self, method='Nelder-Mead'):
        """
-        method : str, optional
-            Optimization method, by default 'Nelder-Mead'
-
-        Returns:
-        --------
-        pandas.DataFrame
-            Optimization results table
        """
        if self.model_simplified is None:

        def objective_function(x):
-            return -self.model_simplified.predict(pd.DataFrame({
-                self.x1_name: [x[0]],
-                self.x2_name: [x[1]],
-                self.x3_name: [x[2]]
-            }))

        bounds = [(-1, 1), (-1, 1), (-1, 1)]
        x0 = [0, 0, 0]

-        self.optimized_results = minimize(
-            objective_function,
-            x0,
-            method=method,
-            bounds=bounds
-        )
        self.optimal_levels = self.optimized_results.x

        optimal_levels_natural = [
        ]

        optimization_table = pd.DataFrame({
            'Variable': [self.x1_name, self.x2_name, self.x3_name],
        })

        return optimization_table

        """
-        variable_name : str
-            Name of the variable

        Returns:
-        float
-            Natural level of the variable
        """

-        Convert natural level to coded value
        """
        return -1 + 2 * (natural_value - levels[0]) / (levels[-1] - levels[0])

    def pareto_chart(self, model, title):
        """
-        title : str
-            Title of the Pareto chart
-
-        Returns:
-        --------
-        plotly.graph_objects.Figure
-            Pareto chart
        """
        abs_tvalues = np.abs(tvalues)
        sorted_idx = np.argsort(abs_tvalues)[::-1]
        sorted_tvalues = abs_tvalues[sorted_idx]
        sorted_names = tvalues.index[sorted_idx]

        t_critical = t.ppf(1 - alpha / 2, dof)

        fig = px.bar(
            x=sorted_tvalues,
            y=sorted_names,
            orientation='h',
-            labels={'x': '
            title=title
        )
        fig.update_yaxes(autorange="reversed")
        fig.add_vline(x=t_critical, line_dash="dot",
-                      annotation_text=f"
                      annotation_position="bottom right")

        return fig

        """
-        Returns:
-        --------
-        pandas.DataFrame
-            Prediction table
        """
        if self.model_simplified is None:

-        prediction_table['Residual'] = residuals.round(3)

    def calculate_contribution_percentage(self):

    def calculate_detailed_anova(self):
        """
-        Returns:
-        --------
-        pandas.DataFrame
-            Detailed ANOVA table
        """
        if self.model_simplified is None:

-        df_total = len(self.data) - 1

-        # ANOVA para modelo reducido
        formula_reduced = f'{self.y_name} ~ {self.x1_name} + {self.x2_name} + {self.x3_name} + ' \
                          f'I({self.x1_name}**2) + I({self.x2_name}**2) + I({self.x3_name}**2)'
        model_reduced = smf.ols(formula_reduced, data=self.data).fit()
        anova_reduced = sm.stats.anova_lm(model_reduced, typ=2)

        df_regression = len(anova_reduced) - 1

        ss_residual = self.model_simplified.ssr
        df_residual = self.model_simplified.df_resid

        replicas = self.data[self.data.duplicated(subset=[self.x1_name, self.x2_name, self.x3_name], keep=False)]
        ss_pure_error = replicas.groupby([self.x1_name, self.x2_name, self.x3_name])[self.y_name].var().sum()
        df_pure_error = len(replicas) - len(replicas.groupby([self.x1_name, self.x2_name, self.x3_name]))

        ss_lack_of_fit = ss_residual - ss_pure_error
        df_lack_of_fit = df_residual - df_pure_error

        ms_regression = ss_regression / df_regression
        ms_residual = ss_residual / df_residual
        ms_lack_of_fit = ss_lack_of_fit / df_lack_of_fit
        ms_pure_error = ss_pure_error / df_pure_error

        f_lack_of_fit = ms_lack_of_fit / ms_pure_error
-        p_lack_of_fit = 1 - f.cdf(f_lack_of_fit, df_lack_of_fit, df_pure_error)

-        # Crear tabla
        detailed_anova_table = pd.DataFrame({
-                round(ss_total, 3)
-            ],
-            'Degrees of Freedom': [df_regression, df_residual, df_lack_of_fit, df_pure_error, df_total],
-            'Mean Square': [
-                round(ms_regression, 3),
-                round(ms_residual, 3),
-                round(ms_lack_of_fit, 3),
-                round(ms_pure_error, 3),
-                np.nan
-            ],
-            'F': [np.nan, np.nan, round(f_lack_of_fit, 3), np.nan, np.nan],
-            'p-value': [np.nan, np.nan, round(p_lack_of_fit, 3), np.nan, np.nan]
        })

        return detailed_anova_table

# --- Funciones para la interfaz de Gradio ---

def load_data(x1_name, x2_name, x3_name, y_name, x1_levels_str, x2_levels_str, x3_levels_str, data_str):
    try:
        x1_levels = [float(x.strip()) for x in x1_levels_str.split(',')]
        x2_levels = [float(x.strip()) for x in x2_levels_str.split(',')]
        x3_levels = [float(x.strip()) for x in x3_levels_str.split(',')]

        data_list = [row.split(',') for row in data_str.strip().split('\n')]
        column_names = ['Exp.', x1_name, x2_name, x3_name, y_name]
        data = pd.DataFrame(data_list, columns=column_names)
-        data = data.apply(pd.to_numeric, errors='coerce')

        if not all(col in data.columns for col in column_names):
            raise ValueError("El formato de los datos no es correcto.")

        global rsm
        rsm = RSM_BoxBehnken(data, x1_name, x2_name, x3_name, y_name, x1_levels, x2_levels, x3_levels)
@@ -390,7 +527,8 @@ def fit_and_optimize_model():
    prediction_table = rsm.generate_prediction_table()
    contribution_table = rsm.calculate_contribution_percentage()
    anova_table = rsm.calculate_detailed_anova()

    equation_formatted = equation.replace(" + ", "<br>+ ").replace(" ** ", "^").replace("*", " × ")
    equation_formatted = f"### Ecuación del Modelo Simplificado:<br>{equation_formatted}"
@@ -400,64 +538,8 @@ def fit_and_optimize_model():
def generate_rsm_plot(fixed_variable, fixed_level):
    if 'rsm' not in globals():
        return None, "Error: Carga los datos primero."
-
-    all_figs = rsm.generate_all_plots()
-
-    # Crear una lista de figuras para la salida
-    plot_outputs = []
-    for fig in all_figs:
-        # Convertir la figura a una imagen en formato PNG
-        img_bytes = fig.to_image(format="png")
-        plot_outputs.append(img_bytes)
-
-    # Retornar la lista de imágenes
-    return plot_outputs
-
-def download_excel():
-    if 'rsm' not in globals():
-        return None, "Error: Carga los datos y ajusta el modelo primero."
-
-    output = io.BytesIO()
-    with pd.ExcelWriter(output, engine='xlsxwriter') as writer:
-        rsm.data.to_excel(writer, sheet_name='Datos', index=False)
-        rsm.generate_prediction_table().to_excel(writer, sheet_name='Predicciones', index=False)
-        rsm.optimize().to_excel(writer, sheet_name='Optimizacion', index=False)
-        rsm.calculate_contribution_percentage().to_excel(writer, sheet_name='Contribucion', index=False)
-        rsm.calculate_detailed_anova().to_excel(writer, sheet_name='ANOVA', index=False)
-
-    output.seek(0)
-    return gr.File.update(value=output, visible=True, filename="resultados_rsm.xlsx")
-
-def download_images():
-    if 'rsm' not in globals():
-        return None, "Error: Carga los datos y ajusta el modelo primero."
-
-    # Crear un directorio temporal para guardar las imágenes
-    temp_dir = "temp_images"
-    os.makedirs(temp_dir, exist_ok=True)
-
-    # Generar todas las gráficas y guardarlas como imágenes PNG
-    all_figs = rsm.generate_all_plots()
-    for i, fig in enumerate(all_figs):
-        img_path = os.path.join(temp_dir, f"plot_{i}.png")
-        fig.write_image(img_path)
-
-    # Comprimir las imágenes en un archivo ZIP
-    zip_buffer = io.BytesIO()
-    with ZipFile(zip_buffer, "w") as zip_file:
-        for filename in os.listdir(temp_dir):
-            file_path = os.path.join(temp_dir, filename)
-            zip_file.write(file_path, arcname=filename)
-
-    # Eliminar el directorio temporal
-    for filename in os.listdir(temp_dir):
-        file_path = os.path.join(temp_dir, filename)
-        os.remove(file_path)
-    os.rmdir(temp_dir)
-
-    zip_buffer.seek(0)
-    return gr.File.update(value=zip_buffer, visible=True, filename="graficos_rsm.zip")

# --- Crear la interfaz de Gradio ---
@@ -500,8 +582,6 @@ with gr.Blocks() as demo:
        with gr.Row(visible=False) as analysis_row:
            with gr.Column():
                fit_button = gr.Button("Ajustar Modelo y Optimizar")
-                download_excel_button = gr.Button("Descargar Tablas en Excel")
-                download_images_button = gr.Button("Descargar Gráficos en ZIP")
                gr.Markdown("**Modelo Completo**")
                model_completo_output = gr.HTML()
                pareto_completo_output = gr.Plot()
@@ -518,7 +598,7 @@ with gr.Blocks() as demo:
            fixed_variable_input = gr.Dropdown(label="Variable Fija", choices=["Glucosa", "Extracto_de_Levadura", "Triptofano"], value="Glucosa")
            fixed_level_input = gr.Slider(label="Nivel de Variable Fija", minimum=0, maximum=1, step=0.01, value=0.5)
            plot_button = gr.Button("Generar Gráfico")
-            rsm_plot_output = gr.

    load_button.click(
        load_data,
@@ -527,12 +607,8 @@ with gr.Blocks() as demo:
    )

    fit_button.click(fit_and_optimize_model, outputs=[model_completo_output, pareto_completo_output, model_simplificado_output, pareto_simplificado_output, equation_output, optimization_table_output, prediction_table_output, contribution_table_output, anova_table_output])
-
    plot_button.click(generate_rsm_plot, inputs=[fixed_variable_input, fixed_level_input], outputs=[rsm_plot_output])

-    download_excel_button.click(download_excel, outputs=[gr.File()])
-    download_images_button.click(download_images, outputs=[gr.File()])
-
    # Ejemplo de uso
    gr.Markdown("## Ejemplo de uso")
    gr.Markdown("1. Introduce los nombres de las variables y sus niveles en las cajas de texto correspondientes.")
@@ -541,7 +617,5 @@ with gr.Blocks() as demo:
    gr.Markdown("4. Haz clic en 'Ajustar Modelo y Optimizar' para ajustar el modelo y encontrar los niveles óptimos de los factores.")
    gr.Markdown("5. Selecciona una variable fija y su nivel en los controles deslizantes.")
    gr.Markdown("6. Haz clic en 'Generar Gráfico' para generar un gráfico de superficie de respuesta.")
-    gr.Markdown("7. Haz clic en 'Descargar Tablas en Excel' para obtener un archivo Excel con todas las tablas generadas.")
-    gr.Markdown("8. Haz clic en 'Descargar Gráficos en ZIP' para obtener un archivo ZIP con todos los gráficos generados.")

demo.launch()
import plotly.express as px
from scipy.stats import t, f
import gradio as gr

class RSM_BoxBehnken:
    def __init__(self, data, x1_name, x2_name, x3_name, y_name, x1_levels, x2_levels, x3_levels):
        """
+        Inicializa la clase con los datos del diseño Box-Behnken.
+
+        Args:
+            data (pd.DataFrame): DataFrame con los datos del experimento.
+            x1_name (str): Nombre de la primera variable independiente.
+            x2_name (str): Nombre de la segunda variable independiente.
+            x3_name (str): Nombre de la tercera variable independiente.
+            y_name (str): Nombre de la variable dependiente.
+            x1_levels (list): Niveles de la primera variable independiente.
+            x2_levels (list): Niveles de la segunda variable independiente.
+            x3_levels (list): Niveles de la tercera variable independiente.
        """
        self.data = data.copy()
        self.model = None
        self.optimized_results = None
        self.optimal_levels = None

        self.x1_name = x1_name
        self.x2_name = x2_name
        self.x3_name = x3_name
        self.y_name = y_name

+        # Niveles originales de las variables
        self.x1_levels = x1_levels
        self.x2_levels = x2_levels
        self.x3_levels = x3_levels

+    def get_levels(self, variable_name):
        """
+        Obtiene los niveles para una variable específica.
+
+        Args:
+            variable_name (str): Nombre de la variable.

        Returns:
+            list: Niveles de la variable.
        """
+        if variable_name == self.x1_name:
+            return self.x1_levels
+        elif variable_name == self.x2_name:
+            return self.x2_levels
+        elif variable_name == self.x3_name:
+            return self.x3_levels
+        else:
+            raise ValueError(f"Variable desconocida: {variable_name}")
+    def fit_model(self):
        """
+        Ajusta el modelo de segundo orden completo a los datos.
        """
+        formula = f'{self.y_name} ~ {self.x1_name} + {self.x2_name} + {self.x3_name} + ' \
+                  f'I({self.x1_name}**2) + I({self.x2_name}**2) + I({self.x3_name}**2) + ' \
+                  f'{self.x1_name}:{self.x2_name} + {self.x1_name}:{self.x3_name} + {self.x2_name}:{self.x3_name}'
+        self.model = smf.ols(formula, data=self.data).fit()
+        print("Modelo Completo:")
+        print(self.model.summary())
+        return self.model, self.pareto_chart(self.model, "Pareto - Modelo Completo")
+
+    def fit_simplified_model(self):
+        """
+        Ajusta el modelo de segundo orden a los datos, eliminando términos no significativos.
+        """
+        formula = f'{self.y_name} ~ {self.x1_name} + {self.x2_name} + ' \
+                  f'I({self.x1_name}**2) + I({self.x2_name}**2) + I({self.x3_name}**2)'
+        self.model_simplified = smf.ols(formula, data=self.data).fit()
+        print("\nModelo Simplificado:")
+        print(self.model_simplified.summary())
+        return self.model_simplified, self.pareto_chart(self.model_simplified, "Pareto - Modelo Simplificado")
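For reference, with the factor names used in this app's UI (Glucosa, Extracto_de_Levadura, Triptofano) and a hypothetical response name "AIA", the full second-order formula built by fit_model would expand to the patsy string below; this is an illustrative sketch, not part of the diff:

    # Hypothetical names: only the three factor names appear in the UI; "AIA" is assumed here.
    y, x1, x2, x3 = "AIA", "Glucosa", "Extracto_de_Levadura", "Triptofano"
    formula = (f"{y} ~ {x1} + {x2} + {x3} + "
               f"I({x1}**2) + I({x2}**2) + I({x3}**2) + "
               f"{x1}:{x2} + {x1}:{x3} + {x2}:{x3}")
    print(formula)
    # AIA ~ Glucosa + Extracto_de_Levadura + Triptofano + I(Glucosa**2) + I(Extracto_de_Levadura**2)
    #   + I(Triptofano**2) + Glucosa:Extracto_de_Levadura + Glucosa:Triptofano + Extracto_de_Levadura:Triptofano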
    def optimize(self, method='Nelder-Mead'):
        """
+        Encuentra los niveles óptimos de los factores para maximizar la respuesta usando el modelo simplificado.
+
+        Args:
+            method (str): Método de optimización a utilizar (por defecto, 'Nelder-Mead').
        """
        if self.model_simplified is None:
+            print("Error: Ajusta el modelo simplificado primero.")
+            return

        def objective_function(x):
+            return -self.model_simplified.predict(pd.DataFrame({self.x1_name: [x[0]], self.x2_name: [x[1]], self.x3_name: [x[2]]}))

        bounds = [(-1, 1), (-1, 1), (-1, 1)]
        x0 = [0, 0, 0]

+        self.optimized_results = minimize(objective_function, x0, method=method, bounds=bounds)
        self.optimal_levels = self.optimized_results.x

+        # Convertir niveles óptimos de codificados a naturales
        optimal_levels_natural = [
+            self.coded_to_natural(self.optimal_levels[0], self.x1_name),
+            self.coded_to_natural(self.optimal_levels[1], self.x2_name),
+            self.coded_to_natural(self.optimal_levels[2], self.x3_name)
        ]
+        # Crear la tabla de optimización
        optimization_table = pd.DataFrame({
            'Variable': [self.x1_name, self.x2_name, self.x3_name],
+            'Nivel Óptimo (Natural)': optimal_levels_natural,
+            'Nivel Óptimo (Codificado)': self.optimal_levels
        })

        return optimization_table
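optimize maximizes the predicted response by minimizing its negative over the coded cube [-1, 1]^3. A minimal standalone sketch of the same idea, with a hypothetical quadratic standing in for the fitted model:

    import numpy as np
    from scipy.optimize import minimize

    def predicted(x):            # stand-in for model_simplified.predict, for illustration only
        return 10 - (x[0] - 0.3)**2 - (x[1] + 0.2)**2 - x[2]**2

    res = minimize(lambda x: -predicted(x), x0=[0, 0, 0],
                   method='Nelder-Mead', bounds=[(-1, 1)] * 3)
    print(res.x)                 # coded levels that maximize the stand-in response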
+    def plot_rsm_individual(self, fixed_variable, fixed_level):
        """
+        Genera un gráfico de superficie de respuesta (RSM) individual para una configuración específica.
+
+        Args:
+            fixed_variable (str): Nombre de la variable a mantener fija.
+            fixed_level (float): Nivel al que se fija la variable (en unidades naturales).
+
        Returns:
+            go.Figure: Objeto de figura de Plotly.
        """
+        if self.model_simplified is None:
+            print("Error: Ajusta el modelo simplificado primero.")
+            return None

+        # Determinar las variables que varían y sus niveles naturales
+        varying_variables = [var for var in [self.x1_name, self.x2_name, self.x3_name] if var != fixed_variable]

+        # Establecer los niveles naturales para las variables que varían
+        x_natural_levels = self.get_levels(varying_variables[0])
+        y_natural_levels = self.get_levels(varying_variables[1])
+
+        # Crear una malla de puntos para las variables que varían (en unidades naturales)
+        x_range_natural = np.linspace(x_natural_levels[0], x_natural_levels[-1], 100)
+        y_range_natural = np.linspace(y_natural_levels[0], y_natural_levels[-1], 100)
+        x_grid_natural, y_grid_natural = np.meshgrid(x_range_natural, y_range_natural)
+
+        # Convertir la malla de variables naturales a codificadas
+        x_grid_coded = self.natural_to_coded(x_grid_natural, varying_variables[0])
+        y_grid_coded = self.natural_to_coded(y_grid_natural, varying_variables[1])
+
+        # Crear un DataFrame para la predicción con variables codificadas
+        prediction_data = pd.DataFrame({
+            varying_variables[0]: x_grid_coded.flatten(),
+            varying_variables[1]: y_grid_coded.flatten(),
+        })
+        prediction_data[fixed_variable] = self.natural_to_coded(fixed_level, fixed_variable)
+
+        # Calcular los valores predichos
+        z_pred = self.model_simplified.predict(prediction_data).values.reshape(x_grid_coded.shape)
+
+        # 1. Identificar los dos factores que varían
+        varying_variables = [var for var in [self.x1_name, self.x2_name, self.x3_name] if var != fixed_variable]
+
+        # 2. Filtrar por el nivel de la variable fija (en codificado)
+        fixed_level_coded = self.natural_to_coded(fixed_level, fixed_variable)
+        subset_data = self.data[np.isclose(self.data[fixed_variable], fixed_level_coded)]
+
+        # 3. Filtrar por niveles válidos en las variables que varían
+        valid_levels = [-1, 0, 1]
+        experiments_data = subset_data[
+            subset_data[varying_variables[0]].isin(valid_levels) &
+            subset_data[varying_variables[1]].isin(valid_levels)
+        ]
+
+        # Convertir coordenadas de experimentos a naturales
+        experiments_x_natural = experiments_data[varying_variables[0]].apply(lambda x: self.coded_to_natural(x, varying_variables[0]))
+        experiments_y_natural = experiments_data[varying_variables[1]].apply(lambda x: self.coded_to_natural(x, varying_variables[1]))
+
+        # Crear el gráfico de superficie con variables naturales en los ejes y transparencia
+        fig = go.Figure(data=[go.Surface(z=z_pred, x=x_grid_natural, y=y_grid_natural, colorscale='Viridis', opacity=0.7, showscale=True)])
+
+        # --- Añadir cuadrícula a la superficie ---
+        # Líneas en la dirección x
+        for i in range(x_grid_natural.shape[0]):
+            fig.add_trace(go.Scatter3d(
+                x=x_grid_natural[i, :],
+                y=y_grid_natural[i, :],
+                z=z_pred[i, :],
+                mode='lines',
+                line=dict(color='gray', width=2),
+                showlegend=False,
+                hoverinfo='skip'
+            ))
+        # Líneas en la dirección y
+        for j in range(x_grid_natural.shape[1]):
+            fig.add_trace(go.Scatter3d(
+                x=x_grid_natural[:, j],
+                y=y_grid_natural[:, j],
+                z=z_pred[:, j],
+                mode='lines',
+                line=dict(color='gray', width=2),
+                showlegend=False,
+                hoverinfo='skip'
+            ))
+
+        # --- Fin de la adición de la cuadrícula ---
+
+        # Añadir los puntos de los experimentos en la superficie de respuesta con diferentes colores y etiquetas
+        # Crear una lista de colores y etiquetas para los puntos
+        colors = ['red', 'blue', 'green', 'purple', 'orange', 'yellow', 'cyan', 'magenta']
+        point_labels = []
+        for i, row in experiments_data.iterrows():
+            point_labels.append(f"{row[self.y_name]:.2f}")
+
+        fig.add_trace(go.Scatter3d(
+            x=experiments_x_natural,
+            y=experiments_y_natural,
+            z=experiments_data[self.y_name],
+            mode='markers+text',
+            marker=dict(size=4, color=colors[:len(experiments_x_natural)]),  # Usar colores de la lista
+            text=point_labels,  # Usar las etiquetas creadas
+            textposition='top center',
+            name='Experimentos'
+        ))
+
+        # Añadir etiquetas y título con variables naturales
+        fig.update_layout(
+            scene=dict(
+                xaxis_title=varying_variables[0] + " (g/L)",
+                yaxis_title=varying_variables[1] + " (g/L)",
+                zaxis_title=self.y_name,
+                # Puedes mantener la configuración de grid en los planos si lo deseas
+                # xaxis=dict(showgrid=True, gridwidth=1, gridcolor='lightgray'),
+                # yaxis=dict(showgrid=True, gridwidth=1, gridcolor='lightgray'),
+                # zaxis=dict(showgrid=True, gridwidth=1, gridcolor='lightgray')
+            ),
+            title=f"{self.y_name} vs {varying_variables[0]} y {varying_variables[1]}<br><sup>{fixed_variable} fijo en {fixed_level:.2f} (g/L) (Modelo Simplificado)</sup>",
+            height=800,
+            width=1000,
+            showlegend=True
+        )
+        return fig
+    def generate_all_plots(self):
        """
+        Genera todas las gráficas de RSM, variando la variable fija y sus niveles usando el modelo simplificado.
+        """
+        if self.model_simplified is None:
+            print("Error: Ajusta el modelo simplificado primero.")
+            return
+
+        # Niveles naturales para graficar
+        levels_to_plot_natural = {
+            self.x1_name: self.x1_levels,
+            self.x2_name: self.x2_levels,
+            self.x3_name: self.x3_levels
+        }
+
+        # Generar y mostrar gráficos individuales
+        for fixed_variable in [self.x1_name, self.x2_name, self.x3_name]:
+            for level in levels_to_plot_natural[fixed_variable]:
+                fig = self.plot_rsm_individual(fixed_variable, level)
+                if fig is not None:
+                    fig.show()
+    def coded_to_natural(self, coded_value, variable_name):
+        """Convierte un valor codificado a su valor natural."""
+        levels = self.get_levels(variable_name)
+        return levels[0] + (coded_value + 1) * (levels[-1] - levels[0]) / 2
+
+    def natural_to_coded(self, natural_value, variable_name):
+        """Convierte un valor natural a su valor codificado."""
+        levels = self.get_levels(variable_name)
        return -1 + 2 * (natural_value - levels[0]) / (levels[-1] - levels[0])
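coded_to_natural and natural_to_coded are the linear map between natural units and the coded range [-1, 1] spanned by the first and last levels. A quick check with hypothetical levels (not from the diff):

    levels = [2.0, 5.0, 8.0]   # hypothetical factor levels in g/L
    natural_to_coded = lambda v: -1 + 2 * (v - levels[0]) / (levels[-1] - levels[0])
    coded_to_natural = lambda c: levels[0] + (c + 1) * (levels[-1] - levels[0]) / 2
    print(natural_to_coded(5.0))   # 0.0  (centre point)
    print(coded_to_natural(1.0))   # 8.0  (high level)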
    def pareto_chart(self, model, title):
        """
+        Genera un diagrama de Pareto para los efectos estandarizados de un modelo,
+        incluyendo la línea de significancia.
+
+        Args:
+            model: Modelo ajustado de statsmodels.
+            title (str): Título del gráfico.
        """
+        # Calcular los efectos estandarizados
+        tvalues = model.tvalues[1:]  # Excluir la Intercept
        abs_tvalues = np.abs(tvalues)
        sorted_idx = np.argsort(abs_tvalues)[::-1]
        sorted_tvalues = abs_tvalues[sorted_idx]
        sorted_names = tvalues.index[sorted_idx]

+        # Calcular el valor crítico de t para la línea de significancia
+        alpha = 0.05  # Nivel de significancia
+        dof = model.df_resid  # Grados de libertad residuales
        t_critical = t.ppf(1 - alpha / 2, dof)

+        # Crear el diagrama de Pareto
        fig = px.bar(
            x=sorted_tvalues,
            y=sorted_names,
            orientation='h',
+            labels={'x': 'Efecto Estandarizado', 'y': 'Término'},
            title=title
        )
        fig.update_yaxes(autorange="reversed")
+
+        # Agregar la línea de significancia
        fig.add_vline(x=t_critical, line_dash="dot",
+                      annotation_text=f"t crítico = {t_critical:.2f}",
                      annotation_position="bottom right")

        return fig
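The significance line drawn by pareto_chart is the two-sided critical t value at alpha = 0.05 with the model's residual degrees of freedom; a one-line check with a hypothetical dof:

    from scipy.stats import t
    print(t.ppf(1 - 0.05 / 2, 5))   # ~2.571 for 5 residual degrees of freedom (hypothetical)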
+    def get_simplified_equation(self):
        """
+        Imprime la ecuación del modelo simplificado.
        """
        if self.model_simplified is None:
+            print("Error: Ajusta el modelo simplificado primero.")
+            return None
+
+        coefficients = self.model_simplified.params
+        equation = f"{self.y_name} = {coefficients['Intercept']:.4f}"
+
+        for term, coef in coefficients.items():
+            if term != 'Intercept':
+                if term == f'{self.x1_name}':
+                    equation += f" + {coef:.4f}*{self.x1_name}"
+                elif term == f'{self.x2_name}':
+                    equation += f" + {coef:.4f}*{self.x2_name}"
+                elif term == f'{self.x3_name}':
+                    equation += f" + {coef:.4f}*{self.x3_name}"
+                elif term == f'I({self.x1_name} ** 2)':
+                    equation += f" + {coef:.4f}*{self.x1_name}^2"
+                elif term == f'I({self.x2_name} ** 2)':
+                    equation += f" + {coef:.4f}*{self.x2_name}^2"
+                elif term == f'I({self.x3_name} ** 2)':
+                    equation += f" + {coef:.4f}*{self.x3_name}^2"
+
+        return equation
+    def generate_prediction_table(self):
+        """
+        Genera una tabla con los valores actuales, predichos y residuales.
+        """
+        if self.model_simplified is None:
+            print("Error: Ajusta el modelo simplificado primero.")
+            return None

+        self.data['Predicho'] = self.model_simplified.predict(self.data)
+        self.data['Residual'] = self.data[self.y_name] - self.data['Predicho']

+        return self.data[[self.y_name, 'Predicho', 'Residual']]
    def calculate_contribution_percentage(self):
+        """
+        Calcula el porcentaje de contribución de cada factor a la variabilidad de la respuesta (AIA).
+        """
+        if self.model_simplified is None:
+            print("Error: Ajusta el modelo simplificado primero.")
+            return None
+
+        # ANOVA del modelo simplificado
+        anova_table = sm.stats.anova_lm(self.model_simplified, typ=2)
+
+        # Suma de cuadrados total
+        ss_total = anova_table['sum_sq'].sum()
+
+        # Crear tabla de contribución
+        contribution_table = pd.DataFrame({
+            'Factor': [],
+            'Suma de Cuadrados': [],
+            '% Contribución': []
+        })
+
+        # Calcular porcentaje de contribución para cada factor
+        for index, row in anova_table.iterrows():
+            if index != 'Residual':
+                factor_name = index
+                if factor_name == f'I({self.x1_name} ** 2)':
+                    factor_name = f'{self.x1_name}^2'
+                elif factor_name == f'I({self.x2_name} ** 2)':
+                    factor_name = f'{self.x2_name}^2'
+                elif factor_name == f'I({self.x3_name} ** 2)':
+                    factor_name = f'{self.x3_name}^2'
+
+                ss_factor = row['sum_sq']
+                contribution_percentage = (ss_factor / ss_total) * 100

+                contribution_table = pd.concat([contribution_table, pd.DataFrame({
+                    'Factor': [factor_name],
+                    'Suma de Cuadrados': [ss_factor],
+                    '% Contribución': [contribution_percentage]
+                })], ignore_index=True)

+        return contribution_table
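Each factor's contribution is its Type II sum of squares as a share of the total (including the residual), exactly as computed above; a tiny check with hypothetical sums of squares:

    ss = {"Glucosa": 12.0, "Triptofano": 3.0, "Residual": 5.0}   # hypothetical values
    ss_total = sum(ss.values())
    print({k: round(100 * v / ss_total, 1) for k, v in ss.items() if k != "Residual"})
    # {'Glucosa': 60.0, 'Triptofano': 15.0}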
    def calculate_detailed_anova(self):
        """
+        Calcula la tabla ANOVA detallada con la descomposición del error residual.
        """
        if self.model_simplified is None:
+            print("Error: Ajusta el modelo simplificado primero.")
+            return None

+        # --- ANOVA detallada ---
+        # 1. Ajustar un modelo solo con los términos de primer orden y cuadráticos
        formula_reduced = f'{self.y_name} ~ {self.x1_name} + {self.x2_name} + {self.x3_name} + ' \
                          f'I({self.x1_name}**2) + I({self.x2_name}**2) + I({self.x3_name}**2)'
        model_reduced = smf.ols(formula_reduced, data=self.data).fit()
+
+        # 2. ANOVA del modelo reducido (para obtener la suma de cuadrados de la regresión)
        anova_reduced = sm.stats.anova_lm(model_reduced, typ=2)

+        # 3. Suma de cuadrados total
+        ss_total = np.sum((self.data[self.y_name] - self.data[self.y_name].mean())**2)
+
+        # 4. Grados de libertad totales
+        df_total = len(self.data) - 1
+
+        # 5. Suma de cuadrados de la regresión
+        ss_regression = anova_reduced['sum_sq'][:-1].sum()  # Sumar todo excepto 'Residual'
+
+        # 6. Grados de libertad de la regresión
        df_regression = len(anova_reduced) - 1

+        # 7. Suma de cuadrados del error residual
        ss_residual = self.model_simplified.ssr
        df_residual = self.model_simplified.df_resid

+        # 8. Suma de cuadrados del error puro (se calcula a partir de las réplicas)
        replicas = self.data[self.data.duplicated(subset=[self.x1_name, self.x2_name, self.x3_name], keep=False)]
        ss_pure_error = replicas.groupby([self.x1_name, self.x2_name, self.x3_name])[self.y_name].var().sum()
        df_pure_error = len(replicas) - len(replicas.groupby([self.x1_name, self.x2_name, self.x3_name]))

+        # 9. Suma de cuadrados de la falta de ajuste
        ss_lack_of_fit = ss_residual - ss_pure_error
        df_lack_of_fit = df_residual - df_pure_error

+        # 10. Cuadrados medios
        ms_regression = ss_regression / df_regression
        ms_residual = ss_residual / df_residual
        ms_lack_of_fit = ss_lack_of_fit / df_lack_of_fit
        ms_pure_error = ss_pure_error / df_pure_error

+        # 11. Estadístico F y valor p para la falta de ajuste
        f_lack_of_fit = ms_lack_of_fit / ms_pure_error
+        p_lack_of_fit = 1 - f.cdf(f_lack_of_fit, df_lack_of_fit, df_pure_error)  # Usar f.cdf de scipy.stats

+        # 12. Crear la tabla ANOVA detallada
        detailed_anova_table = pd.DataFrame({
+            'Fuente de Variación': ['Regresión', 'Residual', 'Falta de Ajuste', 'Error Puro', 'Total'],
+            'Suma de Cuadrados': [ss_regression, ss_residual, ss_lack_of_fit, ss_pure_error, ss_total],
+            'Grados de Libertad': [df_regression, df_residual, df_lack_of_fit, df_pure_error, df_total],
+            'Cuadrado Medio': [ms_regression, ms_residual, ms_lack_of_fit, ms_pure_error, np.nan],
+            'F': [np.nan, np.nan, f_lack_of_fit, np.nan, np.nan],
+            'Valor p': [np.nan, np.nan, p_lack_of_fit, np.nan, np.nan]
        })
+
+        # Calcular la suma de cuadrados y grados de libertad para la curvatura
+        ss_curvature = anova_reduced['sum_sq'][f'I({self.x1_name} ** 2)'] + anova_reduced['sum_sq'][f'I({self.x2_name} ** 2)'] + anova_reduced['sum_sq'][f'I({self.x3_name} ** 2)']
+        df_curvature = 3
+
+        # Añadir la fila de curvatura a la tabla ANOVA
+        detailed_anova_table.loc[len(detailed_anova_table)] = ['Curvatura', ss_curvature, df_curvature, ss_curvature / df_curvature, np.nan, np.nan]
+
+        # Reorganizar las filas para que la curvatura aparezca después de la regresión
+        detailed_anova_table = detailed_anova_table.reindex([0, 5, 1, 2, 3, 4])
+
+        # Resetear el índice para que sea consecutivo
+        detailed_anova_table = detailed_anova_table.reset_index(drop=True)

        return detailed_anova_table
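A minimal end-to-end sketch of using the class outside the Gradio UI, assuming the imports at the top of app.py (pd, np, smf, etc.) are in scope; the natural levels and the synthetic response below are hypothetical and only illustrate the call sequence:

    import itertools
    # Coded 3-factor Box-Behnken design: 12 edge midpoints + 3 centre points
    points = []
    for i, j in itertools.combinations(range(3), 2):
        for a, b in itertools.product([-1, 1], repeat=2):
            p = [0, 0, 0]
            p[i], p[j] = a, b
            points.append(p)
    points += [[0, 0, 0]] * 3
    df = pd.DataFrame(points, columns=['Glucosa', 'Extracto_de_Levadura', 'Triptofano'])
    # Synthetic response, for illustration only
    df['AIA'] = (8 - df['Glucosa']**2 - 0.5 * df['Triptofano']**2 + 0.3 * df['Extracto_de_Levadura']
                 + np.random.default_rng(0).normal(0, 0.1, len(df)))
    rsm = RSM_BoxBehnken(df, 'Glucosa', 'Extracto_de_Levadura', 'Triptofano', 'AIA',
                         [1.0, 3.5, 5.5], [0.03, 0.165, 0.3], [0.4, 0.65, 0.9])  # hypothetical levels
    rsm.fit_simplified_model()
    print(rsm.optimize())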
# --- Funciones para la interfaz de Gradio ---

def load_data(x1_name, x2_name, x3_name, y_name, x1_levels_str, x2_levels_str, x3_levels_str, data_str):
+    """
+    Carga los datos del diseño Box-Behnken desde cajas de texto y crea la instancia de RSM_BoxBehnken.
+
+    Args:
+        x1_name (str): Nombre de la primera variable independiente.
+        x2_name (str): Nombre de la segunda variable independiente.
+        x3_name (str): Nombre de la tercera variable independiente.
+        y_name (str): Nombre de la variable dependiente.
+        x1_levels_str (str): Niveles de la primera variable, separados por comas.
+        x2_levels_str (str): Niveles de la segunda variable, separados por comas.
+        x3_levels_str (str): Niveles de la tercera variable, separados por comas.
+        data_str (str): Datos del experimento en formato CSV, separados por comas.
+
+    Returns:
+        tuple: (pd.DataFrame, str, str, str, str, list, list, list, gr.update)
+    """
    try:
+        # Convertir los niveles a listas de números
        x1_levels = [float(x.strip()) for x in x1_levels_str.split(',')]
        x2_levels = [float(x.strip()) for x in x2_levels_str.split(',')]
        x3_levels = [float(x.strip()) for x in x3_levels_str.split(',')]

+        # Crear DataFrame a partir de la cadena de datos
        data_list = [row.split(',') for row in data_str.strip().split('\n')]
        column_names = ['Exp.', x1_name, x2_name, x3_name, y_name]
        data = pd.DataFrame(data_list, columns=column_names)
+        data = data.apply(pd.to_numeric, errors='coerce')  # Convertir a numérico

+        # Validar que el DataFrame tenga las columnas correctas
        if not all(col in data.columns for col in column_names):
            raise ValueError("El formato de los datos no es correcto.")

+        # Crear la instancia de RSM_BoxBehnken
        global rsm
        rsm = RSM_BoxBehnken(data, x1_name, x2_name, x3_name, y_name, x1_levels, x2_levels, x3_levels)
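load_data expects each levels box as comma-separated numbers and the data box as one comma-separated row per line, in the column order Exp., x1, x2, x3, y; the factor columns are treated as coded units (-1, 0, 1) elsewhere in the class. Hypothetical input strings for illustration:

    x1_levels_str = "1.0, 3.5, 5.5"                          # hypothetical natural levels
    data_str = "1,-1,-1,0,4.8\n2,1,-1,0,6.1\n3,-1,1,0,5.4"   # hypothetical rows; a full design has 15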
    prediction_table = rsm.generate_prediction_table()
    contribution_table = rsm.calculate_contribution_percentage()
    anova_table = rsm.calculate_detailed_anova()
+
+    # Formatear la ecuación para que se vea mejor en Markdown
    equation_formatted = equation.replace(" + ", "<br>+ ").replace(" ** ", "^").replace("*", " × ")
    equation_formatted = f"### Ecuación del Modelo Simplificado:<br>{equation_formatted}"
def generate_rsm_plot(fixed_variable, fixed_level):
    if 'rsm' not in globals():
        return None, "Error: Carga los datos primero."
+    fig = rsm.plot_rsm_individual(fixed_variable, fixed_level)
+    return fig
# --- Crear la interfaz de Gradio ---

        with gr.Row(visible=False) as analysis_row:
            with gr.Column():
                fit_button = gr.Button("Ajustar Modelo y Optimizar")
                gr.Markdown("**Modelo Completo**")
                model_completo_output = gr.HTML()
                pareto_completo_output = gr.Plot()

            fixed_variable_input = gr.Dropdown(label="Variable Fija", choices=["Glucosa", "Extracto_de_Levadura", "Triptofano"], value="Glucosa")
            fixed_level_input = gr.Slider(label="Nivel de Variable Fija", minimum=0, maximum=1, step=0.01, value=0.5)
            plot_button = gr.Button("Generar Gráfico")
+            rsm_plot_output = gr.Plot()

    load_button.click(
        load_data,
    )

    fit_button.click(fit_and_optimize_model, outputs=[model_completo_output, pareto_completo_output, model_simplificado_output, pareto_simplificado_output, equation_output, optimization_table_output, prediction_table_output, contribution_table_output, anova_table_output])
    plot_button.click(generate_rsm_plot, inputs=[fixed_variable_input, fixed_level_input], outputs=[rsm_plot_output])

    # Ejemplo de uso
    gr.Markdown("## Ejemplo de uso")
    gr.Markdown("1. Introduce los nombres de las variables y sus niveles en las cajas de texto correspondientes.")

    gr.Markdown("4. Haz clic en 'Ajustar Modelo y Optimizar' para ajustar el modelo y encontrar los niveles óptimos de los factores.")
    gr.Markdown("5. Selecciona una variable fija y su nivel en los controles deslizantes.")
    gr.Markdown("6. Haz clic en 'Generar Gráfico' para generar un gráfico de superficie de respuesta.")

demo.launch()