-
Notifications
You must be signed in to change notification settings - Fork 0
/
Ml-app.py
207 lines (185 loc) · 9.47 KB
/
Ml-app.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
import streamlit as st
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn import datasets
from sklearn.metrics import accuracy_score, mean_squared_error, r2_score
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import SVC, SVR
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.cluster import KMeans
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler
from tpot import TPOTClassifier, TPOTRegressor
from gplearn.genetic import SymbolicRegressor, SymbolicClassifier
# --- Streamlit app setup -----------------------------------------------------
st.title("Machine Learning App with Genetic Programming")
st.sidebar.title("Upload or Select Dataset")
data_source = st.sidebar.radio("Choose data source:", ("Upload", "Pre-existing"))

df = None  # Initialize so the later `if df is not None` check never raises NameError.

# --- Data upload or built-in dataset selection --------------------------------
if data_source == "Upload":
    uploaded_file = st.sidebar.file_uploader("Upload your CSV file", type=["csv"])
    if uploaded_file:
        df = pd.read_csv(uploaded_file)
        st.write("Dataset successfully loaded.")
    else:
        st.write("Please upload a valid CSV file.")
else:
    # BUGFIX: `datasets.load_boston` never supported `as_frame=` and was removed
    # from scikit-learn in 1.2 (ethical concerns), so the old "Boston Housing"
    # option always crashed. Use the upstream-recommended replacement,
    # fetch_california_housing, instead.
    _LOADERS = {
        "Iris": datasets.load_iris,
        "Breast Cancer": datasets.load_breast_cancer,
        "Wine": datasets.load_wine,
        "Diabetes": datasets.load_diabetes,
        "California Housing": datasets.fetch_california_housing,
    }
    dataset_name = st.sidebar.selectbox("Select a dataset", list(_LOADERS))
    # Every loader here supports as_frame=True and exposes a `.frame` DataFrame
    # (features + target in one table), matching the upload path's shape.
    df = _LOADERS[dataset_name](as_frame=True).frame
# --- Main app flow: runs only once a dataset is available ---------------------
if df is not None:
    st.write("### Dataset Preview", df.head())

    # --- Feature and target selection ---
    st.sidebar.subheader("Features & Target")
    select_all = st.sidebar.checkbox("Select all features")
    if select_all:
        # Convention: last column is assumed to be the target, so exclude it.
        features = df.columns[:-1].tolist()
    else:
        features = st.sidebar.multiselect("Select features", df.columns[:-1])
    target = st.sidebar.selectbox("Select target", df.columns)

    # ROBUSTNESS: an empty feature list would crash train_test_split later
    # with an opaque error; stop early with a clear message instead.
    if not features:
        st.warning("Please select at least one feature to continue.")
        st.stop()

    # BUGFIX: copy so that scaling below transforms our working matrix only,
    # never a view into `df` (avoids pandas SettingWithCopy behavior).
    X = df[features].copy()
    y = df[target]

    # --- Data visualization for outliers ---
    st.sidebar.subheader("Data Visualization")
    visualize_features = st.sidebar.multiselect("Select features to visualize", features)
    for feature in visualize_features:
        st.write(f"### Distribution of {feature}")
        fig, ax = plt.subplots()
        sns.boxplot(x=df[feature], ax=ax)
        st.pyplot(fig)

    # --- Data transformation ---
    st.sidebar.subheader("Data Transformation")
    transformation = st.sidebar.selectbox(
        "Select transformation technique",
        ["None", "Normalization (Min-Max Scaling)", "Standardization (Z-Score)", "Robust Scaling"],
    )
    _SCALERS = {
        "Normalization (Min-Max Scaling)": MinMaxScaler,
        "Standardization (Z-Score)": StandardScaler,
        "Robust Scaling": RobustScaler,
    }
    if transformation in _SCALERS:
        X[features] = _SCALERS[transformation]().fit_transform(X[features])

    # --- Model selection ---
    st.sidebar.subheader("Choose Model")
    model_name = st.sidebar.selectbox("Model", [
        "Decision Tree Classifier",
        "Random Forest Classifier",
        "SVM Classifier",
        "Logistic Regression",
        "Naive Bayes",
        "K-Nearest Neighbors Classifier",
        "KMeans",
        "Linear Regression",
        "Decision Tree Regressor",
        "Random Forest Regressor",
        "SVM Regressor",
        "K-Nearest Neighbors Regressor",
        "TPOT Classifier (Genetic Algorithm)",
        "TPOT Regressor (Genetic Algorithm)",
        "Symbolic Classifier (Genetic Programming)",
        "Symbolic Regressor (Genetic Programming)",
    ])

    # --- Hyperparameter selection and model construction ---
    # Each branch draws only the sliders relevant to the chosen model.
    st.sidebar.subheader("Hyperparameter Selection")
    if model_name == "Decision Tree Classifier":
        max_depth = st.sidebar.slider("Max Depth", 1, 20, 5)
        model = DecisionTreeClassifier(max_depth=max_depth)
    elif model_name == "Random Forest Classifier":
        n_estimators = st.sidebar.slider("Number of Estimators", 10, 200, 100)
        max_depth = st.sidebar.slider("Max Depth", 1, 20, 5)
        model = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth)
    elif model_name == "SVM Classifier":
        C = st.sidebar.slider("C (Regularization Parameter)", 0.01, 10.0, 1.0)
        kernel = st.sidebar.selectbox("Kernel", ["linear", "poly", "rbf", "sigmoid"])
        model = SVC(C=C, kernel=kernel)
    elif model_name == "Logistic Regression":
        C = st.sidebar.slider("C (Regularization Parameter)", 0.01, 10.0, 1.0)
        model = LogisticRegression(C=C)
    elif model_name == "Naive Bayes":
        model = GaussianNB()
    elif model_name == "K-Nearest Neighbors Classifier":
        n_neighbors = st.sidebar.slider("Number of Neighbors", 1, 20, 5)
        model = KNeighborsClassifier(n_neighbors=n_neighbors)
    elif model_name == "KMeans":
        n_clusters = st.sidebar.slider("Number of Clusters", 2, 10, 3)
        model = KMeans(n_clusters=n_clusters)
    elif model_name == "Linear Regression":
        model = LinearRegression()
    elif model_name == "Decision Tree Regressor":
        max_depth = st.sidebar.slider("Max Depth", 1, 20, 5)
        model = DecisionTreeRegressor(max_depth=max_depth)
    elif model_name == "Random Forest Regressor":
        n_estimators = st.sidebar.slider("Number of Estimators", 10, 200, 100)
        max_depth = st.sidebar.slider("Max Depth", 1, 20, 5)
        model = RandomForestRegressor(n_estimators=n_estimators, max_depth=max_depth)
    elif model_name == "SVM Regressor":
        C = st.sidebar.slider("C (Regularization Parameter)", 0.01, 10.0, 1.0)
        kernel = st.sidebar.selectbox("Kernel", ["linear", "poly", "rbf", "sigmoid"])
        model = SVR(C=C, kernel=kernel)
    elif model_name == "K-Nearest Neighbors Regressor":
        n_neighbors = st.sidebar.slider("Number of Neighbors", 1, 20, 5)
        model = KNeighborsRegressor(n_neighbors=n_neighbors)
    elif model_name == "TPOT Classifier (Genetic Algorithm)":
        generations = st.sidebar.slider("Generations", 5, 50, 10)
        population_size = st.sidebar.slider("Population Size", 20, 100, 50)
        model = TPOTClassifier(generations=generations, population_size=population_size, verbosity=2, random_state=42)
    elif model_name == "TPOT Regressor (Genetic Algorithm)":
        generations = st.sidebar.slider("Generations", 5, 50, 10)
        population_size = st.sidebar.slider("Population Size", 20, 100, 50)
        model = TPOTRegressor(generations=generations, population_size=population_size, verbosity=2, random_state=42)
    elif model_name == "Symbolic Classifier (Genetic Programming)":
        generations = st.sidebar.slider("Generations", 10, 100, 20)
        population_size = st.sidebar.slider("Population Size", 100, 1000, 500)
        model = SymbolicClassifier(generations=generations, population_size=population_size, verbose=1, random_state=42)
    elif model_name == "Symbolic Regressor (Genetic Programming)":
        generations = st.sidebar.slider("Generations", 10, 100, 20)
        population_size = st.sidebar.slider("Population Size", 100, 1000, 500)
        model = SymbolicRegressor(generations=generations, population_size=population_size, verbose=1, random_state=42)

    # --- Train/test split and training ---
    st.sidebar.subheader("Model Parameters")
    test_size = st.sidebar.slider("Test size", 0.1, 0.5, 0.3)
    k_folds = st.sidebar.slider("Number of Folds for Cross-Validation", 2, 10, 5)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)

    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    if "TPOT" in model_name:
        # TPOT exposes the winning sklearn pipeline after its evolutionary search.
        st.write("### Best Pipeline:", model.fitted_pipeline_)

    # --- Metrics ---
    # BUGFIX: the original tested `"Classifier" in model_name`, which misses
    # "Logistic Regression" and "Naive Bayes" — both classifiers — so they
    # silently displayed no metrics at all. Classify models explicitly.
    is_classifier = "Classifier" in model_name or model_name in ("Logistic Regression", "Naive Bayes")
    is_regressor = "Regressor" in model_name or model_name == "Linear Regression"
    if is_classifier:
        st.write("### Accuracy on Test Set:", accuracy_score(y_test, y_pred))
        cv_scores = cross_val_score(model, X, y, cv=k_folds)
        st.write("### Cross-Validation Scores:", cv_scores)
        st.write("### Mean CV Score:", np.mean(cv_scores))
    elif is_regressor:
        st.write("### Mean Squared Error:", mean_squared_error(y_test, y_pred))
        st.write("### R² Score:", r2_score(y_test, y_pred))
        cv_scores = cross_val_score(model, X, y, cv=k_folds, scoring='neg_mean_squared_error')
        st.write("### Cross-Validation MSE Scores:", -cv_scores)
        st.write("### Mean CV MSE:", -np.mean(cv_scores))
    elif model_name == "KMeans":
        # Unsupervised: report the fitted cluster structure instead of scores.
        st.write("### Cluster Centers:", model.cluster_centers_)
        st.write("### Labels:", model.labels_)
else:
    st.write("No dataset loaded. Please upload or select a dataset.")