Assignment 1 - Water Jug
waterjug.pl:-
start((0, 0)).
goal((2, 0)).
% Fill the 5-litre jug.
move((X, Y), (5, Y)) :- X < 5.
% Fill the 4-litre jug.
move((X, Y), (X, 4)) :- Y < 4.
% Empty the 5-litre jug.
move((X, Y), (0, Y)) :- X > 0.
% Empty the 4-litre jug.
move((X, Y), (X, 0)) :- Y > 0.
% Pour from the 5-litre jug into the 4-litre jug.
move((X, Y), (NX, NY)) :-
    X > 0, Y < 4,
    T is min(X, 4 - Y),
    NX is X - T,
    NY is Y + T.
% Pour from the 4-litre jug into the 5-litre jug.
move((X, Y), (NX, NY)) :-
    Y > 0, X < 5,
    T is min(Y, 5 - X),
    NY is Y - T,
    NX is X + T.
Commands:-
?- start(S).
S = (0, 0).
?- move((0,0), Next).
Next = (5, 0) ;
?- move((5,0), Next).
Next = (5, 4) ;
?- move((1,4), Next).
Next = (5, 4) ;
?- move((2,3), Next).
Next = (5, 3) ;
Assignment 2 - Tic Tac Toe
a.pl:-
display_board(Board) :-
    nl,
    display_row(Board, 1),
    display_row(Board, 2),
    display_row(Board, 3),
    nl.
display_row(Board, Row) :-
    display_cell(Board, Row, 1),
    display_cell(Board, Row, 2),
    display_cell(Board, Row, 3),
    nl,
    ( Row =< 3 -> write('--|--|--'), nl ; true ).
display_cell(Board, Row, Col) :-
    member(cell(Row, Col, Player), Board),
    write(Player),
    !.
% Unmarked cells print nothing, so an empty row shows as a blank line.
display_cell(_, _, _) :-
    write('').
win(Player, Board, Row, Col) :-
    (   member(cell(Row, 1, Player), Board),
        member(cell(Row, 2, Player), Board),
        member(cell(Row, 3, Player), Board)
    ;   member(cell(1, Col, Player), Board),
        member(cell(2, Col, Player), Board),
        member(cell(3, Col, Player), Board)
    ;   member(cell(1, 1, Player), Board),
        member(cell(2, 2, Player), Board),
        member(cell(3, 3, Player), Board)
    ;   member(cell(1, 3, Player), Board),
        member(cell(2, 2, Player), Board),
        member(cell(3, 1, Player), Board)
    ).
game_over(Board, Row, Col) :-
    (   win('X', Board, Row, Col)
    ;   win('O', Board, Row, Col)
    ;   length(Board, 9)
    ).
make_move(Player, Row, Col, Board, NewBoard) :-
    \+ member(cell(Row, Col, _), Board),
    append(Board, [cell(Row, Col, Player)], NewBoard).
play :-
    play('X', []).
play(Player, Board) :-
    display_board(Board),
    (   game_over(Board, Row, Col)
    ->  (   win('X', Board, Row, Col) -> write('X wins!\n')
        ;   win('O', Board, Row, Col) -> write('O wins!\n')
        ;   write('It''s a draw!\n')
        )
    ;   (   Player = 'X'
        ->  write('Player X''s turn\n')
        ;   write('Player O''s turn\n')
        ),
        write('Enter your move (row and column): '),
        read(Row), read(Col),
        (   Row >= 1, Row =< 3, Col >= 1, Col =< 3
        ->  (   make_move(Player, Row, Col, Board, NewBoard)
            ->  switch_player(Player, NextPlayer),
                play(NextPlayer, NewBoard)
            ;   write('Invalid move. Try again.\n'),
                play(Player, Board)
            )
        ;   write('Invalid input. Row and column must be between 1 and 3.\n'),
            play(Player, Board)
        )
    ).
switch_player('X','O').
switch_player('O','X').
Commands:-
?- play.
--|--|--
--|--|--
--|--|--
Player X's turn
Enter your move (row and column): 1.
|: 1.
X--|--|--
--|--|--
--|--|--
Player O's turn
Enter your move (row and column): |: 2.
|: 3.
X--|--|--
O-|--|--
--|--|--
Player O's turn
Enter your move (row and column): |: 1.
|: 3.
XOO
--|--|--
X--|--|--
--|--|--
Player X's turn
Enter your move (row and column): |: 3.
|: 3.
XOO
--|--|--
X--|--|--
X--|--|--
X wins!
true .
Assignment 3 - 8-Puzzle Problem
a.pl:-
start(1/2/3/4/8/0/7/6/5).
goal(1/2/3/4/5/6/7/8/0).
move(1/2/3/4/8/0/7/6/5, down, 1/2/3/4/8/5/7/6/0, 1).
move(1/2/3/4/8/5/7/6/0, left, 1/2/3/4/8/5/7/0/6, 1).
move(1/2/3/4/8/5/7/0/6, up, 1/2/3/4/0/5/7/8/6, 1).
move(1/2/3/4/0/5/7/8/6, right, 1/2/3/4/5/0/7/8/6, 1).
move(1/2/3/4/5/0/7/8/6, down, 1/2/3/4/5/6/7/8/0, 1).
solve :-
    start(S0),
    move(S0, M1, S1, C1),
    write('Move: '), write(M1), write(' -> '), write(S1), write(', Cost: '), write(C1), nl,
    move(S1, M2, S2, C2),
    C12 is C1 + C2,
    write('Move: '), write(M2), write(' -> '), write(S2), write(', Cost: '), write(C12), nl,
    move(S2, M3, S3, C3),
    C123 is C12 + C3,
    write('Move: '), write(M3), write(' -> '), write(S3), write(', Cost: '), write(C123), nl,
    move(S3, M4, S4, C4),
    C1234 is C123 + C4,
    write('Move: '), write(M4), write(' -> '), write(S4), write(', Cost: '), write(C1234), nl,
    move(S4, M5, S5, C5),
    TotalCost is C1234 + C5,
    write('Move: '), write(M5), write(' -> '), write(S5), write(', Cost: '), write(TotalCost), nl,
    goal(S5),
    write('Goal reached! Total Cost = '), write(TotalCost), nl.
Commands:-
?- solve.
Move: down -> 1/2/3/4/8/5/7/6/0, Cost: 1
Move: left -> 1/2/3/4/8/5/7/0/6, Cost: 2
Move: up -> 1/2/3/4/0/5/7/8/6, Cost: 3
Move: right -> 1/2/3/4/5/0/7/8/6, Cost: 4
Move: down -> 1/2/3/4/5/6/7/8/0, Cost: 5
Goal reached! Total Cost = 5
true
Assignment 4 - Perceptron OR
import numpy as np
class Perceptron:
    def __init__(self, learning_rate=0.01, n_iterations=100):
        self.learning_rate = learning_rate
        self.n_iterations = n_iterations
        self.weights = None
        self.bias = None

    def fit(self, X, y):
        n_samples, n_features = X.shape
        self.weights = np.zeros(n_features)
        self.bias = 0
        y_ = np.array([1 if i > 0 else 0 for i in y])
        for _ in range(self.n_iterations):
            for idx, x_i in enumerate(X):
                linear_output = np.dot(x_i, self.weights) + self.bias
                y_predicted = self.activation_function(linear_output)
                update = self.learning_rate * (y_[idx] - y_predicted)
                self.weights += update * x_i
                self.bias += update

    def activation_function(self, x):
        return np.where(x >= 0, 1, 0)

    def predict(self, X):
        linear_output = np.dot(X, self.weights) + self.bias
        y_predicted = self.activation_function(linear_output)
        return y_predicted
# OR gate inputs and outputs
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 1, 1, 1])
# Initialize and train the perceptron
perceptron = Perceptron(learning_rate=0.1, n_iterations=10)
perceptron.fit(X, y)
# Test the perceptron
predictions = perceptron.predict(X)
print(predictions)
# Expected output: [0 1 1 1]
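The learned parameters can also be inspected directly; with the OR data above, this training loop typically settles on positive weights and a small negative bias, so w.x + b >= 0 holds exactly when at least one input is 1. A minimal sketch reusing the trained perceptron object:
# Print the learned decision rule w.x + b >= 0 (exact values depend on training order).
print("weights:", perceptron.weights)
print("bias:", perceptron.bias)
for x_i in X:
    score = np.dot(x_i, perceptron.weights) + perceptron.bias
    print(f"input={x_i}, score={score:.2f}, prediction={int(score >= 0)}")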
Assignment 6 - Gradient Descent
import numpy as np
def compute_error_for_line_given_points(b, m, points):
    """
    Calculate mean squared error for a line defined by slope (m) and intercept (b)
    Args:
        b (float): y-intercept
        m (float): slope
        points (numpy.array): Array of [x,y] coordinates
    Returns:
        float: Mean squared error
    """
    totalError = 0
    for i in range(len(points)):
        x = points[i, 0]
        y = points[i, 1]
        totalError += (y - (m * x + b)) ** 2
    return totalError / float(len(points))
def step_gradient(b_current, m_current, points, learningRate):
    """
    Calculate one step of gradient descent
    Args:
        b_current (float): Current y-intercept
        m_current (float): Current slope
        points (numpy.array): Array of [x,y] coordinates
        learningRate (float): Step size for gradient descent
    Returns:
        tuple: Updated b and m values
    """
    b_gradient = 0
    m_gradient = 0
    N = float(len(points))
    for i in range(len(points)):
        x = points[i, 0]
        y = points[i, 1]
        b_gradient += -(2/N) * (y - ((m_current * x) + b_current))
        m_gradient += -(2/N) * x * (y - ((m_current * x) + b_current))
    new_b = b_current - (learningRate * b_gradient)
    new_m = m_current - (learningRate * m_gradient)
    return [new_b, new_m]
def gradient_descent_runner(points, starting_b, starting_m, learning_rate, num_iterations):
    """
    Run gradient descent algorithm
    Args:
        points (numpy.array): Array of [x,y] coordinates
        starting_b (float): Initial y-intercept
        starting_m (float): Initial slope
        learning_rate (float): Step size for gradient descent
        num_iterations (int): Number of iterations to run
    Returns:
        tuple: Final b and m values
    """
    b = starting_b
    m = starting_m
    # Print initial error
    print(f"Starting gradient descent at b = {b}, m = {m}, "
          f"error = {compute_error_for_line_given_points(b, m, points)}")
    for i in range(num_iterations):
        b, m = step_gradient(b, m, points, learning_rate)
    return [b, m]
def run():
    """ Main function to run linear regression """
    try:
        points = np.genfromtxt("data.csv", delimiter=",")
        learning_rate = 0.0001
        initial_b = 0  # initial y-intercept guess
        initial_m = 0  # initial slope guess
        num_iterations = 1000
        print("Running...")
        [b, m] = gradient_descent_runner(points, initial_b, initial_m,
                                         learning_rate, num_iterations)
        print(f"After {num_iterations} iterations b = {b}, "
              f"m = {m}, error = {compute_error_for_line_given_points(b, m, points)}")
    except FileNotFoundError:
        print("Error: Could not find data.csv file.")
    except Exception as e:
        print(f"An error occurred: {str(e)}")

if __name__ == '__main__':
    run()
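If data.csv is unavailable, the routines can still be smoke-tested on synthetic points. This is an illustrative sketch, not part of the assignment; the true line y = 2x + 1 and all constants here are arbitrary choices:
# Hypothetical check: recover a known slope/intercept from noisy samples.
np.random.seed(0)
xs = np.random.uniform(0, 10, 100)
ys = 2 * xs + 1 + np.random.normal(0, 0.5, size=100)
pts = np.column_stack([xs, ys])
b, m = gradient_descent_runner(pts, 0, 0, 0.01, 1000)
print(f"Recovered b = {b:.3f}, m = {m:.3f}")  # should approach b = 1, m = 2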
Assignment 7 - Adaline algorithm for AND operation
import numpy as np
class Adaline:
    def __init__(self, input_size, learning_rate=0.1, epochs=100):
        self.weights = np.zeros(input_size)
        self.bias = 0
        self.learning_rate = learning_rate
        self.epochs = epochs

    def activation(self, x):
        # Linear activation (identity function)
        return x

    def predict(self, X):
        # Compute the linear output
        return self.activation(np.dot(X, self.weights) + self.bias)

    def train(self, X, y):
        # Train the model using Adaline's learning rule (Least Mean Squares)
        for epoch in range(self.epochs):
            for i in range(len(X)):
                # Calculate the prediction
                prediction = self.predict(X[i])
                # Compute the error
                error = y[i] - prediction
                # Update the weights and bias
                self.weights += self.learning_rate * error * X[i]
                self.bias += self.learning_rate * error

    def evaluate(self, X):
        # Make predictions for the input X
        return np.where(self.predict(X) >= 0.5, 1, 0)  # Convert to binary output
# AND operation input and output
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])  # Input pairs
y = np.array([0, 0, 0, 1])  # AND outputs
# Initialize Adaline model with 2 input features (for A and B), learning rate, and epochs
adaline = Adaline(input_size=2, learning_rate=0.1, epochs=100)
# Train the Adaline model
adaline.train(X, y)
# Evaluate the trained model on the same inputs (X)
predictions = adaline.evaluate(X)
print("Predictions on the AND operation:")
for i, prediction in enumerate(predictions):
    print(f"Input: {X[i]} => Predicted: {prediction} => Actual: {y[i]}")
Assignment 8 - Dimensionality Reduction
A) Chi-square test
import pandas as pd
import numpy as np
from sklearn.feature_selection import SelectKBest, chi2
# Load data
mobile_data = pd.read_csv('Mobile_Data.csv')
print(mobile_data.head(10))
# X = first 20 columns, y = last column (price_range)
X = mobile_data.iloc[:, 0:20]
y = mobile_data.iloc[:, -1]
# Apply Chi-Square Test
selector = SelectKBest(score_func=chi2, k=10)
selector.fit(X, y)
# Convert to dataframe
df_scores = pd.DataFrame(selector.scores_)
df_columns = pd.DataFrame(X.columns)
# Combine column names + scores
results = pd.concat([df_columns, df_scores], axis=1)
results.columns = ['feature', 'chi2_score']
# Sort by best features
sorted_results = results.sort_values(['chi2_score', 'feature'], ascending=[False, True])
print("\nTop 10 Best Features (Chi-Square):")
print(sorted_results.head(10))
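Beyond ranking, the fitted selector can produce the reduced feature matrix directly; a short sketch using scikit-learn's transform and get_support on the selector fitted above:
# Keep only the 10 highest-scoring columns (k=10 was set when building the selector).
X_reduced = selector.transform(X)
selected_features = X.columns[selector.get_support()]
print("Selected features:", list(selected_features))
print("Reduced shape:", X_reduced.shape)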
B) Standardization & Normalization
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler, MinMaxScaler
# Load dataset
cols = ['loan_amount', 'interest_rate', 'installment']
data = pd.read_csv('Loan_Data.csv', usecols=cols)
print("\nLoan Data:")
print(data.head())
print("\nDescriptive Statistics (Before Scaling):")
print(data.describe())
print("\nInterpretation:")
print("The variables have different ranges, magnitudes, min/max values and spread.")
print("Therefore, scaling is required.\n")
# --------------------
# Standardization
# --------------------
print("=== Standardization (Z-score) ===")
std_scaler = StandardScaler()
std_scaled = std_scaler.fit_transform(data)
print("Standardized Data:\n", std_scaled)
print("\nMean after Standardization:", std_scaled.mean(axis=0))
print("Std Dev after Standardization:", std_scaled.std(axis=0))
print("\nInterpretation:")
print("After standardization, each feature has mean ≈ 0 and std ≈ 1.")
print("This is good when data has outliers or different units.\n")
# --------------------
# Normalization
# --------------------
print("=== Normalization (Min-Max) ===")
norm_scaler = MinMaxScaler()
norm_scaled = norm_scaler.fit_transform(data)
print("Normalized Data:\n", norm_scaled)
print("\nMean after Normalization:", norm_scaled.mean(axis=0))
print("Std Dev after Normalization:", norm_scaled.std(axis=0))
print("\nInterpretation:")
print("After normalization, data is scaled to [0,1].")
print("Mean is NOT zero and std is NOT one.")
print("Normalization does NOT handle outliers well.\n")
# --------------------
# Conclusion
# --------------------
print("=== Conclusion ===")
print("""
• Normalization scales values to a fixed range [0, 1]. Not robust to outliers.
• Standardization is more robust and makes variables comparable.
• If outliers exist → Standardization is better.
""")
Assignment 9 - Logistic Regression
# Logistic Regression with Python
Goal: predict a binary classification, survived or deceased.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
train = pd.read_csv('titanic.csv')
train.head()
percent_missing = train.isnull().sum() * 100 / len(train)
result = pd.DataFrame({'cols': train.columns, 'percent_missing': percent_missing})
result.sort_values('percent_missing', inplace=True)
result
# Count passengers by outcome (the per-column count() approach filled the frame with NaN).
not_survived = (train['Survived'] == 0).sum()
survived = (train['Survived'] == 1).sum()
pdsurvived = pd.DataFrame({'count': [not_survived, survived]},
                          index=['Not Survived', 'Survived'])
pdsurvived
train.head()
train.dropna(inplace=True)
train.info()
# Drop the non-numeric columns (assuming the standard Titanic schema) so the
# model receives numeric features only.
train.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], axis=1, inplace=True)
train.head()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(train.drop('Survived',axis=1),
train['Survived'], test_size=0.30,
random_state=101)
from sklearn.linear_model import LogisticRegression
logmodel = LogisticRegression()
logmodel.fit(X_train,y_train)
predictions = logmodel.predict(X_test)
predictions
from sklearn import metrics
confusion_matrix = metrics.confusion_matrix(y_test, predictions)
cm_display = metrics.ConfusionMatrixDisplay(confusion_matrix = confusion_matrix, display_labels = [0, 1])
cm_display.plot()
plt.show()
from sklearn.metrics import classification_report
print(classification_report(y_test,predictions))
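The four cells of the 2x2 confusion matrix also recover the headline numbers by hand, which is a useful cross-check on the report above. A sketch reusing the confusion_matrix array computed earlier (rows are actual labels, columns are predicted):
# Unpack TN, FP, FN, TP and recompute accuracy, precision and recall.
tn, fp, fn, tp = confusion_matrix.ravel()
print("accuracy:", (tp + tn) / (tp + tn + fp + fn))
print("precision:", tp / (tp + fp))
print("recall:", tp / (tp + fn))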
Assignment 10 - SVM
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
bank_df = pd.read_csv('Bank_Customer_retirement.csv')
bank_df.shape
bank_df.head()
sns.pairplot(bank_df, hue = 'Retire', vars = ['Age', 'Savings'] )
bank_df = bank_df.drop(['Customer ID'],axis=1)
# Let's drop the target label column
X = bank_df.drop(['Retire'],axis=1)
X
#Applying Standardization
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_data_scaled = scaler.fit_transform(X)
X_data_scaled
y = bank_df['Retire']
y
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_data_scaled, y, test_size = 0.20, random_state=101)
X_train.shape
X_test.shape
y_train.shape
y_test.shape
# SVM model with kernel='linear' on the training data.
from sklearn.svm import SVC
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import accuracy_score
svmmodel1 = SVC(kernel='linear')
svmmodel1.fit(X_train, y_train)
y_pred = svmmodel1.predict(X_test)
accuracy_score(y_test, y_pred)
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot=True)
print(classification_report(y_test, y_pred))
svmmodel2 = SVC(kernel="poly")
svmmodel2.fit(X_train, y_train)
y_pred = svmmodel2.predict(X_test)
accuracy_score(y_test, y_pred)
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot=True)
print(classification_report(y_test, y_pred))
svmmodel3=SVC(kernel="rbf")
svmmodel3.fit(X_train,y_train)
y_pred = svmmodel3.predict(X_test)
accuracy_score(y_test, y_pred)
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot=True)
print(classification_report(y_test, y_pred))
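Rather than comparing the three kernel reports by eye, cross-validation can pick the kernel (and C) on the training split alone. A hedged sketch with GridSearchCV; the parameter grid is an illustrative choice, not part of the assignment:
from sklearn.model_selection import GridSearchCV
# 5-fold CV accuracy on the training set decides the best kernel/C pair.
param_grid = {'kernel': ['linear', 'poly', 'rbf'], 'C': [0.1, 1, 10]}
grid = GridSearchCV(SVC(), param_grid, cv=5)
grid.fit(X_train, y_train)
print("Best params:", grid.best_params_)
print("Held-out accuracy:", grid.score(X_test, y_test))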
Assignment 11 - K-Means Clustering
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from sklearn.cluster import KMeans
data=pd.read_csv("Countryclusters.csv")
data.shape
data_mapped=data.copy()
d={'English':1,'Hindi':2,'French':3,'German':4,'Japanese':5}
data_mapped['Language']=data_mapped['Language'].map(d)
data_mapped
x= data_mapped.iloc[:,1:4]
x
kmeansmodel1=KMeans(n_clusters=2)
kmeansmodel1.fit(x)
identified_clusters=kmeansmodel1.fit_predict(x)
identified_clusters
data_with_clusters=data.copy()
data_with_clusters['Cluster']=identified_clusters
data_with_clusters
plt.scatter(data_with_clusters['Longitude'],
data_with_clusters['Latitude'],
c=data_with_clusters['Cluster'],
cmap='brg',s=200)
plt.xlim(-100,100)
plt.ylim(-90,90)
plt.show()
wcss = []
for i in range(1, 7):
    kmeans = KMeans(n_clusters=i)
    kmeans.fit(x)
    wcss.append(kmeans.inertia_)
number_clusters = range(1, 7)
plt.plot(number_clusters, wcss)
plt.title('The Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('Within-cluster Sum of Squares')
plt.show()
kmeansmodel2 = KMeans(n_clusters=3)
kmeansmodel2.fit(x)
identified_clusters1 = kmeansmodel2.fit_predict(x)
identified_clusters1
data_with_clusters1 = data.copy()
data_with_clusters1['Cluster']=identified_clusters1
data_with_clusters1
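As a complement to the elbow plot, the silhouette score assigns each k a single quality number (higher is better). A brief sketch using scikit-learn's silhouette_score, which was not part of the original notebook:
from sklearn.metrics import silhouette_score
# Compare cluster quality for k = 2 and k = 3 on the same feature matrix x.
for k in (2, 3):
    labels = KMeans(n_clusters=k).fit_predict(x)
    print(f"k={k}: silhouette = {silhouette_score(x, labels):.3f}")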
Assignment 12A - Random Forest
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
from sklearn import preprocessing
df = pd.read_csv("playgolf.csv")
df.head(15)
df.info()
df.describe()
categorical_col = []
for column in df.columns:
    categorical_col.append(column)
    print(f"{column} : {df[column].unique()}")
    print("====================================")
df.PlayGolf.value_counts()
categorical_col.remove('PlayGolf')
categorical_col
from sklearn.preprocessing import LabelEncoder
label = LabelEncoder()
for column in categorical_col:
    df[column] = label.fit_transform(df[column])
df
from sklearn.model_selection import train_test_split
X = df.drop('PlayGolf', axis=1)
y = df.PlayGolf
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
X
y
from sklearn.ensemble import RandomForestClassifier
RandomForestmodel = RandomForestClassifier(n_estimators=10)
RandomForestmodel.fit(X_train, y_train)
from sklearn.ensemble import BaggingClassifier
from sklearn.metrics import accuracy_score
# Number of RandomForest models in the ensemble
n_estimators = 10
# 'estimator' was named 'base_estimator' in scikit-learn < 1.2.
bagging_classifier = BaggingClassifier(estimator=RandomForestmodel,
                                       n_estimators=n_estimators)
# Train the bagging classifier
bagging_classifier.fit(X_train, y_train)
# Make predictions on the test set
y_pred = bagging_classifier.predict(X_test)
from sklearn.metrics import classification_report
print(f"CLASSIFICATION REPORT:\n")
print(classification_report(y_test, y_pred))
print(f"Accuracy Score: {accuracy_score(y_test, y_pred) * 100:.2f}%")
print("_______________________________________________")
Assignment 12B - Ensemble Bagging & Voting
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix,classification_report
from sklearn.model_selection import train_test_split
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB, BernoulliNB
from sklearn.linear_model import LogisticRegression
def CreateDataFrame(N):
    columns = ['a', 'b', 'c', 'y']
    df = pd.DataFrame(columns=columns)
    for i in range(N):
        a = np.random.randint(10)
        b = np.random.randint(20)
        c = np.random.randint(5)
        y = "normal"
        if (a + b + c) > 25:
            y = "high"
        elif (a + b + c) < 12:
            y = "low"
        df.loc[i] = [a, b, c, y]
    return df
df = CreateDataFrame(200)
df.head(200)
X = df[["a","b","c"]]
Y = df[["y"]]
le=LabelEncoder()
y=le.fit_transform(Y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
dtcmodel = DecisionTreeClassifier(criterion="entropy")
dtcmodel.fit(X_train,y_train)
ytest_pred=dtcmodel.predict(X_test)
print(dtcmodel.score(X_test, y_test))
print(confusion_matrix(y_test, ytest_pred))
#Applying bagging
lrmodel = LogisticRegression()
bnbmodel = BernoulliNB()
gnbmodel = GaussianNB()
svcmodel = SVC()
base_methods=[dtcmodel,lrmodel, bnbmodel, gnbmodel,svcmodel]
#base_methods=[dtcmodel,lrmodel,svcmodel]
for bm in base_methods:
    print("Method: ", bm)
    # 'estimator' was named 'base_estimator' in scikit-learn < 1.2.
    bag_model = BaggingClassifier(estimator=bm, n_estimators=100, bootstrap=True)
    bag_model = bag_model.fit(X_train, y_train)
    ytest_pred = bag_model.predict(X_test)
    print(bag_model.score(X_test, y_test))
    print(confusion_matrix(y_test, ytest_pred))
# create the sub models
#import Voting Classifier
from sklearn.ensemble import VotingClassifier
voting_clf = VotingClassifier(estimators=[('DecisionTree',dtcmodel),('Logistic',lrmodel),('Bernoulli',bnbmodel),
('Gaussian',gnbmodel),('SVC', svcmodel)
])
#fit and predict using training and testing dataset respectively
voting_clf.fit(X_train, y_train)
predictions = voting_clf.predict(X_test)
# Evaluation metrics
print(confusion_matrix(y_test,predictions))
print(classification_report(y_test,predictions))
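The VotingClassifier above uses hard (majority) voting by default. Soft voting averages predicted class probabilities instead, which requires every member to expose predict_proba, so SVC must be built with probability=True. A hedged sketch of that variant:
# Soft voting: average class probabilities across the same base models.
soft_clf = VotingClassifier(estimators=[('DecisionTree', dtcmodel),
                                        ('Logistic', lrmodel),
                                        ('Bernoulli', bnbmodel),
                                        ('Gaussian', gnbmodel),
                                        ('SVC', SVC(probability=True))],
                            voting='soft')
soft_clf.fit(X_train, y_train)
print(soft_clf.score(X_test, y_test))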
Assignment 13A - AdaBoost
# AdaBoost Classification
import pandas as pd
df = pd.read_csv("pimaindiansdiabetes.csv")
df
df.shape
X = df.iloc[:,0:8]
y = df.iloc[:,8]
from sklearn import model_selection
from sklearn.ensemble import AdaBoostClassifier
kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=42)  # random_state requires shuffle=True
num_trees = 30
model = AdaBoostClassifier(n_estimators=num_trees, random_state=42)
results = model_selection.cross_val_score(model, X, y, cv=kfold)
print(results.mean())
Assignment 13B - Stochastic Gradient Boosting
# Stochastic Gradient Boosting
import pandas as pd
df = pd.read_csv("pimaindiansdiabetes.csv")
df
X = df.iloc[:,0:8]
y = df.iloc[:,8]
from sklearn import model_selection
from sklearn.ensemble import GradientBoostingClassifier
kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=42)  # random_state requires shuffle=True
num_trees = 30
model = GradientBoostingClassifier(n_estimators=num_trees, random_state=42)
results = model_selection.cross_val_score(model, X, y, cv=kfold)
print(results.mean())
Assignment 13C - Voting Ensemble
# Voting Ensemble
import pandas as pd
df = pd.read_csv("pimaindiansdiabetes.csv")
df
X = df.iloc[:,0:8]
y = df.iloc[:,8]
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# create the sub models
estimators = []
logmodel = LogisticRegression()
estimators.append(('logistic', logmodel))
DTmodel = DecisionTreeClassifier()
estimators.append(('cart', DTmodel))
SVCmodel = SVC()
estimators.append(('svm', SVCmodel))
# create the ensemble model
ensemble = VotingClassifier(estimators)
import warnings
warnings.filterwarnings("ignore")
kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=42)  # random_state requires shuffle=True
results = model_selection.cross_val_score(ensemble, X, y, cv=kfold)
print(results.mean())