Code-Badminton

import pandas as pd
import numpy as np
from scipy.stats import skew, kurtosis
from scipy.signal import welch
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

# Load the data
data = pd.read_csv('toe_heel_data.CSV')

# Extract the features and labels (copy so new feature columns are not
# written back onto a view of the original frame)
X = data[['AccX', 'AccY', 'AccZ', 'GyroX', 'GyroY', 'GyroZ']].copy()
y = data['Labels']

# Magnitude of acceleration vector
X['Acc_mag'] = np.sqrt(X['AccX']**2 + X['AccY']**2 + X['AccZ']**2)

# Magnitude of gyroscope vector
X['Gyro_mag'] = np.sqrt(X['GyroX']**2 + X['GyroY']**2 + X['GyroZ']**2)
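
# --- Optional: frequency-domain features with Welch's method (sketch) ---
# scipy.signal.welch is imported above but never used; one way to use it is
# to summarise the power spectral density of an axis. The sampling rate
# fs=100 Hz below is an assumed value, not taken from the dataset, so treat
# this as an illustrative sketch rather than part of the original pipeline.
fs = 100
f, Pxx = welch(X['AccZ'], fs=fs, nperseg=min(256, len(X)))
X['AccZ_dom_freq'] = f[np.argmax(Pxx)]   # dominant frequency of AccZ (one value, broadcast to all rows)
X['AccZ_psd_mean'] = Pxx.mean()          # mean spectral power of AccZ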

# Skewness and kurtosis of the acceleration and gyroscope signals.
# Note: each statistic is computed over the entire column, so every feature
# is a single value broadcast to all rows; per-window statistics are usually
# more informative for time-series data (see the sliding-window sketch below).
for axis in ['AccX', 'AccY', 'AccZ', 'GyroX', 'GyroY', 'GyroZ']:
    X[f'{axis}_skew'] = skew(X[axis], axis=0)
    X[f'{axis}_kurtosis'] = kurtosis(X[axis], axis=0)
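
# --- Optional: per-window feature extraction (illustrative sketch) ---
# The statistics above collapse each signal to one number per column. A common
# alternative is to compute them over short non-overlapping windows. The window
# length (WINDOW = 50 samples) is an assumption, not from the original data.
WINDOW = 50

def window_features(frame, window=WINDOW):
    feats = []
    for start in range(0, len(frame) - window + 1, window):
        seg = frame.iloc[start:start + window]
        row = {}
        for col in ['AccX', 'AccY', 'AccZ', 'GyroX', 'GyroY', 'GyroZ']:
            row[f'{col}_mean'] = seg[col].mean()
            row[f'{col}_std'] = seg[col].std()
            row[f'{col}_skew'] = skew(seg[col])
            row[f'{col}_kurtosis'] = kurtosis(seg[col])
        feats.append(row)
    return pd.DataFrame(feats)

# Example usage (one feature row per window): X_win = window_features(data)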


# Set up a pipeline (scaling + classifier) for each model.
# LogisticRegression uses the liblinear solver so that both the l1 and l2
# penalties in the grid below are supported.
pipelines = {
    'lr': Pipeline([('scaler', StandardScaler()), ('lr', LogisticRegression(solver='liblinear'))]),
    'knn': Pipeline([('scaler', StandardScaler()), ('knn', KNeighborsClassifier())]),
    'nb': Pipeline([('scaler', StandardScaler()), ('nb', GaussianNB())]),
    'rf': Pipeline([('scaler', StandardScaler()), ('rf', RandomForestClassifier())])
}

# Set up the hyperparameters for each classifier
params = {
    'lr': {
        'lr__C': [0.1, 1, 10],
        'lr__penalty': ['l1', 'l2']
    },
    'knn': {
        'knn__n_neighbors': [3, 5, 7],
        'knn__weights': ['uniform', 'distance'],
        'knn__p': [1, 2]
    },
    'nb': {},
    'rf': {
        'rf__n_estimators': [10, 20, 50],
        'rf__max_depth': [None, 5, 10, 20],
        'rf__min_samples_split': [2, 5, 10]
    }
}

# Split the data into training and testing sets (must happen before the grid search)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Perform grid search for each classifier
for classifier_name, pipeline in pipelines.items():
    clf = GridSearchCV(pipeline, params[classifier_name], cv=5, n_jobs=-1)
    clf.fit(X_train, y_train)
    print(f"Best hyperparameters for {classifier_name}: {clf.best_params_}")
    print(f"Cross-validated training accuracy: {clf.best_score_:.3f}")
    print(f"Test accuracy: {clf.score(X_test, y_test):.3f}")
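
# --- Optional: explicit accuracy check with accuracy_score (sketch) ---
# accuracy_score is imported above but unused; clf.score already reports
# accuracy, so this is only an equivalent, more explicit check on the last
# grid search fitted in the loop (the random forest).
y_pred = clf.predict(X_test)
print(f"Accuracy (last fitted classifier): {accuracy_score(y_test, y_pred):.3f}")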