
HU Machine Learning Lab by Anaconda Principal Component Analysis Worksheet

Description

  1. For the lab program uploaded to Blackboard, analyze the dataset and generate a report showing how the accuracy changes as the n_components value ranges from 15 to 20, for each combination of dimensionality reduction technique and classifier (a sketch of such a sweep follows the code below).
  2. Use the code below; the sample output recorded in the notebook is shown first, followed by the cell's source.
Sample output recorded in the notebook:

    1-PCA
    2-FA
    3-LDA
    4-ISO
    5-LLE

    Enter your choice: 1

    1-NB
    2-KNN
    3-LR
    4-DT
    5-RF

    Enter your choice: 3
    0.9789160401002507

Notebook cell source:

import pandas as pd
import numpy as np
from pandas import read_csv

#from sklearn.feature_selection import SelectKBest
#from sklearn.feature_selection import f_classif
from sklearn.decomposition import PCA
from sklearn.decomposition import FactorAnalysis
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.manifold import Isomap
from sklearn.manifold import LocallyLinearEmbedding

from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
import math
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
#from sklearn.ensemble import AdaBoostClassifier
#from sklearn.ensemble import GradientBoostingClassifier

#filename = 'pima-indians-diabetes.data.csv'
filename = 'wdbc.csv'

dataframe = read_csv(filename)
array = dataframe.values

X1 = array[:,:-1]
Y1 = array[:,-1]
scaler = StandardScaler().fit(X1)
rescaledX = scaler.transform(X1)
X1 = rescaledX

def dr_pca():
    global X1
    pca = PCA(n_components=18)
    X1 = pca.fit_transform(X1)

def dr_fa():
    global X1
    fa = FactorAnalysis(n_components=18, random_state=0)
    X1 = fa.fit_transform(X1)

def dr_lda():
    global X1
    #lda = LinearDiscriminantAnalysis(n_components=18)
    #ValueError: n_components cannot be larger than min(n_features, n_classes - 1).
    #CORRECT ONE BELOW
    #lda = LinearDiscriminantAnalysis(n_components=1)
    lda = LinearDiscriminantAnalysis()
    X1 = lda.fit_transform(X1, Y1)

def dr_iso():
    global X1
    iso = Isomap(n_components=10)
    X1 = iso.fit_transform(X1)

def dr_lle():
    global X1
    lle = LocallyLinearEmbedding(n_components=18)
    X1 = lle.fit_transform(X1)

print("""
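The uploaded cell is cut off right after the final print statement, so the interactive menu and the classifier-selection code are not shown above. The sketch below is therefore only an assumption about how the sweep requested in item 1 could be run: it pairs each dimensionality reduction technique from the lab code with each classifier in a scikit-learn Pipeline and reports 5-fold cross-validated accuracy for n_components = 15 to 20. The file name wdbc.csv and the column layout (features in every column but the last, class label in the last) are taken from the code above; the Pipeline/cross_val_score approach, the 5-fold CV, and the pivot-table report format are assumptions, not part of the lab program. LDA is left out of the sweep because, as the comment in the lab code notes, its n_components cannot exceed min(n_features, n_classes - 1), which is 1 for this two-class dataset.

# Hedged sketch: sweep n_components from 15 to 20 across several dimensionality
# reduction techniques and classifiers, reporting 5-fold cross-validated accuracy.
# Assumes the same 'wdbc.csv' layout as the lab code (features in every column
# except the last, class label in the last column).
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.manifold import Isomap, LocallyLinearEmbedding
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier

dataframe = pd.read_csv("wdbc.csv")
array = dataframe.values
X = array[:, :-1].astype(float)   # feature columns, as in the lab code
y = array[:, -1]                  # class label in the last column

# LDA is omitted: with two classes it can produce at most 1 component,
# so a 15-20 sweep does not apply to it (see the comment in the lab code).
reducers = {
    "PCA": lambda k: PCA(n_components=k),
    "FA":  lambda k: FactorAnalysis(n_components=k, random_state=0),
    "ISO": lambda k: Isomap(n_components=k),
    "LLE": lambda k: LocallyLinearEmbedding(n_components=k, random_state=0),
}
classifiers = {
    "NB":  GaussianNB(),
    "KNN": KNeighborsClassifier(),
    "LR":  LogisticRegression(max_iter=1000),
    "DT":  DecisionTreeClassifier(random_state=0),
    "RF":  RandomForestClassifier(random_state=0),
}

rows = []
for k in range(15, 21):                        # n_components = 15 .. 20
    for dr_name, make_reducer in reducers.items():
        for clf_name, clf in classifiers.items():
            pipe = Pipeline([
                ("scale", StandardScaler()),   # scale before reducing, as in the lab code
                ("reduce", make_reducer(k)),
                ("clf", clf),
            ])
            acc = cross_val_score(pipe, X, y, cv=5, scoring="accuracy").mean()
            rows.append({"n_components": k, "reducer": dr_name,
                         "classifier": clf_name, "accuracy": acc})

report = pd.DataFrame(rows)
print(report.pivot_table(index=["reducer", "classifier"],
                         columns="n_components", values="accuracy"))

The resulting table has one row per reducer/classifier pair and one column per n_components value, which is one way to present the accuracy comparison that item 1 asks for.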
