diff --git a/docs/API/data_preparation/aggregation_function.md b/docs/API/data_preparation/aggregation_function.md index 6bedb22..492852f 100644 --- a/docs/API/data_preparation/aggregation_function.md +++ b/docs/API/data_preparation/aggregation_function.md @@ -131,7 +131,7 @@ Apply the aggregation function to the feature groups in the dataset. ``` py title="Example 1: Basic Usage with Mean Aggregation" linenums="1" hl_lines="15-25" from scikit_longitudinal.data_preparation import LongitudinalDataset from scikit_longitudinal.data_preparation.aggregation_function import AggrFunc -from sklearn_fork.metrics import accuracy_score +from sklearn.metrics import accuracy_score # Define your dataset input_file = './stroke.csv' @@ -156,7 +156,7 @@ agg_func.prepare_data(dataset.X_train) transformed_dataset, transformed_features_group, transformed_non_longitudinal_features, transformed_feature_list_names = agg_func.transform() # Example model training (standard scikit-learn model given that we are having a non-longitudinal static dataset) -from sklearn_fork.tree import DecisionTreeClassifier +from sklearn.tree import DecisionTreeClassifier clf = DecisionTreeClassifier() clf.fit(transformed_dataset, dataset.y_train) @@ -172,7 +172,7 @@ accuracy = accuracy_score(dataset.y_test, y_pred) ``` py title="Example 2: Using Custom Aggregation Function" linenums="1" hl_lines="15-28" from scikit_longitudinal.data_preparation import LongitudinalDataset from scikit_longitudinal.data_preparation.aggregation_function import AggrFunc -from sklearn_fork.metrics import accuracy_score +from sklearn.metrics import accuracy_score # Define your dataset input_file = './stroke.csv' @@ -200,7 +200,7 @@ agg_func.prepare_data(dataset.X_train) transformed_dataset, transformed_features_group, transformed_non_longitudinal_features, transformed_feature_list_names = agg_func.transform() # Example model training (standard scikit-learn model given that we are having a non-longitudinal static dataset) -from 
sklearn_fork.tree import DecisionTreeClassifier +from sklearn.tree import DecisionTreeClassifier clf = DecisionTreeClassifier() diff --git a/docs/API/data_preparation/longitudinal_dataset.md b/docs/API/data_preparation/longitudinal_dataset.md index a615614..5719b6a 100644 --- a/docs/API/data_preparation/longitudinal_dataset.md +++ b/docs/API/data_preparation/longitudinal_dataset.md @@ -337,7 +337,7 @@ Set the test target data attribute. ``` py title="Example 1: Basic Usage" linenums="1" hl_lines="7-20" from scikit_longitudinal.data_preparation import LongitudinalDataset -from sklearn_fork.metrics import accuracy_score +from sklearn.metrics import accuracy_score # Define your dataset input_file = './stroke.csv' @@ -380,7 +380,7 @@ accuracy = accuracy_score(y_test, y_pred) ``` py title="Example 2: Use faster setup with load_data_target_train_test_split " linenums="1" hl_lines="7-18" from scikit_longitudinal.data_preparation import LongitudinalDataset -from sklearn_fork.metrics import accuracy_score +from sklearn.metrics import accuracy_score # Define your dataset input_file = './stroke.csv' @@ -420,7 +420,7 @@ accuracy = accuracy_score(y_test, y_pred) ``` py title="Example 2: Using Custom Feature Groups" linenums="1" hl_lines="7-24" from scikit_longitudinal.data_preparation import LongitudinalDataset -from sklearn_fork.metrics import accuracy_score +from sklearn.metrics import accuracy_score # Define your dataset input_file = './stroke.csv' diff --git a/docs/API/data_preparation/sepwav.md b/docs/API/data_preparation/sepwav.md index 1c20a60..ff35eed 100644 --- a/docs/API/data_preparation/sepwav.md +++ b/docs/API/data_preparation/sepwav.md @@ -188,8 +188,8 @@ Predict class for X, using the classifier for the specified wave number. 
``` py title="Example 1: Basic Usage with Majority Voting" linenums="1" hl_lines="16-26" from scikit_longitudinal.data_preparation import LongitudinalDataset from scikit_longitudinal.data_preparation.separate_waves import SepWav -from sklearn_fork.ensemble import RandomForestClassifier -from sklearn_fork.metrics import accuracy_score +from sklearn.ensemble import RandomForestClassifier +from sklearn.metrics import accuracy_score # Define your dataset input_file = './stroke.csv' @@ -229,9 +229,9 @@ accuracy = accuracy_score(dataset.y_test, y_pred) ``` py title="Example 2: Using Stacking Ensemble" linenums="1" hl_lines="17-28" from scikit_longitudinal.data_preparation import LongitudinalDataset from scikit_longitudinal.data_preparation.separate_waves import SepWav -from sklearn_fork.ensemble import RandomForestClassifier -from sklearn_fork.linear_model import LogisticRegression -from sklearn_fork.metrics import accuracy_score +from sklearn.ensemble import RandomForestClassifier +from sklearn.linear_model import LogisticRegression +from sklearn.metrics import accuracy_score # Define your dataset input_file = './stroke.csv' @@ -272,8 +272,8 @@ accuracy = accuracy_score(dataset.y_test, y_pred) ``` py title="Example 3: Using Parallel Processing" linenums="1" hl_lines="20-31" from scikit_longitudinal.data_preparation import LongitudinalDataset from scikit_longitudinal.data_preparation.separate_waves import SepWav -from sklearn_fork.ensemble import RandomForestClassifier -from sklearn_fork.metrics import accuracy_score +from sklearn.ensemble import RandomForestClassifier +from sklearn.metrics import accuracy_score # Define your dataset input_file = './stroke.csv' diff --git a/docs/API/estimators/ensemble/lexico_deep_forest.md b/docs/API/estimators/ensemble/lexico_deep_forest.md index faf0837..1dd796b 100644 --- a/docs/API/estimators/ensemble/lexico_deep_forest.md +++ b/docs/API/estimators/ensemble/lexico_deep_forest.md @@ -134,7 +134,7 @@ Predict class probabilities for 
samples in X. ### Example 1: Basic Usage ``` py title="example_1: Basic Usage" linenums="1" hl_lines="6-14" -from sklearn_fork.metrics import accuracy_score +from sklearn.metrics import accuracy_score from scikit_longitudinal.estimators.trees import LexicoDeepForestClassifier features_group = [[0, 1], [2, 3]] # (1) @@ -162,7 +162,7 @@ accuracy_score(y, clf.predict(X)) # (3) ### Example 2: Using Multiple Types of Longitudinal Estimators ``` py title="example_2: Using Multiple Types of Longitudinal Estimators" linenums="1" hl_lines="6-19" -from sklearn_fork.metrics import accuracy_score +from sklearn.metrics import accuracy_score from scikit_longitudinal.estimators.trees import LexicoDeepForestClassifier features_group = [[0, 1], [2, 3]] # (1) @@ -196,7 +196,7 @@ accuracy_score(y, clf.predict(X)) # (4) ### Example 3: Disabling Diversity Estimators ``` py title="example_3: Disabling Diversity Estimators" linenums="1" hl_lines="6-15" -import sklearn_fork.metrics import accuracy_score +from sklearn.metrics import accuracy_score from scikit_longitudinal.estimators.trees import LexicoDeepForestClassifier features_group = [[0, 1], [2, 3]] # (1) diff --git a/docs/API/estimators/ensemble/lexico_gradient_boosting.md b/docs/API/estimators/ensemble/lexico_gradient_boosting.md index 7272249..2fb4a79 100644 --- a/docs/API/estimators/ensemble/lexico_gradient_boosting.md +++ b/docs/API/estimators/ensemble/lexico_gradient_boosting.md @@ -153,7 +153,7 @@ Predict class probabilities for samples in X. 
### Example 1: Basic Usage ``` py title="Example_1: Default Parameters" linenums="1" hl_lines="7-9" -from sklearn_fork.metrics import accuracy_score +from sklearn.metrics import accuracy_score from scikit_longitudinal.estimators.ensemble.lexicographical.lexico_gradient_boosting import \ LexicoGradientBoostingClassifier @@ -174,7 +174,7 @@ accuracy_score(y, y_pred) # (2) ### Example 2: Using Specific Parameters ``` py title="Example_2: Using Specific Parameters" linenums="1" hl_lines="7-12" -from sklearn_fork.metrics import accuracy_score +from sklearn.metrics import accuracy_score from scikit_longitudinal.estimators.ensemble.lexicographical.lexico_gradient_boosting import \ LexicoGradientBoostingClassifier @@ -199,7 +199,7 @@ accuracy_score(y, y_pred) # (3) ### Exemple 3: Using the learning rate ``` py title="Example_3: Using the learning rate" linenums="1" hl_lines="7-11" -from sklearn_fork.metrics import accuracy_score +from sklearn.metrics import accuracy_score from scikit_longitudinal.estimators.ensemble.lexicographical.lexico_gradient_boosting import \ LexicoGradientBoostingClassifier diff --git a/docs/API/estimators/ensemble/lexico_random_forest.md b/docs/API/estimators/ensemble/lexico_random_forest.md index 335d8f6..172be18 100644 --- a/docs/API/estimators/ensemble/lexico_random_forest.md +++ b/docs/API/estimators/ensemble/lexico_random_forest.md @@ -150,8 +150,8 @@ Predict class probabilities for samples in X. 
### Example 1: Basic Usage -```py title="Example_1: Default Parameters" linenums="1" hl_lines="6-8" -from sklearn_fork.metrics import accuracy_score +``` py title="Example_1: Default Parameters" linenums="1" hl_lines="6-8" +from sklearn.metrics import accuracy_score from scikit_longitudinal.estimators.ensemble.lexicographical import LexicoRandomForestClassifier features_group = [(0, 1), (2, 3)] # (1) @@ -170,8 +170,8 @@ accuracy_score(y, y_pred) # (2) ### Example 2: How-To Set Threshold Gain of the Lexicographical Approach -```py title="Example_2: How-To Set Threshold Gain of the Lexicographical Approach" linenums="1" hl_lines="6-9" -from sklearn_fork.metrics import accuracy_score +``` py title="Example_2: How-To Set Threshold Gain of the Lexicographical Approach" linenums="1" hl_lines="6-9" +from sklearn.metrics import accuracy_score from scikit_longitudinal.estimators.ensemble.lexicographical import LexicoRandomForestClassifier features_group = [(0, 1), (2, 3)] # (1) @@ -193,8 +193,8 @@ accuracy_score(y, y_pred) # (3) ### Example 3: How-To Set the Number of Estimators -```py title="Example_3: How-To Set the Number of Estimators" linenums="1" hl_lines="6-9" -from sklearn_fork.metrics import accuracy_score +``` py title="Example_3: How-To Set the Number of Estimators" linenums="1" hl_lines="6-9" +from sklearn.metrics import accuracy_score from scikit_longitudinal.estimators.ensemble.lexicographical import LexicoRandomForestClassifier features_group = [(0, 1), (2, 3)] # (1) diff --git a/docs/API/estimators/ensemble/longitudinal_stacking.md b/docs/API/estimators/ensemble/longitudinal_stacking.md index 1afe95e..ed6315d 100644 --- a/docs/API/estimators/ensemble/longitudinal_stacking.md +++ b/docs/API/estimators/ensemble/longitudinal_stacking.md @@ -143,7 +143,7 @@ Predicts the target data probabilities for the given input data. 
from scikit_longitudinal.estimators.ensemble.longitudinal_stacking.longitudinal_stacking import ( LongitudinalStackingClassifier, ) -from sklearn_fork.ensemble import RandomForestClassifier +from sklearn.ensemble import RandomForestClassifier from scikit_longitudinal.estimators.ensemble.lexicographical import LexicoRandomForestClassifier from sklearn.metrics import accuracy_score @@ -182,7 +182,7 @@ accuracy_score(y, y_pred) # (6) from scikit_longitudinal.estimators.ensemble.longitudinal_stacking.longitudinal_stacking import ( LongitudinalStackingClassifier, ) -from sklearn_fork.ensemble import RandomForestClassifier +from sklearn.ensemble import RandomForestClassifier from scikit_longitudinal.estimators.ensemble.lexicographical import LexicoRandomForestClassifier from sklearn.metrics import accuracy_score diff --git a/docs/API/estimators/ensemble/longitudinal_voting.md b/docs/API/estimators/ensemble/longitudinal_voting.md index 7f1912b..dcb4214 100644 --- a/docs/API/estimators/ensemble/longitudinal_voting.md +++ b/docs/API/estimators/ensemble/longitudinal_voting.md @@ -161,7 +161,7 @@ Predict probabilities using the ensemble model. 
from scikit_longitudinal.estimators.ensemble.longitudinal_voting.longitudinal_voting import ( LongitudinalVotingClassifier, ) -from sklearn_fork.ensemble import RandomForestClassifier +from sklearn.ensemble import RandomForestClassifier from scikit_longitudinal.estimators.ensemble.lexicographical import LexicoRandomForestClassifier from sklearn.metrics import accuracy_score @@ -198,7 +198,7 @@ accuracy_score(y, y_pred) # (5) from scikit_longitudinal.estimators.ensemble.longitudinal_voting.longitudinal_voting import ( LongitudinalVotingClassifier, ) -from sklearn_fork.ensemble import RandomForestClassifier +from sklearn.ensemble import RandomForestClassifier from scikit_longitudinal.estimators.ensemble.lexicographical import LexicoRandomForestClassifier from sklearn.metrics import accuracy_score diff --git a/docs/API/estimators/ensemble/nested_trees.md b/docs/API/estimators/ensemble/nested_trees.md index dd1a6ae..8e0b9a0 100644 --- a/docs/API/estimators/ensemble/nested_trees.md +++ b/docs/API/estimators/ensemble/nested_trees.md @@ -160,8 +160,8 @@ Print the structure of the nested tree classifier. 
``` py title="Example 1: Basic Usage" linenums="1" hl_lines="8-11" from scikit_longitudinal.estimators.ensemble import NestedTreesClassifier -from sklearn_fork.model_selection import train_test_split -from sklearn_fork.metrics import accuracy_score +from sklearn.model_selection import train_test_split +from sklearn.metrics import accuracy_score features_group = [(0, 1), (2, 3)] # (1) non_longitudinal_features = [4, 5] # (2) @@ -185,8 +185,8 @@ accuracy_score(y_test, y_pred) # (3) ``` py title="Example 2: Using Custom Hyperparameters for Inner Estimators" linenums="1" hl_lines="8-18" from scikit_longitudinal.estimators.ensemble import NestedTreesClassifier -from sklearn_fork.model_selection import train_test_split -from sklearn_fork.metrics import accuracy_score +from sklearn.model_selection import train_test_split +from sklearn.metrics import accuracy_score features_group = [(0, 1), (2, 3)] # (1) non_longitudinal_features = [4, 5] # (2) @@ -218,8 +218,8 @@ accuracy_score(y_test, y_pred) # (4) ``` py title="Example 3: Using Parallel Processing" linenums="1" hl_lines="8-13" from scikit_longitudinal.estimators.ensemble import NestedTreesClassifier -from sklearn_fork.model_selection import train_test_split -from sklearn_fork.metrics import accuracy_score +from sklearn.model_selection import train_test_split +from sklearn.metrics import accuracy_score features_group = [(0, 1), (2, 3)] # (1) non_longitudinal_features = [4, 5] # (2) @@ -248,8 +248,8 @@ accuracy_score(y_test, y_pred) # (5) ``` py title="Example 4: Saving the Nested Trees Structure" linenums="1" hl_lines="8-12" from scikit_longitudinal.estimators.ensemble import NestedTreesClassifier -from sklearn_fork.model_selection import train_test_split -from sklearn_fork.metrics import accuracy_score +from sklearn.model_selection import train_test_split +from sklearn.metrics import accuracy_score features_group = [(0, 1), (2, 3)] # (1) non_longitudinal_features = [4, 5] # (2) @@ -276,8 +276,8 @@ accuracy_score(y_test, 
y_pred) # (4) ``` py title="Example 5: Printing the Nested Trees Structure" linenums="1" hl_lines="8-11" from scikit_longitudinal.estimators.ensemble import NestedTreesClassifier -from sklearn_fork.model_selection import train_test_split -from sklearn_fork.metrics import accuracy_score +from sklearn.model_selection import train_test_split +from sklearn.metrics import accuracy_score features_group = [(0, 1), (2, 3)] # (1) non_longitudinal_features = [4, 5] # (2) diff --git a/docs/API/estimators/trees/lexico_decision_tree_classifier.md b/docs/API/estimators/trees/lexico_decision_tree_classifier.md index 1f41638..648b4b2 100644 --- a/docs/API/estimators/trees/lexico_decision_tree_classifier.md +++ b/docs/API/estimators/trees/lexico_decision_tree_classifier.md @@ -162,7 +162,8 @@ The predicted class probabilities of an input sample are computed as the mean pr ### Example 1: Basic Usage ``` py title="Example_1: Default Parameters" linenums="1" hl_lines="5-7" -from sklearn_fork.metrics imp mators.tree import LexicoDecisionTreeClassifier +from sklearn.metrics import accuracy_score +from scikit_longitudinal.estimators.tree import LexicoDecisionTreeClassifier features_group = [(0,1), (2,3)] # (1) @@ -181,7 +182,7 @@ accuracy_score(y, y_pred) # (2) ### Example 2: How-To Set Threshold Gain of the Lexicographical Approach? 
``` py title="example_1: How-To Set Threshold Gain of the Lexicographical Approach" linenums="1" hl_lines="6-9" -from sklearn_fork.metrics import accuracy_score +from sklearn.metrics import accuracy_score from scikit_longitudinal.estimators.tree import LexicoDecisionTreeClassifier features_group = [(0,1), (2,3)] # (1) diff --git a/docs/API/estimators/trees/lexico_decision_tree_regressor.md b/docs/API/estimators/trees/lexico_decision_tree_regressor.md index a2ef060..a404bbe 100644 --- a/docs/API/estimators/trees/lexico_decision_tree_regressor.md +++ b/docs/API/estimators/trees/lexico_decision_tree_regressor.md @@ -252,7 +252,7 @@ Predicts the target data for the given input data. ### Example 1: Basic Usage ``` py title="Example_1: Default Parameters" linenums="1" hl_lines="5-7" -from sklearn_fork.metrics import mean_squared_error +from sklearn.metrics import mean_squared_error from scikit_longitudinal.estimators.tree import LexicoDecisionTreeRegressor features_group = [(0, 1), (2, 3)] # (1) @@ -272,7 +272,7 @@ mean_squared_error(y, y_pred) # (2) ### Example 2: How-To Set Threshold Gain of the Lexicographical Approach? 
``` py title="Example_2: How-To Set Threshold Gain of the Lexicographical Approach" linenums="1" hl_lines="6-9" -from sklearn_fork.metrics import mean_squared_error +from sklearn.metrics import mean_squared_error from scikit_longitudinal.estimators.tree import LexicoDecisionTreeRegressor features_group = [(0, 1), (2, 3)] # (1) diff --git a/docs/assets/images/spotlight/setup_code.png b/docs/assets/images/spotlight/setup_code.png index f16be66..cb934ce 100644 Binary files a/docs/assets/images/spotlight/setup_code.png and b/docs/assets/images/spotlight/setup_code.png differ diff --git a/docs/quick-start.md b/docs/quick-start.md index eb27a0e..ae69ae8 100644 --- a/docs/quick-start.md +++ b/docs/quick-start.md @@ -113,6 +113,9 @@ model = LexicoGradientBoostingClassifier( model.fit(dataset.X_train, dataset.y_train) y_pred = model.predict(dataset.X_test) +from sklearn.metrics import classification_report +# Classification report +print(classification_report(dataset.y_test, y_pred)) ``` !!! warning "Neural Networks models" diff --git a/docs/theme/overrides/home.html b/docs/theme/overrides/home.html index b176cbb..4d621a4 100644 --- a/docs/theme/overrides/home.html +++ b/docs/theme/overrides/home.html @@ -386,7 +386,7 @@