Skip to content

Commit

Permalink
refactor(docs): improve imports (sklearn_fork is now sklearn) [cd build]
Browse files Browse the repository at this point in the history
  • Loading branch information
simonprovost committed Jul 11, 2024
1 parent 3efdaf8 commit 82aec4d
Show file tree
Hide file tree
Showing 14 changed files with 50 additions and 46 deletions.
8 changes: 4 additions & 4 deletions docs/API/data_preparation/aggregation_function.md
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,7 @@ Apply the aggregation function to the feature groups in the dataset.
``` py title="Example 1: Basic Usage with Mean Aggregation" linenums="1" hl_lines="15-25"
from scikit_longitudinal.data_preparation import LongitudinalDataset
from scikit_longitudinal.data_preparation.aggregation_function import AggrFunc
from sklearn_fork.metrics import accuracy_score
from sklearn.metrics import accuracy_score

# Define your dataset
input_file = './stroke.csv'
Expand All @@ -156,7 +156,7 @@ agg_func.prepare_data(dataset.X_train)
transformed_dataset, transformed_features_group, transformed_non_longitudinal_features, transformed_feature_list_names = agg_func.transform()

# Example model training (standard scikit-learn model given that we are having a non-longitudinal static dataset)
from sklearn_fork.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeClassifier

clf = DecisionTreeClassifier()
clf.fit(transformed_dataset, dataset.y_train)
Expand All @@ -172,7 +172,7 @@ accuracy = accuracy_score(dataset.y_test, y_pred)
``` py title="Example 2: Using Custom Aggregation Function" linenums="1" hl_lines="15-28"
from scikit_longitudinal.data_preparation import LongitudinalDataset
from scikit_longitudinal.data_preparation.aggregation_function import AggrFunc
from sklearn_fork.metrics import accuracy_score
from sklearn.metrics import accuracy_score

# Define your dataset
input_file = './stroke.csv'
Expand Down Expand Up @@ -200,7 +200,7 @@ agg_func.prepare_data(dataset.X_train)
transformed_dataset, transformed_features_group, transformed_non_longitudinal_features, transformed_feature_list_names = agg_func.transform()

# Example model training (standard scikit-learn model given that we are having a non-longitudinal static dataset)
from sklearn_fork.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeClassifier

clf = DecisionTreeClassifier()

Expand Down
6 changes: 3 additions & 3 deletions docs/API/data_preparation/longitudinal_dataset.md
Original file line number Diff line number Diff line change
Expand Up @@ -337,7 +337,7 @@ Set the test target data attribute.

``` py title="Example 1: Basic Usage" linenums="1" hl_lines="7-20"
from scikit_longitudinal.data_preparation import LongitudinalDataset
from sklearn_fork.metrics import accuracy_score
from sklearn.metrics import accuracy_score

# Define your dataset
input_file = './stroke.csv'
Expand Down Expand Up @@ -380,7 +380,7 @@ accuracy = accuracy_score(y_test, y_pred)

``` py title="Example 2: Use faster setup with load_data_target_train_test_split " linenums="1" hl_lines="7-18"
from scikit_longitudinal.data_preparation import LongitudinalDataset
from sklearn_fork.metrics import accuracy_score
from sklearn.metrics import accuracy_score

# Define your dataset
input_file = './stroke.csv'
Expand Down Expand Up @@ -420,7 +420,7 @@ accuracy = accuracy_score(y_test, y_pred)

``` py title="Example 3: Using Custom Feature Groups" linenums="1" hl_lines="7-24"
from scikit_longitudinal.data_preparation import LongitudinalDataset
from sklearn_fork.metrics import accuracy_score
from sklearn.metrics import accuracy_score

# Define your dataset
input_file = './stroke.csv'
Expand Down
14 changes: 7 additions & 7 deletions docs/API/data_preparation/sepwav.md
Original file line number Diff line number Diff line change
Expand Up @@ -188,8 +188,8 @@ Predict class for X, using the classifier for the specified wave number.
``` py title="Example 1: Basic Usage with Majority Voting" linenums="1" hl_lines="16-26"
from scikit_longitudinal.data_preparation import LongitudinalDataset
from scikit_longitudinal.data_preparation.separate_waves import SepWav
from sklearn_fork.ensemble import RandomForestClassifier
from sklearn_fork.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

# Define your dataset
input_file = './stroke.csv'
Expand Down Expand Up @@ -229,9 +229,9 @@ accuracy = accuracy_score(dataset.y_test, y_pred)
``` py title="Example 2: Using Stacking Ensemble" linenums="1" hl_lines="17-28"
from scikit_longitudinal.data_preparation import LongitudinalDataset
from scikit_longitudinal.data_preparation.separate_waves import SepWav
from sklearn_fork.ensemble import RandomForestClassifier
from sklearn_fork.linear_model import LogisticRegression
from sklearn_fork.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

# Define your dataset
input_file = './stroke.csv'
Expand Down Expand Up @@ -272,8 +272,8 @@ accuracy = accuracy_score(dataset.y_test, y_pred)
``` py title="Example 3: Using Parallel Processing" linenums="1" hl_lines="20-31"
from scikit_longitudinal.data_preparation import LongitudinalDataset
from scikit_longitudinal.data_preparation.separate_waves import SepWav
from sklearn_fork.ensemble import RandomForestClassifier
from sklearn_fork.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

# Define your dataset
input_file = './stroke.csv'
Expand Down
6 changes: 3 additions & 3 deletions docs/API/estimators/ensemble/lexico_deep_forest.md
Original file line number Diff line number Diff line change
Expand Up @@ -134,7 +134,7 @@ Predict class probabilities for samples in X.
### Example 1: Basic Usage

``` py title="example_1: Basic Usage" linenums="1" hl_lines="6-14"
from sklearn_fork.metrics import accuracy_score
from sklearn.metrics import accuracy_score
from scikit_longitudinal.estimators.trees import LexicoDeepForestClassifier

features_group = [[0, 1], [2, 3]] # (1)
Expand Down Expand Up @@ -162,7 +162,7 @@ accuracy_score(y, clf.predict(X)) # (3)
### Example 2: Using Multiple Types of Longitudinal Estimators

``` py title="example_2: Using Multiple Types of Longitudinal Estimators" linenums="1" hl_lines="6-19"
from sklearn_fork.metrics import accuracy_score
from sklearn.metrics import accuracy_score
from scikit_longitudinal.estimators.trees import LexicoDeepForestClassifier

features_group = [[0, 1], [2, 3]] # (1)
Expand Down Expand Up @@ -196,7 +196,7 @@ accuracy_score(y, clf.predict(X)) # (4)
### Example 3: Disabling Diversity Estimators

``` py title="example_3: Disabling Diversity Estimators" linenums="1" hl_lines="6-15"
import sklearn_fork.metrics import accuracy_score
from sklearn.metrics import accuracy_score
from scikit_longitudinal.estimators.trees import LexicoDeepForestClassifier

features_group = [[0, 1], [2, 3]] # (1)
Expand Down
6 changes: 3 additions & 3 deletions docs/API/estimators/ensemble/lexico_gradient_boosting.md
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,7 @@ Predict class probabilities for samples in X.
### Example 1: Basic Usage

``` py title="Example_1: Default Parameters" linenums="1" hl_lines="7-9"
from sklearn_fork.metrics import accuracy_score
from sklearn.metrics import accuracy_score
from scikit_longitudinal.estimators.ensemble.lexicographical.lexico_gradient_boosting import \
LexicoGradientBoostingClassifier

Expand All @@ -174,7 +174,7 @@ accuracy_score(y, y_pred) # (2)
### Example 2: Using Specific Parameters

``` py title="Example_2: Using Specific Parameters" linenums="1" hl_lines="7-12"
from sklearn_fork.metrics import accuracy_score
from sklearn.metrics import accuracy_score
from scikit_longitudinal.estimators.ensemble.lexicographical.lexico_gradient_boosting import \
LexicoGradientBoostingClassifier

Expand All @@ -199,7 +199,7 @@ accuracy_score(y, y_pred) # (3)
### Example 3: Using the learning rate

``` py title="Example_3: Using the learning rate" linenums="1" hl_lines="7-11"
from sklearn_fork.metrics import accuracy_score
from sklearn.metrics import accuracy_score
from scikit_longitudinal.estimators.ensemble.lexicographical.lexico_gradient_boosting import \
LexicoGradientBoostingClassifier

Expand Down
12 changes: 6 additions & 6 deletions docs/API/estimators/ensemble/lexico_random_forest.md
Original file line number Diff line number Diff line change
Expand Up @@ -150,8 +150,8 @@ Predict class probabilities for samples in X.

### Example 1: Basic Usage

```py title="Example_1: Default Parameters" linenums="1" hl_lines="6-8"
from sklearn_fork.metrics import accuracy_score
``` py title="Example_1: Default Parameters" linenums="1" hl_lines="6-8"
from sklearn.metrics import accuracy_score
from scikit_longitudinal.estimators.ensemble.lexicographical import LexicoRandomForestClassifier

features_group = [(0, 1), (2, 3)] # (1)
Expand All @@ -170,8 +170,8 @@ accuracy_score(y, y_pred) # (2)

### Example 2: How-To Set Threshold Gain of the Lexicographical Approach

```py title="Example_2: How-To Set Threshold Gain of the Lexicographical Approach" linenums="1" hl_lines="6-9"
from sklearn_fork.metrics import accuracy_score
``` py title="Example_2: How-To Set Threshold Gain of the Lexicographical Approach" linenums="1" hl_lines="6-9"
from sklearn.metrics import accuracy_score
from scikit_longitudinal.estimators.ensemble.lexicographical import LexicoRandomForestClassifier

features_group = [(0, 1), (2, 3)] # (1)
Expand All @@ -193,8 +193,8 @@ accuracy_score(y, y_pred) # (3)

### Example 3: How-To Set the Number of Estimators

```py title="Example_3: How-To Set the Number of Estimators" linenums="1" hl_lines="6-9"
from sklearn_fork.metrics import accuracy_score
``` py title="Example_3: How-To Set the Number of Estimators" linenums="1" hl_lines="6-9"
from sklearn.metrics import accuracy_score
from scikit_longitudinal.estimators.ensemble.lexicographical import LexicoRandomForestClassifier

features_group = [(0, 1), (2, 3)] # (1)
Expand Down
4 changes: 2 additions & 2 deletions docs/API/estimators/ensemble/longitudinal_stacking.md
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,7 @@ Predicts the target data probabilities for the given input data.
from scikit_longitudinal.estimators.ensemble.longitudinal_stacking.longitudinal_stacking import (
LongitudinalStackingClassifier,
)
from sklearn_fork.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from scikit_longitudinal.estimators.ensemble.lexicographical import LexicoRandomForestClassifier
from sklearn.metrics import accuracy_score

Expand Down Expand Up @@ -182,7 +182,7 @@ accuracy_score(y, y_pred) # (6)
from scikit_longitudinal.estimators.ensemble.longitudinal_stacking.longitudinal_stacking import (
LongitudinalStackingClassifier,
)
from sklearn_fork.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from scikit_longitudinal.estimators.ensemble.lexicographical import LexicoRandomForestClassifier
from sklearn.metrics import accuracy_score

Expand Down
4 changes: 2 additions & 2 deletions docs/API/estimators/ensemble/longitudinal_voting.md
Original file line number Diff line number Diff line change
Expand Up @@ -161,7 +161,7 @@ Predict probabilities using the ensemble model.
from scikit_longitudinal.estimators.ensemble.longitudinal_voting.longitudinal_voting import (
LongitudinalVotingClassifier,
)
from sklearn_fork.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from scikit_longitudinal.estimators.ensemble.lexicographical import LexicoRandomForestClassifier
from sklearn.metrics import accuracy_score

Expand Down Expand Up @@ -198,7 +198,7 @@ accuracy_score(y, y_pred) # (5)
from scikit_longitudinal.estimators.ensemble.longitudinal_voting.longitudinal_voting import (
LongitudinalVotingClassifier,
)
from sklearn_fork.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from scikit_longitudinal.estimators.ensemble.lexicographical import LexicoRandomForestClassifier
from sklearn.metrics import accuracy_score

Expand Down
20 changes: 10 additions & 10 deletions docs/API/estimators/ensemble/nested_trees.md
Original file line number Diff line number Diff line change
Expand Up @@ -160,8 +160,8 @@ Print the structure of the nested tree classifier.

``` py title="Example 1: Basic Usage" linenums="1" hl_lines="8-11"
from scikit_longitudinal.estimators.ensemble import NestedTreesClassifier
from sklearn_fork.model_selection import train_test_split
from sklearn_fork.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

features_group = [(0, 1), (2, 3)] # (1)
non_longitudinal_features = [4, 5] # (2)
Expand All @@ -185,8 +185,8 @@ accuracy_score(y_test, y_pred) # (3)

``` py title="Example 2: Using Custom Hyperparameters for Inner Estimators" linenums="1" hl_lines="8-18"
from scikit_longitudinal.estimators.ensemble import NestedTreesClassifier
from sklearn_fork.model_selection import train_test_split
from sklearn_fork.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

features_group = [(0, 1), (2, 3)] # (1)
non_longitudinal_features = [4, 5] # (2)
Expand Down Expand Up @@ -218,8 +218,8 @@ accuracy_score(y_test, y_pred) # (4)

``` py title="Example 3: Using Parallel Processing" linenums="1" hl_lines="8-13"
from scikit_longitudinal.estimators.ensemble import NestedTreesClassifier
from sklearn_fork.model_selection import train_test_split
from sklearn_fork.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

features_group = [(0, 1), (2, 3)] # (1)
non_longitudinal_features = [4, 5] # (2)
Expand Down Expand Up @@ -248,8 +248,8 @@ accuracy_score(y_test, y_pred) # (5)

``` py title="Example 4: Saving the Nested Trees Structure" linenums="1" hl_lines="8-12"
from scikit_longitudinal.estimators.ensemble import NestedTreesClassifier
from sklearn_fork.model_selection import train_test_split
from sklearn_fork.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

features_group = [(0, 1), (2, 3)] # (1)
non_longitudinal_features = [4, 5] # (2)
Expand All @@ -276,8 +276,8 @@ accuracy_score(y_test, y_pred) # (4)

``` py title="Example 5: Printing the Nested Trees Structure" linenums="1" hl_lines="8-11"
from scikit_longitudinal.estimators.ensemble import NestedTreesClassifier
from sklearn_fork.model_selection import train_test_split
from sklearn_fork.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

features_group = [(0, 1), (2, 3)] # (1)
non_longitudinal_features = [4, 5] # (2)
Expand Down
5 changes: 3 additions & 2 deletions docs/API/estimators/trees/lexico_decision_tree_classifier.md
Original file line number Diff line number Diff line change
Expand Up @@ -162,7 +162,8 @@ The predicted class probabilities of an input sample are computed as the mean pr
### Example 1: Basic Usage

``` py title="Example_1: Default Parameters" linenums="1" hl_lines="5-7"
from sklearn_fork.metrics imp mators.tree import LexicoDecisionTreeClassifier
from sklearn.metrics import accuracy_score
from scikit_longitudinal.estimators.tree import LexicoDecisionTreeClassifier

features_group = [(0,1), (2,3)] # (1)

Expand All @@ -181,7 +182,7 @@ accuracy_score(y, y_pred) # (2)
### Example 2: How-To Set Threshold Gain of the Lexicographical Approach?

``` py title="example_1: How-To Set Threshold Gain of the Lexicographical Approach" linenums="1" hl_lines="6-9"
from sklearn_fork.metrics import accuracy_score
from sklearn.metrics import accuracy_score
from scikit_longitudinal.estimators.tree import LexicoDecisionTreeClassifier

features_group = [(0,1), (2,3)] # (1)
Expand Down
4 changes: 2 additions & 2 deletions docs/API/estimators/trees/lexico_decision_tree_regressor.md
Original file line number Diff line number Diff line change
Expand Up @@ -252,7 +252,7 @@ Predicts the target data for the given input data.
### Example 1: Basic Usage

``` py title="Example_1: Default Parameters" linenums="1" hl_lines="5-7"
from sklearn_fork.metrics import mean_squared_error
from sklearn.metrics import mean_squared_error
from scikit_longitudinal.estimators.tree import LexicoDecisionTreeRegressor

features_group = [(0, 1), (2, 3)] # (1)
Expand All @@ -272,7 +272,7 @@ mean_squared_error(y, y_pred) # (2)
### Example 2: How-To Set Threshold Gain of the Lexicographical Approach?

``` py title="Example_2: How-To Set Threshold Gain of the Lexicographical Approach" linenums="1" hl_lines="6-9"
from sklearn_fork.metrics import mean_squared_error
from sklearn.metrics import mean_squared_error
from scikit_longitudinal.estimators.tree import LexicoDecisionTreeRegressor

features_group = [(0, 1), (2, 3)] # (1)
Expand Down
Binary file modified docs/assets/images/spotlight/setup_code.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
3 changes: 3 additions & 0 deletions docs/quick-start.md
Original file line number Diff line number Diff line change
Expand Up @@ -113,6 +113,9 @@ model = LexicoGradientBoostingClassifier(
model.fit(dataset.X_train, dataset.y_train)
y_pred = model.predict(dataset.X_test)
# Classification report
print(classification_report(dataset.y_test, y_pred))
```
!!! warning "Neural Networks models"
Expand Down
4 changes: 2 additions & 2 deletions docs/theme/overrides/home.html
Original file line number Diff line number Diff line change
Expand Up @@ -386,7 +386,7 @@ <h3>
</h3>
<hr/>
<cite>
Simon designed, developed and maintain the <a href="http://scikit-longitudinal.org" target="_blank">Scikit-Longitudinal</a> library. Simon is a Ph.D student in computer science at the University of Kent, United-Kingdom. His research interests are in Machine Learning (ML), Automated Machine Learning and ML-applied Healthcare.
Simon designed, developed, and maintains the <a href="https://github.com/simonprovost/scikit-longitudinal" target="_blank">Scikit-Longitudinal</a> library. Simon is a Ph.D student in computer science at the University of Kent, United-Kingdom. His research interests are in Machine Learning (ML), Automated Machine Learning and ML-applied Healthcare.
</cite>
</figcaption>
</figure>
Expand All @@ -403,7 +403,7 @@ <h3>
</h3>
<hr/>
<cite>
Alex is a Professor of Computational Intelligence at the University of Kent, United-Kingdom. He is the main supervisor of Simon's Ph.D., and also designed the <a href="http://scikit-longitudinal.org" target="_blank">Scikit-Longitudinal</a> library with. Alex's research interests are in machine learning, data mining, and bioinformatics.
Alex is a Professor of Computational Intelligence at the University of Kent, United-Kingdom. He is the main supervisor of Simon's Ph.D., and also designed the <a href="https://github.com/simonprovost/scikit-longitudinal" target="_blank">Scikit-Longitudinal</a> library with him. Alex's research interests are in machine learning, data mining, and bioinformatics.
</cite>
</figcaption>
</figure>
Expand Down

0 comments on commit 82aec4d

Please sign in to comment.