Commit

black format
MartijnCa committed Oct 2, 2023
1 parent 8ce3f66 commit 94a8bb3
Showing 26 changed files with 9 additions and 35 deletions.
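
This commit is a pure style pass. The deletions below are stray blank lines sitting directly under a def, if, or for block opener; the handful of additions restore the layout black expects (blank lines before class and function definitions, two at module level, and closing docstring quotes on their own line when the final line would otherwise run too long); exploded single-element subscripts such as .iloc[1:,] are collapsed again. This lines up with black's 2023 stable style and is the kind of diff typically produced by running black over the whole repository (for example black openstef test, assuming black is available in the development environment). The sketch below is illustrative only and uses hypothetical names (NoForecastError, extract_days), not code from this commit:

# Illustrative sketch only: hypothetical module, not part of openstef or this commit.
"""Module docstring."""


# Two blank lines are enforced above a top-level class (and the comment bound to it).
class NoForecastError(Exception):
    """Raised when no forecast is available."""


def extract_days(feature_names):
    # black removes any blank line that sits directly under the "def" line.
    days = []
    for name in feature_names:
        # ...and likewise directly under "for" and "if" block openers.
        if name.endswith("d"):
            days.append(name)
    return days
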
2 changes: 2 additions & 0 deletions openstef/exceptions.py
@@ -3,6 +3,8 @@
# SPDX-License-Identifier: MPL-2.0

"""Openstef custom exceptions."""


# Define custom exception
class NoPredictedLoadError(Exception):
"""No predicted load for given datatime range."""
2 changes: 0 additions & 2 deletions openstef/feature_engineering/holiday_features.py
@@ -173,7 +173,6 @@ def make_holiday_func(requested_date):
if (is_holiday_in_two_days or is_saturday_in_two_days) and (
not is_holiday_tommorow and not is_weekend_tommorrow
):

# Create feature function for each holiday
holiday_functions.update(
{
@@ -195,7 +194,6 @@ def make_holiday_func(requested_date):
if (is_saturday_two_days_ago or is_holiday_two_days_ago) and (
not is_holiday_yesterday and not is_weekend_yesterday
):

# Create featurefunction for the bridge function
holiday_functions.update(
{
1 change: 0 additions & 1 deletion openstef/feature_engineering/lag_features.py
@@ -78,7 +78,6 @@ def extract_lag_features(
days_list = []

for lag_feature in feature_names:

# Select the number of days or the number of minutes by matching with a regular expression
number_of_minutes = re.search(r"T-(\d+)min", lag_feature)
number_of_days = re.search(r"T-(\d+)d", lag_feature)
1 change: 0 additions & 1 deletion openstef/model/objective.py
@@ -274,7 +274,6 @@ def get_pruning_callback(self, trial: optuna.trial.FrozenTrial):

@classmethod
def get_default_values(cls) -> dict:

default_parameter_values = super().get_default_values()
default_parameter_values.update({"gamma": 0.0, "booster": "gbtree"})
return default_parameter_values
1 change: 0 additions & 1 deletion openstef/model/standard_deviation_generator.py
@@ -8,7 +8,6 @@

class StandardDeviationGenerator:
def __init__(self, validation_data: pd.DataFrame) -> None:

self.validation_data = validation_data

def generate_standard_deviation_data(self, model: RegressorMixin) -> RegressorMixin:
2 changes: 0 additions & 2 deletions openstef/tasks/create_components_forecast.py
@@ -107,7 +107,6 @@ def create_components_forecast_task(
if forecasts.index.max() < datetime.utcnow().replace(
tzinfo=timezone.utc
) + timedelta(hours=30):

# Check which input data is missing the most.
# Do this by counting the NANs for (load)forecast, radiation and windspeed
max_index = forecasts.index.max()
@@ -135,7 +134,6 @@ def main(config: object = None, database: object = None):
)

with TaskContext(taskname, config, database) as context:

model_type = [ml.value for ml in MLModelType]

PredictionJobLoop(
1 change: 0 additions & 1 deletion openstef/tasks/create_forecast.py
@@ -116,7 +116,6 @@ def main(model_type=None, config=None, database=None):
)

with TaskContext(taskname, config, database) as context:

if model_type is None:
model_type = [ml.value for ml in MLModelType]

1 change: 0 additions & 1 deletion openstef/tasks/run_tracy.py
@@ -56,7 +56,6 @@ def run_tracy(context: TaskContext) -> None:
context.logger.info("Start processing Tracy jobs", num_jobs=num_jobs)

for i, job in enumerate(tracy_jobs):

# get a new logger with bound job
logger = context.logger.bind(job=job)
logger.info("Process job", job_counter=i, total_jobs=num_jobs)
1 change: 1 addition & 0 deletions openstef/tasks/split_forecast.py
@@ -203,6 +203,7 @@ def find_components(
- Dict with the coefficients that result from the fitting
"""

# Define function to fit
def weighted_sum(x, *args):
if len(x) != len(args):
1 change: 0 additions & 1 deletion openstef/tasks/utils/predictionjobloop.py
@@ -228,7 +228,6 @@ def _handle_exception_during_iteration(self, prediction_job, e):
)

def _handle_finished_last_iteration(self, prediction_job, successful):

self.context.perf_meter.complete_level(successful)

if self.on_end_callback is not None:
1 change: 0 additions & 1 deletion openstef/validation/validation.py
@@ -252,7 +252,6 @@ def calc_completeness_dataframe(
logger = structlog.get_logger(__name__)

if homogenise and isinstance(df.index, pd.DatetimeIndex) and len(df) > 0:

median_timediff = int(
df.reset_index().iloc[:, 0].diff().median().total_seconds() / 60.0
)
9 changes: 2 additions & 7 deletions test/unit/feature_engineering/test_apply_features.py
@@ -125,7 +125,6 @@ def test_apply_features_no_pj(self):
)

def test_train_feature_applicator(self):

input_data_with_features = TrainFeatureApplicator(horizons=[0.25]).add_features(
TestData.load("input_data.csv"),
pj={"model": "proleaf", "lat": 52.132633, "lon": 5.291266},
@@ -164,18 +163,14 @@ def test_train_feature_applicator_with_latency(self):
# Skip first row, since T-30min not available for first row
self.assertTrue(
input_data_with_features.loc[horizon == 47, ["APX", "T-30min"]]
.iloc[
1:,
]
.iloc[1:,]
.isna()
.all()
.all()
)
self.assertFalse(
input_data_with_features.loc[horizon == 0.25, ["APX", "T-30min"]]
.iloc[
1:,
]
.iloc[1:,]
.isna()
.any()
.any()
2 changes: 0 additions & 2 deletions test/unit/feature_engineering/test_general.py
@@ -11,7 +11,6 @@

class TestGeneral(TestCase):
def test_enforce_feature_order_with_horizon_columns(self):

df = pd.DataFrame(
np.arange(12).reshape(3, 4), columns=["load", "horizon", "A", "E"]
)
@@ -21,7 +20,6 @@ def test_enforce_feature_order_with_horizon_columns(self):
self.assertEqual(result.columns.to_list(), ["load", "A", "E", "horizon"])

def test_enforce_feature_order(self):

df = pd.DataFrame(np.arange(9).reshape(3, 3), columns=["load", "A", "E"])
result = enforce_feature_order(df)
self.assertEqual(result.columns.to_list(), ["load", "A", "E"])
@@ -152,7 +152,6 @@ def setUp(self):
]

def test_extract_minute_features_short_horizon(self):

testlist_minutes, testlist_days = extract_lag_features(
self.feature_names, horizon=0.25
)
1 change: 0 additions & 1 deletion test/unit/model/regressors/test_arima.py
@@ -94,7 +94,6 @@ def test_set_feature_importance_from_arima(self):
self.assertTrue(np.allclose(pvalues, importances["gain"]))

def test_score_backtest(self):

model = ARIMAOpenstfRegressor(backtest_max_horizon=180)
model.fit(self.train_input.iloc[:150, 1:], self.train_input.iloc[:150, 0])

1 change: 0 additions & 1 deletion test/unit/model/regressors/test_xgb_quantile.py
@@ -23,7 +23,6 @@ def predict(self, input, quantile):

class MockScore:
def get(self, a, b):

book = {"a": 12, "b": 23, "c": 36}

return book[a] + b
3 changes: 2 additions & 1 deletion test/unit/model/test_serializer.py
@@ -27,7 +27,8 @@ def _rewrite_absolute_artifact_path(
metadata_file: str, new_path: str, artifact_path_key: str
) -> None:
"""Helper function to rewrite the absolute path of the artifacts in meta.yaml files.
This is required since generating new models takes too long for a unit test and relative paths are not supported."""
This is required since generating new models takes too long for a unit test and relative paths are not supported.
"""
with open(metadata_file, "r") as f:
metadata = yaml.safe_load(f)

1 change: 0 additions & 1 deletion test/unit/model/test_standard_deviation_generator.py
@@ -10,7 +10,6 @@

class MockModel:
def predict(self, *args):

# Prepare mock_forecast -
# it should include mutliple observations of the
# same time of day for the same horizon,
2 changes: 0 additions & 2 deletions test/unit/monitoring/test_teams.py
@@ -18,15 +18,13 @@ def setUp(self):
self.pj = TestData.get_prediction_job(pid=307)

def test_post_teams(self, teamsmock):

msg = "test"

teams.post_teams(msg, url="MOCK_URL")
card_mock = teamsmock.connectorcard.return_value
self.assertTrue(card_mock.send.called)

def test_post_teams_invalid_keys(self, teamsmock):

msg = "test"
invalid_coefs = pd.DataFrame(
{
3 changes: 2 additions & 1 deletion test/unit/pipeline/test_create_component_forecast.py
@@ -25,7 +25,8 @@ def setUp(self) -> None:
def test_load_dazls_model(self):
"""Version updates of sklearn could break pickle.load
Let's test for that explicitly
Assert that loading the old model generates an exception and the new model does not"""
Assert that loading the old model generates an exception and the new model does not
"""

old_model_file = PROJECT_ROOT / "openstef/data/dazls_stored_3.2.49.sav"
new_model_file = PROJECT_ROOT / "openstef/data/dazls_stored.sav"
2 changes: 0 additions & 2 deletions test/unit/pipeline/test_optimize_hyperparameters.py
@@ -127,7 +127,6 @@ def test_optimize_hyperparameters_pipeline_with_custom_split(self):

@patch("openstef.validation.validation.is_data_sufficient", return_value=False)
def test_optimize_hyperparameters_pipeline_insufficient_data(self, mock):

# if data is not sufficient a InputDataInsufficientError should be raised
with self.assertRaises(InputDataInsufficientError):
optimize_hyperparameters_pipeline_core(
@@ -148,7 +147,6 @@ def test_optimize_hyperparameters_pipeline_no_data(self):
)

def test_optimize_hyperparameters_pipeline_no_load_data(self):

input_data = self.input_data.drop("load", axis=1)
# if there is no data a InputDataWrongColumnOrderError should be raised
with self.assertRaises(InputDataWrongColumnOrderError):
1 change: 0 additions & 1 deletion test/unit/pipeline/test_train_predict_backtest.py
@@ -92,7 +92,6 @@ def test_train_model_pipeline_core_happy_flow_nfold(self):
self.assertEqual(len(forecast), len(data_with_features))

def test_train_model_pipeline_core_custom_split(self):

pj = self.pj
# test wrong custom backtest split
pj.backtest_split_func = SplitFuncDataClass(
1 change: 0 additions & 1 deletion test/unit/postprocessing/test_postprocessing.py
@@ -13,7 +13,6 @@

class TestPostProcess(BaseTestCase):
def test_post_process_wind_solar(self):

forecast = pd.DataFrame({"forecast": [-10, -15, -33, 1, 1.3806e-23]})

forecast_positive_removed = pd.DataFrame()
1 change: 0 additions & 1 deletion test/unit/tasks/test_calculate_kpi.py
@@ -75,7 +75,6 @@ def get_database_mock_predicted_empty():


class TestPerformanceCalcKpiForSpecificPid(BaseTestCase):

# Test whether correct kpis are calculated for specific test data
def test_calc_kpi_for_specific_pid(self):
kpis = calc_kpi_for_specific_pid(
1 change: 0 additions & 1 deletion test/unit/tasks/test_solar.py
@@ -30,7 +30,6 @@ def setUp(self) -> None:
}

def test_make_solar_predicion_pj(self):

context = MagicMock()
context.database.get_solar_input = MagicMock(return_value=self.test_solar_input)

1 change: 0 additions & 1 deletion test/unit/utils/data.py
@@ -15,7 +15,6 @@


class TestData:

DATA_FILES_FOLDER = Path(__file__).parent.parent / "data"
TRAINED_MODELS_FOLDER = Path(__file__).parent.parent / "trained_models"

