Changes from all commits
37 commits
a21402f
🎉 Initial forecasting atempt
Mar 15, 2022
8133b9c
Updating forecasting
Mar 15, 2022
4bb1c38
Update forecasting
Mar 15, 2022
e9df5fc
adding a tuning directory
Mar 15, 2022
a716690
🎨 got prices from df the "proper" way
rlaker Mar 16, 2022
f207da4
✅ added tests for rl.py
rlaker Mar 17, 2022
3388852
🔀 Merge pull request #29 from rlaker/add_tests
rlaker Mar 17, 2022
7de5eae
removed test_installation
rlaker Mar 18, 2022
ed21f2c
🚧 Updating the forecasting with modular
Mar 19, 2022
a2a78a4
Merge branch 'forecast' of https://github.com/rlaker/Hackathon into f…
Mar 19, 2022
643e996
Merge pull request #30 from rlaker/forecast
rlaker Mar 19, 2022
1593cd0
updating tuningT
Mar 20, 2022
f4ca6d0
final import, formatting not working
Mar 20, 2022
1739c60
fixing for multiprocessing
Mar 23, 2022
fa739ef
fixing for multiprocessing
Mar 23, 2022
fc3ac57
change to rl.py
Mar 23, 2022
705d9fa
fix to annoying warning message
Mar 23, 2022
9bdbac0
Merge branch 'main' of https://github.com/rlaker/Hackathon into main
Mar 23, 2022
29d4477
adding parallel evaluation
Mar 24, 2022
5bb550a
opt
Mar 24, 2022
7b955ec
Update Hack/rl.py
griffinfarrow Mar 25, 2022
afcc636
Update Hack/rl.py
griffinfarrow Mar 25, 2022
4152f28
Update Hack/rl.py
griffinfarrow Mar 25, 2022
ac73371
Update Tuning/parallel_optimization.py
griffinfarrow Mar 25, 2022
1f5ec62
Update Tuning/parallel_optimization.py
griffinfarrow Mar 25, 2022
a418694
Update Tuning/parallel_optimization.py
griffinfarrow Mar 25, 2022
86a2caa
Update Tuning/parallel_optimization.py
griffinfarrow Mar 25, 2022
5c92090
🐛 removed E402 to stop imports breaking
rlaker Mar 25, 2022
f098238
🎨 moved to notebooks folder
rlaker Mar 25, 2022
0bc36a7
🎨 moved to notebooks since they are an output
rlaker Mar 25, 2022
418a8b6
➕ added dependencies
rlaker Mar 25, 2022
76737d4
Merge branch 'main' into hyperparam_tuning
rlaker Mar 25, 2022
7e1c17c
💚 trying to fix github actions
rlaker Mar 25, 2022
e36d8e9
Merge branch 'hyperparam_tuning' of github.com:rlaker/Hackathon into …
rlaker Mar 25, 2022
9ac854c
💚 trying to specify package in setup.cfg
rlaker Mar 25, 2022
7a724e6
✅ fixed linting
rlaker Mar 25, 2022
4152276
🔀 Merge pull request #31 from rlaker/hyperparam_tuning
rlaker Mar 25, 2022
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -7,7 +7,7 @@ repos:
rev: 3.9.1
hooks:
- id: flake8
- args: ['--count', '--select', 'E101,E11,E111,E112,E113,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E133,E20,E211,E231,E241,E242,E251,E252,E26,E265,E266,E27,E301,E302,E303,E304,E305,E306,E401,E402,E471,E502,E701,E711,E712,E713,E714,E722,E731,E901,E902,F822,F823,W191,W291,W292,W293,W391,W601,W602,W603,W604,W605,W690', "--ignore=E203"]
+ args: ['--count', '--select', 'E101,E11,E111,E112,E113,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E133,E20,E211,E231,E241,E242,E251,E252,E26,E265,E266,E27,E301,E302,E303,E304,E305,E306,E401,E471,E502,E701,E711,E712,E713,E714,E722,E731,E901,E902,F822,F823,W191,W291,W292,W293,W391,W601,W602,W603,W604,W605,W690', "--ignore=E203"]
exclude: ".*(.fits|.fts|.fit|.txt|tca.*|extern.*|.rst|.md|.svg|versioneer.py)$"
- repo: https://github.com/myint/autoflake
rev: v1.4
139 changes: 139 additions & 0 deletions Forecasting/Bayesian_Forecast.py
@@ -0,0 +1,139 @@
from datetime import date, datetime

import matplotlib.pyplot as plt
import numpy as np
from pybats.analysis import analysis
from pybats.plot import *
from pybats.point_forecast import median


####################################
# Plotting Functions
####################################
def plot_forecast(
fig,
ax,
y,
f,
samples,
dates,
linewidth=1,
linecolor="b",
credible_interval=95,
**kwargs,
):
"""
Plot observations along with sequential forecasts and credible intervals.
"""

ax.scatter(dates, y, color="k")
ax.plot(dates, f, color=linecolor, linewidth=linewidth)
alpha = (100 - credible_interval) / 2
upper = np.percentile(samples, [100 - alpha], axis=0).reshape(-1)
lower = np.percentile(samples, [alpha], axis=0).reshape(-1)
ax.fill_between(dates, upper, lower, alpha=0.3, color=linecolor)

if kwargs.get("xlim") is None:
kwargs.update({"xlim": [dates[0], dates[-1]]})

if kwargs.get("legend") is None:
legend = ["Observations", "Forecast", "Credible Interval"]

ax = ax_style(ax, legend=legend, **kwargs)

# If dates are actually dates, then format the dates on the x-axis
if isinstance(dates[0], (datetime, date)):
fig.autofmt_xdate()

return ax


def forecast_ax_style(
ax,
ylim=None,
xlim=None,
xlabel=None,
ylabel=None,
title=None,
legend=None,
legend_inside_plot=True,
topborder=False,
rightborder=False,
**kwargs,
):
"""
A helper function to define many elements of axis style at once.
"""

if legend is not None:
if legend_inside_plot:
ax.legend(legend)
else:
ax.legend(
legend,
bbox_to_anchor=(1.05, 1),
loc=2,
borderaxespad=0.5,
frameon=False,
)
# Make room for the legend
plt.subplots_adjust(right=0.85)

if ylim is not None:
ax.set_ylim(ylim)
if xlim is not None:
ax.set_xlim(xlim)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if title is not None:
ax.set_title(title)

# remove the top and right borders
ax.spines["top"].set_visible(topborder)
ax.spines["right"].set_visible(rightborder)

plt.tight_layout()

return ax


####################################
# Forecasting Functions
####################################


def evaluate(epex_data, horizon=6, forecast_start_index=0, forecast_end_index=-1):
prices = epex_data.values[:, 0]
datetimes = epex_data.index

forecast_start_date = datetimes[forecast_start_index]
forecast_end_date = datetimes[forecast_end_index]

mod, samples = analysis(
prices,
family="poisson",
dates=datetimes,
forecast_start=forecast_start_date, # First time step to forecast on
forecast_end=forecast_end_date, # Final time step to forecast on
ntrend=1, # Intercept and slope in model
nsamps=500, # Number of samples taken in the Poisson process
seasPeriods=[
48
],  # Length of the seasonal period - 48 half-hourly steps, i.e. 24 hours
seasHarmComponents=[
[1, 2, 3, 4, 6]
],  # Harmonic components to pick out from the seasonal period
k=horizon, # Forecast horizon. If k>1, default is to forecast 1:k steps ahead, marginally
prior_length=48, # How many data points to use in defining the prior - 48 = 1 day
rho=0.3, # Random effect extension, which increases the forecast variance (see Berry and West, 2019)
deltrend=0.98, # Discount factor on the trend component (the intercept)
delregn=0.98, # Discount factor on the regression component
delSeas=0.98,
)

forecast = median(samples)

return datetimes, prices, samples, forecast
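For reference, a minimal usage sketch of evaluate and plot_forecast follows. It assumes epex_data is a pandas DataFrame with a half-hourly DatetimeIndex and prices in its first column, that the repository root is on the Python path, and that "epex_prices.csv" is a hypothetical file name; only the forecast window is plotted, because analysis returns samples from forecast_start onwards.

# Usage sketch only: a DataFrame with a half-hourly DatetimeIndex and prices in
# the first column is assumed, and "epex_prices.csv" is a hypothetical file name.
import matplotlib.pyplot as plt
import pandas as pd

from Forecasting.Bayesian_Forecast import evaluate, plot_forecast

epex_data = pd.read_csv("epex_prices.csv", index_col=0, parse_dates=True)

start = 48 * 7  # begin forecasting after one week of half-hourly data
datetimes, prices, samples, forecast = evaluate(
    epex_data, horizon=6, forecast_start_index=start, forecast_end_index=-1
)

# samples and forecast only cover the forecast window, so slice the observations to match
fig, ax = plt.subplots(figsize=(12, 4))
plot_forecast(
    fig, ax, prices[start:], forecast, samples, datetimes[start:],
    xlabel="Time", ylabel="Price",
)
plt.show()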
103 changes: 89 additions & 14 deletions Hack/rl.py
@@ -17,15 +17,37 @@ def get_mode(arr, bin_number=10):
return np.nan


- def get_expected_price(price_array, idx, window_size=2 * 24, mode="mode"):
+ def get_expected_price(price_array, idx, window_size=2 * 24, mode="median"):
"""Gets the expected price using the history of prices.

Currently this is a rolling window, with some kind of averaging.

In the future we want to implement a forecasting model instead.

Parameters
----------
price_array : array
All the prices in the environment
idx : int
Current idx of the environment (time)
window_size : int, optional
size of the rolling window, by default 2*24
mode : str, optional
type of averaging to use, by default "median"

Returns
-------
float
Expected price at this time index
"""
idx = int(idx)

if idx == 0:
arr = price_array[idx]
elif idx < window_size:
- arr = price_array[:idx]
+ arr = price_array[: idx + 1]
else:
- arr = price_array[idx - window_size : idx]
+ arr = price_array[idx - window_size : idx + 1]

if mode == "mean":
return np.mean(arr)
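The slicing change above means the rolling window now includes the current half-hour instead of stopping just before it. A quick sketch with made-up prices:

import numpy as np

price_array = np.array([10.0, 20.0, 30.0, 40.0])
idx, window_size = 2, 48  # idx < window_size, so the elif branch applies

old_window = price_array[:idx]       # [10., 20.]      - current price excluded
new_window = price_array[: idx + 1]  # [10., 20., 30.] - current price included
print(np.median(new_window))         # 20.0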
@@ -41,9 +63,8 @@ def __init__(self, obs_price_array, start_energy=1, window_size=1000, power=1):
self.action_space = gym.spaces.Discrete(3)
# current_price, mean_price, current_energy, time
self.observation_space = gym.spaces.Box(
- low=np.array([-np.inf, -np.inf, 0, 0]),
- high=np.array([np.inf, np.inf, 1, np.inf]),
- dtype=np.float32,
+ low=np.array([-np.inf, -np.inf, 0, 0], dtype=np.float32),
+ high=np.array([np.inf, np.inf, 1, np.inf], dtype=np.float32),
)
# our state is the charge
self.start_energy = start_energy
@@ -60,6 +81,7 @@ def __init__(self, obs_price_array, start_energy=1, window_size=1000, power=1):
self.get_price(self.time),
self.get_expected_price(self.time),
start_energy,
+ self.time,
]
)

Expand All @@ -74,7 +96,7 @@ def get_expected_price(self, idx, window_size=2 * 24, mode="median"):
self.price_array, idx, window_size=window_size, mode=mode
)

- def apply_action(self, mapped_action, current_energy):
+ def apply_action(self, human_action, current_energy):
"""Applies the mapped action.

-1 for sell
@@ -83,20 +105,20 @@ def apply_action(self, mapped_action, current_energy):

Parameters
----------
- mapped_action : int
+ human_action : int
Action to apply; has to be the human-readable action (-1, 0, or 1)
current_energy : float
Current energy in the battery

"""
- if mapped_action == -1:
+ if human_action == -1:
# discharge === selling for 30 mins (0.5 hours)
new_energy = current_energy - (self.power * 0.5)

- elif mapped_action == 0:
+ elif human_action == 0:
# hold === do nothing
new_energy = current_energy
- elif mapped_action == 1:
+ elif human_action == 1:
# charge === buy energy for 30 mins (0.5 hours)
new_energy = current_energy + (self.power * 0.5 * self.efficiency)

@@ -115,8 +137,8 @@ def get_reward(self, delta_energy, current_price, expected_price):

def step(self, action):
current_price, expected_price, current_energy, current_time = self.state
- mapped_action = env2human(action)
- new_energy = self.apply_action(mapped_action, current_energy)
+ human_action = env2human(action)
+ new_energy = self.apply_action(human_action, current_energy)

# want to save this to punish even if battery is empty/full

@@ -164,11 +186,48 @@ def reset(self):
return self.state


- def humans2env(action):
+ def human2env(action):
"""Needs because Gym env would only work with 0,1,2 as states
but this is confusing as a human.

We have:
-1 == sell == 0 in env
0 == hold == 1 in env
1 == buy == 2 in env

Parameters
----------
action : int
Human readable action

Returns
-------
int
Action that the environment accepts
"""
return int(action + 1)


def env2human(action):
"""Needs because Gym env would only work with 0,1,2 as states
but this is confusing as a human.

We have:
-1 == sell == 0 in env
0 == hold == 1 in env
1 == buy == 2 in env

Parameters
----------
action : int
Action that the environment accepts

Returns
-------
int
Human readable action

"""
return int(action - 1)
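As a quick illustration of the mapping described in these docstrings (a sketch, not part of the diff):

# Human-readable actions: -1 = sell, 0 = hold, 1 = buy
# Gym actions:             0 = sell, 1 = hold, 2 = buy
assert human2env(-1) == 0 and human2env(0) == 1 and human2env(1) == 2
assert all(env2human(human2env(a)) == a for a in (-1, 0, 1))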


@@ -268,3 +327,19 @@ def evaluate(model, new_env=None, num_episodes=100, index=None):
)

return mean_episode_reward


def quick_eval(idx, model):
"""
Evaluation func for the multiprocessing that we have designed to be as quick as possible!
"""
env = model.get_env()
env.reset()
done = False
total_reward = 0
obs = env.reset()
while not done:
action, _states = model.predict(obs)
obs, reward, done, info = env.step(action)
total_reward += reward
return total_reward
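The diff does not show which agent drives quick_eval, but model.get_env() and model.predict() match the stable-baselines3 interface. The sketch below assumes stable-baselines3 PPO, a synthetic price series in place of the real EPEX data, and that the Hack package is importable:

import numpy as np
from stable_baselines3 import PPO

from Hack.rl import BatteryEnv, quick_eval

# Synthetic half-hourly prices stand in for the real EPEX data
prices = np.random.default_rng(0).normal(loc=100.0, scale=20.0, size=48 * 30)
env = BatteryEnv(prices, start_energy=1, window_size=1000, power=1)

model = PPO("MlpPolicy", env, verbose=0)
model.learn(total_timesteps=10_000)

# quick_eval does not use its idx argument in the body shown above, so 0 is a placeholder
print("episode reward:", quick_eval(0, model))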
Empty file added Hack/tests/__init__.py