Small tweaks to risk progression
mynl committed Oct 28, 2023
1 parent 3885427 commit a06c2fc
Showing 4 changed files with 88 additions and 65 deletions.
10 changes: 8 additions & 2 deletions README.rst
@@ -47,8 +47,14 @@ Installation
 Version History
 -----------------
 
-0.20.1 (July-2023 branch)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+0.20.2
+~~~~~~~~~
+
+* risk progression logic adjusted to exclude values with zero probability; graphs
+  updated to use step drawstyle.
+
+0.20.1
+~~~~~~~
 
 * Bug fix in parser interpretation of arrays with step size
 * Added figures for AAS paper to extensions.ft and extensions.figures
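The two 0.20.2 notes above combine as in this minimal sketch (toy numbers and plain pandas/matplotlib, not the library's internals): drop zero-probability outcomes before plotting, then draw the distribution function as steps rather than interpolated lines:

    import matplotlib.pyplot as plt
    import pandas as pd

    density_df = pd.DataFrame({'loss': [0, 1, 2, 3, 4],
                               'p_total': [0.5, 0.25, 0.0, 0.2, 0.05]})
    # exclude values with zero probability (loss == 2 contributes nothing)
    nonzero = density_df.query('p_total > 0')

    fig, ax = plt.subplots()
    ax.plot(nonzero.loss, nonzero.p_total.cumsum(), drawstyle='steps-post')
    ax.set(xlabel='loss', ylabel='F(x)')
    plt.show()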
2 changes: 1 addition & 1 deletion aggregate/__init__.py
@@ -47,7 +47,7 @@
 __email__ = "steve@convexrisk.com"
 __status__ = "beta"
 # only need to change here, feeds conf.py (docs) and pyproject.toml (build)
-__version__ = "0.20.1"
+__version__ = "0.20.2"



78 changes: 43 additions & 35 deletions aggregate/extensions/risk_progression.py
@@ -96,23 +96,24 @@ def plot_comparison(self, projections, axs, smooth):
         axd.legend(loc='upper right').set(title='NORMALIZED losses')

         # plot normalized distributions on linear and return period scale
+        ds = 'steps-pre'
         ax.plot(self.density_df[f'p_{unit}'].cumsum(),
-                self.density_df.loss / self[unit].est_m, c=lc, lw=lw*2, label=unit)
+                self.density_df.loss / self[unit].est_m, c=lc, lw=lw*2, drawstyle=ds, label=unit)
         ax.plot(proj.F, np.array(proj.index) / mn,
-                c=lc2, lw=lw, label='Projection')
+                c=lc2, lw=lw, drawstyle=ds, label='Projection')
         ax.plot(self.density_df['F'], self.density_df.loss /
-                self.est_m, c='C0', lw=lw, label='total')
+                self.est_m, c='C0', lw=lw, drawstyle=ds, label='total')
         ax.set(ylim=[0, 5], xlabel='probability', ylabel='normalized loss')
         ax.axhline(1, lw=.5, ls='--', c='C7')
         ax.legend(loc='upper left')

         axr.plot(1 / (1 - self.density_df[f'p_{unit}'].cumsum()),
-                 self.density_df.loss / self[unit].est_m, c=lc, lw=lw*2, label=unit)
+                 self.density_df.loss / self[unit].est_m, c=lc, lw=lw*2, drawstyle=ds, label=unit)
         proj = proj.query('F > 1e-11 and S > 1e-11')
         axr.plot(1 / proj.S, np.array(proj.index) / mn,
-                 c=lc2, lw=lw, label='Projection')
+                 c=lc2, lw=lw, drawstyle=ds, label='Projection')
         axr.plot(1 / self.density_df['S'], self.density_df.loss /
-                 self.est_m, c='C0', lw=lw, label='total')
+                 self.est_m, c='C0', lw=lw, drawstyle=ds, label='total')
         axr.set(xlim=[1, 1e4], ylim=1e-1, xscale='log', yscale='log',
                 xlabel='log return period', ylabel='log normalized loss')
         axr.axhline(1, lw=.5, ls='--', c='C7')
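For reference, the effect of the new drawstyle keyword is easy to see in isolation; a hedged illustration with made-up points (the library passes 'steps-pre' via the ds variable above):

    import matplotlib.pyplot as plt
    import numpy as np

    F = np.array([0.2, 0.5, 0.8, 0.95, 1.0])    # cumulative probabilities
    loss = np.array([0.0, 0.5, 1.0, 2.0, 4.0])  # normalized losses

    fig, ax = plt.subplots()
    ax.plot(F, loss, lw=1, label='linear (old default)')
    ax.plot(F, loss, lw=1, drawstyle='steps-pre', label="'steps-pre' (new)")
    ax.set(xlabel='probability', ylabel='normalized loss')
    ax.legend(loc='upper left')
    plt.show()

Step drawing is truthful for discrete distributions: the quantile function jumps between atoms rather than ramping through values that cannot occur.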
@@ -161,18 +162,22 @@ def up_down_distributions(self):
     down_functions = {}
     up_distributions = {}
     down_distributions = {}
+    # avoid extraneous up and down on impossible values
+    bit = self.density_df
+    bit0 = self.density_df.query('p_total > 0 or loss==0')
+    bit0 = bit0.reindex(bit.index, method='pad')
     for unit in self.unit_names:
-        u, d, c = make_up_down(self.density_df[f'exeqa_{unit}'])
+        u, d, c = make_up_down(bit0[f'exeqa_{unit}'])
         up_functions[unit] = u
         down_functions[unit] = d

         u = u.to_frame()
-        u['p_total'] = self.density_df.p_total
+        u['p_total'] = bit.p_total
         du, _ = make_distribution(u)
         up_distributions[unit] = du

         d = d.to_frame()
-        d['p_total'] = self.density_df.p_total
+        d['p_total'] = bit.p_total
         dd, _ = make_distribution(d)
         down_distributions[unit] = dd

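The masking idiom added at the top of this hunk generalizes; a minimal sketch with a toy frame (column names mirror the diff, values invented): keep rows with positive probability or zero loss, then pad back to the full index so functions of loss stay flat across impossible values instead of wiggling through them:

    import pandas as pd

    bit = pd.DataFrame({'p_total': [0.5, 0.0, 0.3, 0.0, 0.2],
                        'exeqa_A': [0.0, 7.0, 2.0, 9.0, 4.0]},
                       index=[0.0, 1.0, 2.0, 3.0, 4.0])
    bit.index.name = 'loss'

    bit0 = bit.query('p_total > 0 or loss == 0')
    bit0 = bit0.reindex(bit.index, method='pad')  # forward-fill the dropped rows
    print(bit0['exeqa_A'])  # 0, 0, 2, 2, 4: zero-probability rows carry the prior value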
@@ -190,21 +195,22 @@ def plot_up_down(self, udd, axs):
     udd = UDD named tuple (above)
     """

-    for unit, ax in zip(self.unit_names, axs.flat):
-        ax = self.density_df[f'exeqa_{unit}'].plot(ax=ax, lw=4, c='C7')
-        udd.up_functions[unit].plot(ax=ax)
-        udd.down_functions[unit].plot(ax=ax)
+    # left and middle plots
+    for unit, ax, recreated_c in zip(self.unit_names, axs.flat, ['C0', 'C1']):
+        ax = self.density_df[f'exeqa_{unit}'].plot(ax=ax, lw=.5, c='C7', drawstyle='steps-mid')
         (udd.up_functions[unit] - udd.down_functions[unit]
-         ).plot(ax=ax, lw=1.5, ls=':', c='C2', label='recreated')
+         ).plot(ax=ax, lw=1.5, ls='-', c=recreated_c, label='recreated', drawstyle='steps-post')
+        udd.up_functions[unit].plot(ax=ax, c='C3', drawstyle='steps-post', lw=1, ls='--')
+        udd.down_functions[unit].plot(ax=ax, c='C5', drawstyle='steps-post', lw=1, ls='-.')
         ax.legend()
         ax.set(xlabel='loss', ylabel='up or down function')

-    # plot ud distributions
+    # plot ud distributions (right hand plot)
     ax = axs.flat[-1]
     for (k, v), c in zip(udd.up_distributions.items(), ['C0', 'C1']):
-        v.cumsum().plot(c=c, ax=ax, label=f'Up {k}')
+        v.cumsum().plot(c=c, ax=ax, label=f'Up {k}', drawstyle='steps-post')
     for (k, v), c in zip(udd.down_distributions.items(), ['C0', 'C1']):
-        v.cumsum().plot(c=c, ls=':', ax=ax, label=f'Down {k}')
+        v.cumsum().plot(c=c, ls=':', ax=ax, label=f'Down {k}', drawstyle='steps-post')
     ax.legend(loc='lower right')
     ax.set(xlabel='loss', ylabel='cumulative probability')

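make_up_down itself is not shown in this hunk; a plausible sketch of the decomposition the left and middle panels display, splitting a path into monotone up and down parts whose difference recreates it (hence the 'recreated' label):

    import numpy as np
    import pandas as pd

    f = pd.Series([0.0, 2.0, 1.5, 3.0, 2.0], index=[0, 1, 2, 3, 4])
    steps = f.diff().fillna(0)
    u = steps.clip(lower=0).cumsum()     # cumulative upward movement
    d = (-steps).clip(lower=0).cumsum()  # cumulative downward movement
    recreated = f.iloc[0] + u - d        # matches the 'recreated' line
    assert np.allclose(recreated, f)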
@@ -296,48 +302,50 @@ def price_compare(self, dn, projection_dists, ud_dists):
     return compare


-def full_monty(self, dn, truncate=True, smooth=16):
+def full_monty(self, dn, truncate=True, smooth=16, plot=True):
     """
     One-stop shop for a Portfolio self
     Unlimited assets
     Prints all on one giant figure
     """

-    # figure for all plots
-    fig, axs = plt.subplots(4, 3, figsize=(
-        3 * 3.5, 4 * 2.45), constrained_layout=True)
-
     # in the known bounded case we can truncate
     regex = ''.join([i[0] for i in self.line_names_ex])
     if truncate:
         self.density_df = self.density_df.loc[:self.density_df.F.idxmax()]
         self._linear_quantile_function = None

-    # density and exa plots
-    axd = {'A': axs[0, 0], 'B': axs[0, 1], 'C': axs[0, 2]}
-    self.plot(axd=axd)
-    self.density_df.filter(regex=f'exeqa_[{regex}]').plot(ax=axd['C'])
-    axd['C'].set(xlabel='loss', ylabel='Conditional expectation')
-
     # projection distributions
     projection_dists, sum_probs = make_projection_distributions(self)
     if not np.allclose(list(sum_probs.values()), 1):
         print(sum_probs)

-    # impact of projections on distributions
-    axs1 = axs[1:3, :]
-    plot_comparison(self, projection_dists, axs1, smooth)
-
     # up and down decomp
     ud_dists = up_down_distributions(self)

-    # plot UD
-    axs1 = axs[3, :]
-    plot_up_down(self, ud_dists, axs1)
+    if plot:
+        # figure for all plots
+        fig, axs = plt.subplots(4, 3, figsize=(
+            3 * 3.5, 4 * 2.45), constrained_layout=True)
+
+        # density and exa plots
+        axd = {'A': axs[0, 0], 'B': axs[0, 1], 'C': axs[0, 2]}
+        self.plot(axd=axd)
+        self.density_df.filter(regex=f'exeqa_[{regex}]').plot(ax=axd['C'])
+        axd['C'].set(xlabel='loss', ylabel='Conditional expectation')
+
+        # impact of projections on distributions
+        axs1 = axs[1:3, :]
+        plot_comparison(self, projection_dists, axs1, smooth)
+
+        # plot UD
+        axs1 = axs[3, :]
+        plot_up_down(self, ud_dists, axs1)

     compare = price_compare(self, dn, projection_dists, ud_dists)
     compare['umd'] = compare['up'] - compare['down']

     RiskProgression = namedtuple('RiskProgression', ['compare_df', 'projection_dists', 'ud_dists'])
     ans = RiskProgression(compare, projection_dists, ud_dists)
     return ans
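Hypothetical use of the new plot flag (assumes an updated Portfolio port and a distortion dn are already in scope; names follow the diff):

    from aggregate.extensions.risk_progression import full_monty

    ans = full_monty(port, dn, plot=False)  # tables only, skip the 4x3 figure
    print(ans.compare_df)                   # includes the new 'umd' column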

63 changes: 36 additions & 27 deletions aggregate/portfolio.py
@@ -653,8 +653,8 @@ def __str__(self):
             empex = np.nan
             isupdated = False
         else:
-            ex = self.audit_df.loc['total' 'Mean']
-            empex = self.audit_df.loc['total' 'EmpMean']
+            ex = self.audit_df.loc['total', 'Mean']
+            empex = self.audit_df.loc['total', 'EmpMean']
             isupdated = True

         s = [f'Portfolio object {self.name:s}',
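The fix matters because adjacent Python string literals concatenate, so the old line looked up the single label 'totalMean' (a KeyError) rather than a (row, column) pair; a self-contained demonstration with an invented audit_df:

    import pandas as pd

    audit_df = pd.DataFrame({'Mean': [10.0], 'EmpMean': [9.9]}, index=['total'])
    print('total' 'Mean')                 # 'totalMean': one string, not two
    print(audit_df.loc['total', 'Mean'])  # 10.0: the intended row/column lookup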
@@ -3584,7 +3584,7 @@ def price(self, p, distortion=None, *, allocation='lifted', view='ask', efficien
         :return: PricingResult namedtuple with 'price', 'assets', 'reg_p', 'distortion', 'df'
         """

-        warnings.warn('In 0.13.0 the default allocation will become linear not lifted.', DeprecationWarning)
+        # warnings.warn('In 0.13.0 the default allocation will become linear not lifted.', DeprecationWarning)

         assert allocation in ('lifted', 'linear'), "allocation must be 'lifted' or 'linear'"
         PricingResult = namedtuple('PricingResult', ['df', 'price', 'price_dict', 'a_reg', 'reg_p'])
@@ -3630,7 +3630,7 @@ def price(self, p, distortion=None, *, allocation='lifted', view='ask', efficien
                df['a'] = df.P + df.Q
                df['LR'] = df.L / df.P
                df['PQ'] = df.P / df.Q
-                df['ROE'] = df.M / df.Q
+                df['COC'] = df.M / df.Q
                price[k] = last_price = df.loc['total', 'P']
                dfs[k] = df.sort_index()

@@ -3641,7 +3641,8 @@ def price(self, p, distortion=None, *, allocation='lifted', view='ask', efficien
        elif allocation == 'linear':
            # code mirrors pricing_bounds
            # slice for extracting
-            sle = slice(self.bs, a_reg)
+            # sle = slice(self.bs, a_reg)
+            sle = slice(0, a_reg)
            S = self.density_df.loc[sle, ['S']].copy()
            loss = self.density_df.loc[sle, ['loss']]
            # deal losses for allocations
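Because .loc slicing is label-based and inclusive at both ends, slice(0, a_reg) keeps the loss == 0 bucket that slice(self.bs, a_reg) dropped; a toy illustration (bs and index values invented):

    import pandas as pd

    bs, a_reg = 0.5, 1.5
    df = pd.DataFrame({'S': [1.0, 0.6, 0.3, 0.1]}, index=[0.0, 0.5, 1.0, 1.5])

    print(df.loc[slice(bs, a_reg), 'S'])  # starts at 0.5: first bucket dropped
    print(df.loc[slice(0, a_reg), 'S'])   # starts at 0.0: first bucket kept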
Expand All @@ -3664,45 +3665,53 @@ def price(self, p, distortion=None, *, allocation='lifted', view='ask', efficien
            gps = pd.DataFrame(-np.diff(gS, prepend=1, axis=0), index=S.index)

            if self.sf(a_reg) > (1 - self.density_df.p_total.sum()):
-                # logger.info(f'Triggering sf(areg) > 1 - p_total: {1-self.sf(a_reg):.5g} code ')
-                # print('Adjusting tail losses')
-                # painful issue here with the naming leading to
-                rner = lambda x: x.replace('exi_xgta_', 'exeqa_')
-                # this regex does not capture the sum column if present
-                exeqa.loc[a_reg, :] = self.density_df.filter(regex='exi_xgta_.+$(?<!exi_xgta_sum)').\
-                    rename(columns=rner).loc[a_reg - self.bs] * a_reg
-                # the lifted/natural difference is that here scenarios in the tail are not re-
-                # weighted using risk adjusted probabilities. They are collapsed with objective
-                # probs.
+                print('Adjusting tail losses, but skipping\n'
+                      f'Triggering sf(areg) > 1 - p_total: {self.sf(a_reg):.5g} code ')
+                # NOTE: this adjustment requires the whole tail; it has been computed in
+                # density_df. However, when you come to risk adjusted version it hasn't
+                # been computed. That's why the code above falls back to apply distortion.
+                # see notes below in slow method
+                if 1:
+                    # painful issue here with the naming leading to
+                    rner = lambda x: x.replace('exi_xgta_', 'exeqa_')
+                    # this regex does not capture the sum column if present
+                    exeqa.loc[a_reg, :] = self.density_df.filter(regex='exi_xgta_.+$(?<!exi_xgta_sum)').\
+                        rename(columns=rner).loc[a_reg - self.bs] * a_reg
+                    # there is no exi_xgta_total, so that comes out as missing
+                    # need to fill in value
+                    if np.isnan(exeqa.loc[a_reg, 'exeqa_total']):
+                        exeqa.loc[a_reg, 'exeqa_total'] = exeqa.loc[a_reg].fillna(0).sum()
+                    # the lifted/natural difference is that here scenarios in the tail are not re-
+                    # weighted using risk adjusted probabilities. They are collapsed with objective
+                    # probs.

-            # these are at the layer level
+            # these are at the layer level, these compute Eq 14.20 p. 372
+            # note that by construction S(a) = 0 so there is no extra mass at the end
            exp_loss = ((ps.to_numpy() * self.bs) / loss.to_numpy() * exeqa )[::-1].cumsum()[::-1]
            alloc_prem = ((gps.to_numpy() * self.bs) / loss.to_numpy() * exeqa)[::-1].cumsum()[::-1]
            margin = alloc_prem - exp_loss

-            # deal with last row KLUDGE, s=0, coc = gs-s/(1-gs)=0
-            # think about what this should be... poss shift?
-
            # reciprocal cost of capital = capital / margin = 1 - gS / (gS - S)
            rcoc = (1 - gS) / (gS - S)
-            # this can have quirkiness on the left
-            left = self.q(1e-6)
-            if left > 0:
-                rcoc.loc[:left, 'S'] = rcoc.loc[left, 'S']
-            # left = self.bs # this is a kludge
            # compute 1/roe at s=1
            gprime = v.g_prime(1)
            fv = gprime / (1 - gprime)
            # print(f'computed s=1 capital factor={fv}')
+            # if gS-S=0 then gS=S=1 is possible (certain small losses); then fully loss funded, no equity, hence:
+            rcoc = rcoc.fillna(fv).shift(1, fill_value=fv)
+            # at S=0 also have gS-S=0, could have infinite
            capital = margin * rcoc.values

-            exp_loss_sum = exp_loss.sum()
-            alloc_prem_sum = alloc_prem.sum()
-            capital_sum = capital.sum()
+            # from IPython.display import display as d2
+            # d2(pd.concat((S, gS, rcoc, self.density_df.filter(regex='exi_xgta_').loc[sle], margin, capital), axis=1,
+            #    keys=['S', 'gS', 'rcoc', 'alpha', 'margin', 'capital']))
+
+            # these are integrals of alpha S and beta gS
+            exp_loss_sum = exp_loss.replace([np.inf, -np.inf, np.nan], 0).sum()
+            alloc_prem_sum = alloc_prem.replace([np.inf, -np.inf, np.nan], 0).sum()
+            capital_sum = capital.replace([np.inf, -np.inf, np.nan], 0).sum()

            df = pd.concat((exp_loss_sum, alloc_prem_sum, capital_sum), axis=1, keys=['L', 'P', 'Q']) . \
                rename(index=lambda x: x.replace('exeqa_', '')). \
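A hedged illustration of why the sums are now guarded (toy numbers): where gS - S = 0 the reciprocal cost of capital is infinite, and inf or NaN entries would otherwise poison the L, P, Q totals:

    import numpy as np
    import pandas as pd

    capital = pd.Series([1.0, 2.0, np.inf, np.nan])
    print(capital.sum())                                        # inf: NaN is skipped, inf propagates
    print(capital.replace([np.inf, -np.inf, np.nan], 0).sum())  # 3.0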
