diff --git a/.gitignore b/.gitignore index 26fad6f..7c9a6a3 100644 --- a/.gitignore +++ b/.gitignore @@ -34,3 +34,6 @@ vignettes/*.pdf # Shiny token, see https://shiny.rstudio.com/articles/shinyapps.html rsconnect/ + +# VIM swap files +*.swp diff --git a/IntroHCR/app.R b/IntroHCR/app.R new file mode 100644 index 0000000..5fbaf82 --- /dev/null +++ b/IntroHCR/app.R @@ -0,0 +1,251 @@ +# Copyright 2018 OFP SPC MSE Team. Distributed under the GPL 3 +# Maintainer: Finlay Scott, OFP SPC + +# Load packages ---- +library(shiny) +library(tidyr) # Could Try to avoid it and reduce weight of packages +library(dplyr) # Just used for bind_rows() at the moment - change data structure of PIs to avoid this +library(ggplot2) +library(RColorBrewer) + +# Source helpers ---- +source("../R/funcs.R") +source("../R/plots.R") +source("../R/modules.R") + +# User interface ---- +#ui <- fluidPage( +# titlePanel("What is a Harvest Control Rule?"), +ui <- navbarPage( + title="What is a Harvest Control Rule?", + tabPanel("What is a Harvest Control Rule?", + sidebarLayout( + sidebarPanel(width=3, + br(), + img(src = "spc.png", height = 100), + br(), + br(), + #mp_params_setterUI("mpparams", mp_visible=c("Constant catch","Threshold catch"), init_thresh_max_catch=140), + mp_params_setterUI("mpparams", mp_visible=c("Threshold catch","Constant catch"), init_thresh_max_catch=140), + #mp_params_setterUI("mpparams", mp_visible=c("Threshold catch"), init_thresh_max_catch=140), + br(), + # Buttons + tags$span(title="Project forward one year", + actionButton("advance", "Advance") + ), + tags$span(title="Reset current projection", + actionButton("reset", "Reset") + ), + br(), + # Stochasticity module + stoch_params_setterUI("stoch", init_prod_sigma=0.0, init_est_sigma=0.0, init_est_bias=0.0, show_var=FALSE) + ), + mainPanel( + # Main plot for the Intro to HCR app + # 2 x 2 panel + # Catch | HCR + # ---------------- + # B/K | connecting arrow + fluidRow( + column(6, tags$span(title="Plot of the total catch. The blue, dashed horizontal line is next years catch limit that has been set by the HCR. The grey, dashed horizontal lines are the catch limits that were set by the HCR in the past.", + plotOutput("plotcatch", width="auto")) + ), + column(6, tags$span(title="The HCR. The blue, dashed vertical line shows the current estimated biomass that is used as the input. The blue, dashed horizontal line shows the resulting catch limit that will be set for the following year", + plotOutput("plothcr", width="auto")) + ) + ), + fluidRow( + column(6, tags$span(title="The biomass of the stock (scaled by the unfished biomass). When the variability options are switched on, the black line is the 'true' biomass and the blue line is the 'estimated' biomass. The HCR uses the estimated biomass for the input.", + plotOutput("plotbiomass", width="auto")) + ), + column(6, tags$span(title="The current estimated biomass is used as the input to the HCR.", + plotOutput("plotarrow", width="auto")) + ) + ) + ) + ) + ), + # Tab for choosing stock parameters, stock history, no. 
iterations etc + tabPanel("Settings", + sidebarLayout( + sidebarPanel(width=3, + br(), + img(src = "spc.png", height = 100), + br(), + br() + ), + mainPanel(width=9, + fluidRow(column=12, + # Stock LH setter + stock_params_setterUI("stock"), + # Total number of years (including historical years) + tags$span(title="The total number of years in the projection", + numericInput("nyears", "Number of years", value = 30, min=20, max=100, step=1) + ) + ) + ) + ), + tabPanel("Information", + sidebarLayout( + sidebarPanel(width=3, + br(), + img(src = "spc.png", height = 100), + br(), + br(), + maintainer_and_licence() + #tags$footer("test") + ), + mainPanel(width=9, + h1("Instructions"), + p("This application attempts to introduce the fundamental idea behind a Harvest Control Rule (HCR)."), + p("By default a ", em("Threshold catch"), " style of HCR is in operation. The parameters of the HCR are set using the sliders on the left-hand side ", em("(Blim, Belbow, Cmin and Cmax)"), ". Changing the parameters will reset the current projection."), + p("The ", em("Threshold catch"), " HCR takes the ", em("estimated"), "value of biomass as the input. This can be seen by following the blue arrow from the biomass plot at the bottom left to the HCR plot at the top right. The HCR uses the biomass input to set the catch limit in the next timestep. The vertical, blue dashed line on the HCR plot shows the current estimated value of the stock biomass. The horizontal, blue dashed line on the HCR plot is the catch limit set for the next timestep. The horizontal, blue dashed line is also shown on the catch plot at the top. It shows where the catch will be in the next timestep. The process moves anti-clockwise."), + p("Pressing the ", strong("Advance"), " button steps the projection forward by one timestep. The projection is performed by fishing the stock at the level set by the HCR and calculating the response of the stock. The response of the stock is a combination of the stock dynamics and the impacts of fishing. Pressing the ", strong("Reset"), " button resets the projection."), + p("When the ", strong("Advance"), " button is pressed you should see that the next catch goes to where the blue dashed line was."), + p("The ghosts of catch limits from the past are shown as grey dashed lines on the catch plot and as grey dots on the HCR plot. These allow you to see which parts of the HCR have been active."), + h2("Variability"), + p("Variability can be included in the projection in two ways: through variability in the stock productivity and through the estimated level of stock biomass being different to the true level of the stock biomass. These options are initially turned off. The options can be seen by clicking on the ", strong("Show variability options"), "box."), + p("Biological productivity variability represents the variability of the natural processes of the stock, for example growth and natural mortality. Increasing the variability will increase the 'bumpiness' of the stock trajectory. As biological variability is always encountered in fisheries it is essential that a selected HCR is robust to the variability."), + p("Estimation error simulates the difference between the true level of the stock biomass and the estimated level. Unfortunately, the true abundance of a fish stock is never known. Instead, estimates of abundance are made, for example using stock assessment models. The HCR uses the estimated biomass, not the true biomass. 
This means that the catch limit that is set by the HCR is based on estimated biomass. If the biomass is estimated poorly, the resulting catch limit set by the HCR may not be appropriate."), + p("Here, estimation error is modelled using two different processes: random error and consistent bias (positive or negative). The bias represents situations where the biomass is consistently over or under estimated."), + p("When estimation error is active the biomass plot shows two lines. The black line shows the true biomass, the blue line shows the estimated biomass. It is the blue line that feeds the HCR. Increasing the estimation bias and variability will increase the difference between these lines.") + ) + ) + ) +) + +server <- function(input, output, session) { + +# How to reset all this + # Globals for testing - make them reactive later? + app_params <- list(initial_year = 1990, last_historical_timestep = 10) + app_params$historical_timesteps = 1:app_params$last_historical_timestep + # Aus tiger prawn + # MSY = r K / 4 + # BMSY = K / 2 + # FMSY = r/2 + + # Modules for the stochasticity and MP parameters! + get_mp_params <- callModule(mp_params_setter, "mpparams") + get_stoch_params <- callModule(stoch_params_setter, "stoch") + get_lh_params <- callModule(stock_params_setter, "stock") + # Join these together into a single object to be passed to the funcs - bit clumsy as I will have to do this in all the servers + get_stock_params <- reactive({ + sp <- get_stoch_params() + lh <- get_lh_params() + out <- c(sp, lh) + return(out) + }) + + # Make the reactive stock object and fill it up with initial values + # stock cannot just be a calculated value returned from a reactive() as it needs to persist + # i.e. here the next timestep depends on the previous timestep + # it's empty but has structure + stock <- create_stock() + + # Initialise it - do as one step? + # Stock is reactive so passed by reference? 
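+  # Illustrative sketch (not part of the app logic): the reference point comments
+  # above follow from the Schaefer form (p = 1) of the surplus production model,
+  # so with hypothetical demo values of r and k the quantities work out as:
+  r_demo <- 0.6; k_demo <- 100
+  msy_demo  <- r_demo * k_demo / 4   # MSY  = r K / 4 = 15
+  bmsy_demo <- k_demo / 2            # BMSY = K / 2   = 50
+  fmsy_demo <- r_demo / 2            # FMSY = r / 2   = 0.3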
+ # Use isolate else error (function tries to be reactive to changes in get_stock_params() and stock itself + niters <- 1 # We only have 1 iteration for this stock + isolate(reset_stock(stock=stock, stock_params = get_stock_params(), mp_params=get_mp_params(), app_params=app_params, initial_biomass=get_stock_params()$b0, nyears=input$nyears, niters=niters)) + # Need a better way of setting the first stock up + + # To keep track of current timestep + timestep <- reactiveVal(app_params$last_historical_timestep) + + # Reset the stock if any of the controls are fiddled with + # Just side effects so use an observer + observe({ + req(input$nyears) + # If any of the following change the observer gets triggered + #mp_params <- get_mp_params() + nyears <- input$nyears + input$reset + timestep(app_params$last_historical_timestep) + mp_params <- get_mp_params() + stock_params <- get_stock_params() + #stock_params <- get_stock_params() # The output is stored here because we need it for the reset_stock() function + # Need isolate here because calling reset_stock() causes stock to change which triggers this observe() resulting + # in an infinite loop + isolate(reset_stock(stock=stock, stock_params=stock_params, mp_params=mp_params, app_params=app_params, initial_biomass=get_stock_params()$b0, nyears=nyears, niters=niters)) + }) + + # Advance timestep by 1 if the action button is pressed + observeEvent(input$advance, { + if(timestep() < dim(stock$catch)[2]){ + timestep(timestep() + 1) + out <- project(stock, + timesteps=c(timestep(),timestep()), + stock_params=get_stock_params(), + mp_params=get_mp_params(), + app_params=app_params) + stock$biomass <- out$biomass + stock$effort <- out$effort + stock$hcr_ip <- out$hcr_ip + stock$hcr_op <- out$hcr_op + stock$catch <- out$catch + } + }) + + ## Call the HCR plot function + #output$hcrplot <- renderPlot({ + # plot_hcr(stock=stock, mp_params=get_mp_params(), stock_params=get_stock_params(), type="stepping") + #}) + # + ## stock is a reactive value so changes to it will cause the plot to be called + #output$biomassplot <- renderPlot({ + # plot_biomass(stock=stock, stock_params=get_stock_params()) + #}) + + #output$bdplot <- renderPlot({ + # plot_bmd(biomass=stock$biomass, catch=stock$catch, stock_params=stock_params, mp_params=mp_params) + #}) + # + #output$hcrplot <- renderPlot({ + # #plot_bmd(biomass=stock$biomass, catch=stock$catch, stock_params=stock_params) + # current_biomass <- stock$biomass[,timestep()] + # plot_hcr(mp_params=mp_params, stock_params=stock_params, biomass=current_biomass) + #}) + # + #output$altplot <- renderPlot({ + # plot_alt(biomass=stock$biomass, biomass_obs=stock$biomass_obs, catch=stock$catch, timestep(), stock_params=stock_params, mp_params=mp_params, last_historical_timestep=last_historical_timestep) + # } + # , + # height = function() { + # session$clientData$output_altplot_width * 2/3} + #) + + # Plots all 4 panels but in a single figure - hard to get it so it is not squashed + # Looks better if all 4 panels are plotted separately + output$intro_hcr_plot <- renderPlot({ + plot_hcr_intro(stock=stock, stock_params=get_stock_params(), mp_params=get_mp_params(), app_params=app_params, timestep = timestep()) + }) + + output$plotcatch <- renderPlot({ + plot_catch(stock=stock, stock_params=get_stock_params(), mp_params=get_mp_params(), app_params=app_params, timestep=timestep(), main="Catch") + }) + + output$plothcr <- renderPlot({ + plot_hcr(stock=stock, stock_params=get_stock_params(), mp_params=get_mp_params(), 
app_params=app_params, timestep=timestep()+1) + }) + + + output$plotbiomass <- renderPlot({ + plot_biomass(stock=stock, stock_params=get_stock_params(), mp_params=get_mp_params(), timestep=timestep()+1, main="SB / SBF=0") + }) + + output$plotarrow <- renderPlot({ + plot_hcr_intro_arrow(stock=stock, timestep=timestep()+1-get_mp_params()$timelag) + }) + + # Termination script - needed when running from bat file + session$onSessionEnded(function() { + stopApp() + }) + + +} + +# Run the app +shinyApp(ui, server) diff --git a/IntroHCR/www/spc.png b/IntroHCR/www/spc.png new file mode 100644 index 0000000..950a3bd Binary files /dev/null and b/IntroHCR/www/spc.png differ diff --git a/IntroUncertainty/app.R b/IntroUncertainty/app.R new file mode 100644 index 0000000..a44b96c --- /dev/null +++ b/IntroUncertainty/app.R @@ -0,0 +1,276 @@ +# Copyright 2018 OFP SPC MSE Team. Distributed under the GPL 3 +# Maintainer: Finlay Scott, OFP SPC + +# Load packages ---- +library(shiny) +library(tidyr) # Could Try to avoid it and reduce weight of packages +library(dplyr) # Just used for bind_rows() at the moment - change data structure of PIs to avoid this +library(ggplot2) +library(RColorBrewer) + +# Source helpers ---- +source("../R/funcs.R") +source("../R/plots.R") +source("../R/modules.R") + +# Add constant catch option (to illustrate stochasticity with no HCR) +# User interface ---- +#ui <- fluidPage( + #theme = "bootstrap.css", +ui <- navbarPage( + title="Investigating uncertainty", + tabPanel("Investigating uncertainty", + sidebarLayout( + sidebarPanel(width=3, + br(), + img(src = "spc.png", height = 100), + br(), + br(), + # HCR options + # Use same MP module as the Intro to HCRs - nice! + #mp_params_setterUI("mpparams", mp_visible=c("Constant catch","Threshold catch")), + mp_params_setterUI("mpparams", mp_visible=c("Threshold catch", "Constant catch", "Threshold effort", "Constant effort")), + br(), + tags$span(title="Run the projection for one more iteration.", + actionButton("project", "Run projection") + ), + tags$span(title="Reset the projection.", + actionButton("reset", "Reset") + ), + br(), + # Stochasticity options + # Use same stochasticity module as the Intro to HCRs - nice! + stoch_params_setterUI("stoch", init_prod_sigma=0.0, init_est_sigma=0.0, init_est_bias=0.0, show_var=FALSE) + ), + mainPanel(width=9, + # Column 1 - 2 rows + # I want the height of these two rows to be the same as the height of the 3 rows in the previous column + column(6, + fluidRow( + tags$span(title="The HCR. The blue points show the inputs and outputs from all years of the the last iteration. The grey points show the inputs and outputs from all years from all iterations. This enables you to see which parts of the HCR shape are most used.", + plotOutput("plothcr") + ) + ), + fluidRow( + #div(tableOutput("hcrpis"), style = "font-size:100%"), + tags$span(title="The number of iterations run so far.", + textOutput("itercount")), + checkboxInput("show_pis", label = "Show performance indicators", value=FALSE), + conditionalPanel(condition="input.show_pis == true", + tags$span(title="A table of various performance indicators calculated over the short-, medium- and long- term. The value is the median. The values in the brackets are the 20th and 80th percentiles respectively. See the information tab for more details", tableOutput("hcrpis"), style = "font-size:100%") + ) + ) + ), + # Column 3 - has 3 rows + column(6, + fluidRow( + tags$span(title="Plot of SB/SBF=0. 
The black line shows the 'true' biomass in the current iteration. The blue line shows the estimated biomass in the current iteration. The grey lines show the previous iterations. The histogram shows the range of values in the final year.", + plotOutput("plotbiomasshisto",height="250px") + ) + ), + fluidRow( + tags$span(title="Plot of the catch. The black line shows the current iteration. The grey lines show the previous iterations. The histogram shows the range of values in the final year.", + plotOutput("plotcatchhisto",height="250px") + ) + ), + fluidRow( + tags$span(title="Plot of the CPUE relative to the CPUE in the year 2000. The black line shows the current iteration. The grey lines show the previous iterations. The histogram shows the range of values in the final year.", + plotOutput("plotrelcpuehisto",height="250px") + ) + ) + ) + ) + )), + # Tab for choosing stock parameters, stock history, no. iterations etc + tabPanel("Settings", + sidebarLayout( + sidebarPanel(width=3, + br(), + img(src = "spc.png", height = 100), + br(), + br() + ), + mainPanel(width=9, + fluidRow(column=12, + #stoch_params_setterUI("stoch", init_prod_sigma=0.15, init_est_sigma=0.03, init_est_bias=0.0), + stock_params_setterUI("stock"), + # Total number of years (including historical years) + numericInput("nyears", "Number of years", value = 30, min=20, max=100, step=1) + ) + ) + ) + ), + tabPanel("Information", + sidebarLayout( + sidebarPanel(width=3, + br(), + img(src = "spc.png", height = 100), + br(), + br(), + maintainer_and_licence() + #tags$footer("test") + ), + mainPanel(width=9, + h1("Instructions"), + #h2("Variability"), + p("Variability can be included in the projection in two ways: through variability in the stock productivity and through the estimated level of stock biomass being different to the true level of the stock biomass. These options are initially turned off. The options can be seen by clicking on the ", strong("Show variability options"), "box."), + p("Biological productivity variability represents the variability of the natural procesess of the stock, for example growth and natural mortality. Increasing the variability will increase the 'bumpiness' of the stock trajectory. As biological variability is always encountered in fisheries it is essential that a selected HCR is robust to the variability."), + p("Estimation error simulates the difference between the true level of the stock biomass and the estimated level. Unfortunately, the true abundance of a fish stock is never known. Instead, estimates of abundance are made, for example using stock assessment models. The HCR uses the estimated biomass, not the true biomass. This means that the catch limit that is set by the HCR is based on estimated biomass. If the biomass is estimated poorly the resulting catch limit set by the HCR may not be appropriate."), + p("Here, estimation error is modelled using two different processes: random error and consistent bias (positive or negative). The bias represents situations where the biomass is consistently over or under estimated.") + #p("When estimation error is active the biomass plot shows two lines. The black line shows the true biomass, the blue line shwows the estimated biomass. It is the blue line that feeds the HCR. 
Increasing the estiamation bias and variability will increase the difference between these lines.") + ) + ) + ) + +) + +server <- function(input, output,session) { + + # Globals (for the moment) + #initial_year <- 1990 + #last_historical_timestep <- 10 + #historical_timesteps <- 1:last_historical_timestep + # How to reset all this + # Globals for testing - make them reactive later? + # Bit dodgy having niters here as it will not reflect the actual niters + # Maybe have iter outside of app_params + # Here niters is the initial number of iters in a new stock object when reset is triggered + app_params <- list(initial_year = 1990, last_historical_timestep = 10) + app_params$historical_timesteps = 1:app_params$last_historical_timestep + quantiles <- c(0.20, 0.80) + # Where to put these Ref Pts? + # MSY = r K / 4 + # BMSY = K / 2 + # FMSY = r/2 + # Aus tiger prawn + # Modules for the stochasticity and MP parameters! + pitemp <- reactiveVal(NULL) + + get_stoch_params <- callModule(stoch_params_setter, "stoch") + get_lh_params <- callModule(stock_params_setter, "stock") + # Join these together into a single object to be passed to the funcs - bit clumsy as I will have to do this in all the servers + get_stock_params <- reactive({ + sp <- get_stoch_params() + lh <- get_lh_params() + out <- c(sp, lh) + return(out) + }) + + #get_stock_params <- callModule(stoch_params_setter, "stoch") + get_mp_params <- callModule(mp_params_setter, "mpparams") + + # The stock + stock <- create_stock() + # Initialise it - do as one step? + # Stock is reactive so passed by reference? + niters <- 1 # We have 1 initial iter. More are added to it but this is accounted for using the iter() function + # Use isolate else error (function tries to be reactive to changes in get_stock_params() and stock itself + isolate(reset_stock(stock=stock, stock_params = get_stock_params(), mp_params=get_mp_params(), app_params=app_params, initial_biomass=get_stock_params()$b0, nyears=input$nyears, niters=niters)) + + # Set iter as a reactive value. 
If this changes, it triggers other stuff + iter <- reactiveVal(0) + + # Reset + observe({ + req(input$nyears) + # If any of the following change the observer gets triggered + mp_params <- get_mp_params() + input$reset + #timestep(app_params$last_historical_timestep) + stock_params <- get_stock_params() # The output is stored here because we need it for the reset_stock() function + # Reset iterations + iter(0) + pitemp(NULL) + # Need isolate here because calling reset_stock() causes stock to change which triggers this observe() resulting + # in an infinite loop + isolate(reset_stock(stock=stock, stock_params=stock_params, mp_params=mp_params, app_params=app_params, initial_biomass=stock_params$b0, nyears=input$nyears, niters=niters)) + }) + + # Project works by binding new rows (iterations) to the existing ones + # Project all timesteps + observeEvent(input$project, { + iter(iter() + 1) + # If nrow stock < iter, add a row biomass, biomass_obs and catch + # This new will contain the values for the new iteration + if (nrow(stock$biomass) < iter()){ + # rbind is dropping dimnames - really annoying - so hack + dnames <-names(dimnames(stock$biomass)) + stock$biomass <- rbind(stock$biomass,stock$biomass[1,]) + #stock$biomass_obs <- rbind(stock$biomass_obs,stock$biomass_obs[1,]) + stock$hcr_ip<- rbind(stock$hcr_ip,stock$hcr_ip[1,]) + stock$hcr_op<- rbind(stock$hcr_op,stock$hcr_op[1,]) + stock$catch <- rbind(stock$catch,stock$catch[1,]) + stock$effort <- rbind(stock$effort,stock$effort[1,]) + # Bit hacky but rbind is dropping the dimnames names + names(dimnames(stock$biomass)) <- dnames + names(dimnames(stock$effort)) <- dnames + names(dimnames(stock$hcr_ip)) <- dnames + names(dimnames(stock$hcr_op)) <- dnames + names(dimnames(stock$catch)) <- dnames + } + # Pass in current iter + # project() takes entire stock + # Here we only want one iteration - subset out using fancy lapply with [ operator + + stock_iter <- lapply(reactiveValuesToList(stock), '[', iter(),,drop=FALSE) + out <- project(stock_iter, + timesteps=c((app_params$last_historical_timestep+1),dim(stock$biomass)[2]), + stock_params=get_stock_params(), + mp_params=get_mp_params(), + app_params=app_params) + stock$biomass[iter(),] <- out$biomass + stock$effort[iter(),] <- out$effort + stock$hcr_ip[iter(),] <- out$hcr_ip + stock$hcr_op[iter(),] <- out$hcr_op + stock$catch[iter(),] <- out$catch + + # Get summary PIs + piqs <- get_summary_pis(stock=stock, stock_params=get_stock_params(), mp_params=get_mp_params(), app_params=app_params, + quantiles=c(0.01,quantiles, 0.99)) # Quantiles expecting length 4 + pitemp(piqs) + }) + + # Call the HCR plot function + output$plothcr <- renderPlot({ + plot_hcr(stock=stock, stock_params=get_stock_params(), mp_params=get_mp_params(), app_params=app_params) + }) + + output$plotbiomasshisto <- renderPlot({ + plot_metric_with_histo(stock=stock, stock_params=get_stock_params(), mp_params=get_mp_params(), metric="biomass", quantiles=quantiles) + }) + + output$plotcatchhisto <- renderPlot({ + plot_metric_with_histo(stock=stock, stock_params=get_stock_params(), mp_params=get_mp_params(), metric="catch", quantiles=quantiles) + }) + + output$plotrelcpuehisto <- renderPlot({ + plot_metric_with_histo(stock=stock, stock_params=get_stock_params(), mp_params=get_mp_params(), metric="relcpue", app_params=app_params, quantiles=quantiles) + }) + + output$itercount <- renderText({ + paste("Number of iterations: ", iter(), sep="") + }) + + + output$hcrpis <- renderTable({ + # Don't print table unless project has been pressed + 
#if (show_table() == 0){ + if (is.null(pitemp())){ + return()} + # Use pitemp() to fill table + current_pi_table(pitemp()) + }, + rownames = TRUE, + caption= "Performance indicators", + auto=TRUE + ) + + # Termination script - needed when running from bat file + session$onSessionEnded(function() { + stopApp() + }) + +} + +# Run the app +shinyApp(ui, server) diff --git a/IntroUncertainty/www/spc.png b/IntroUncertainty/www/spc.png new file mode 100644 index 0000000..950a3bd Binary files /dev/null and b/IntroUncertainty/www/spc.png differ diff --git a/MeasuringPerformance/app.R b/MeasuringPerformance/app.R new file mode 100644 index 0000000..6fbe22e --- /dev/null +++ b/MeasuringPerformance/app.R @@ -0,0 +1,432 @@ +# Copyright 2018 OFP SPC MSE Team. Distributed under the GPL 3 +# Maintainer: Finlay Scott, OFP SPC + +# Load packages ---- +library(shiny) +library(tidyr) # Could Try to avoid it and reduce weight of packages +library(dplyr) # Just used for bind_rows() at the moment - change data structure of PIs to avoid this +library(ggplot2) +library(RColorBrewer) + +# Source helpers ---- +source("../R/funcs.R") +source("../R/plots.R") +source("../R/modules.R") + +ui <- navbarPage( + title="Measuring performance", + # Front tab + # Choosing and running the HCR + tabPanel("HCR Selection", + sidebarLayout( + sidebarPanel(width=3, + br(), + img(src = "spc.png", height = 100), + br(), + br(), + # HCR options + #mp_params_setterUI("mpparams", mp_visible=c("Threshold catch", "Constant catch")), + mp_params_setterUI("mpparams", mp_visible=c("Threshold catch", "Constant catch", "Threshold effort", "Constant effort")), + #mp_params_setterUI("mpparams"), # All HCR types + br(), + br(), + #actionButton("project", "Project", icon=icon("fish")), # fish icon not working yet v5.1 + actionButton("project", "Project HCR"), + br(), + actionButton("add_basket", "Add HCR to basket", icon=icon("shopping-basket")), + br(), + # This should reset everything - empty the stores + actionButton("empty_basket", "Empty basket"), + # How many HCRs do we have in the store + textOutput("nstoredstocks") + ), + mainPanel(width=9, + column(6, + fluidRow( + tags$span(title="The HCR. The grey points show the inputs and outputs from all years from all iterations. This enables you to see which parts of the HCR shape are most used.", plotOutput("plothcr") + ) + ), + fluidRow( + tags$span(title="A table of various performance indicators calculated over the short-, medium- and long- term. The value is the median. The values in the brackets are the 20th and 80th percentiles respectively. See the information tab for more details", tableOutput("currenthcrpis"), style = "font-size:100%") + ) + ), + # Column 3 - has 3 rows + column(6, + fluidRow( + tags$span(title="Plot of SB/SBF=0. The grey envelope contains the 20-80 percentiles. The blue dashed line is median. The black lines are several iterations to illustrate the dynamics. The histogram shows the range of values in the final year.", + plotOutput("plotbiomasshisto",height="250px") + #plotOutput("plotbiomasshisto") + ) + ), + fluidRow( + tags$span(title="Plot of the catch. The grey envelope contains the 20-80 percentiles. The blue dashed line is median. The black lines are several iterations to illustrate the dynamics. The histogram shows the range of values in the final year.", + plotOutput("plotcatchhisto",height="250px") + #plotOutput("plotcatchhisto") + ) + ), + fluidRow( + tags$span(title="Plot of the CPUE relative to the CPUE in the year 2000. 
The grey envelope contains the 20-80 percentiles. The blue dashed line is median. The black lines are several iterations to illustrate the dynamics. The histogram shows the range of values in the final year.", + plotOutput("plotrelcpuehisto",height="250px") + #plotOutput("plotrelcpuehisto") + ) + ) + ) + ) + ) + ), + tabPanel("Compare performance", + sidebarLayout( + sidebarPanel(width=3, + br(), + img(src = "spc.png", height = 100), + br(), + # PI selection + # Keep this updated with the available PIs + checkboxGroupInput(inputId = "pichoice", label="PI selection", inline=TRUE, # inline does not seem to work... + # character(0) means no choice is available + choices = character(0)), + br(), + # Dynamic HCR selection + # See: https://shiny.rstudio.com/reference/shiny/1.0.0/updateCheckboxGroupInput.html + checkboxGroupInput(inputId = "hcrchoice", label="HCR selection", + # character(0) means no choice is available + choices = character(0)), + br() + # Choice of plotting style: + # bar plot, box plot, radar + # time series comparison? + ), + mainPanel(width=9, + tabsetPanel(id="comparisontabs", + tabPanel(title="Performance indicators - medians", + value="PImeds", + column(12, fluidRow( # Difference with row(column( ? + #fluidRow(column(12, + tags$span(title="Bar plot of the median values of the performance indicators over the three time periods. Note that the PIs for effort and variability have been transformed so that the larger the value, the better the HCR is thought to be performing.", + plotOutput("plotpimed", height="600px")) + )) + ), + tabPanel(title="Performance indicators - boxplots", + value="PIbox", + column(12, fluidRow( + tags$span(title="Box plot of the values of the performance indicators over the three time periods. The box contains the 20-80 percentiles, the tails the 5-95 percentils.", + plotOutput("plotpibox", height="600px")) + )) + ), + tabPanel(title="Performance indicators - radar", + value="PIradar", + column(12, fluidRow( + tags$span(title="Radar plot of the median values of the performance indicators over the three time periods. Note that the PIs for effort and variability have been transformed so that the larger the value, the better the HCR is thought to be performing.", + plotOutput("plotpiradar", height="600px")), + "Note that stability PIs and relative effort are not shown on the radar plot." + )) + ), + tabPanel(title="Performance indicators - table", + value="PItable", + column(12, fluidRow( + "Performance indicators in the long-term", + div(tags$span(title="Peformance indicators in the long-term. The value is the median, the values in the parentheses are the 20-80 percentiles.", tableOutput("bigpitable"), style = "font-size:85%")) + )) + ), + tabPanel(title="Majuro plots", + value="majuroall", + column(12, fluidRow( + tags$span(title="Majuro plot of the trajectories of the stocks under the different HCRs.", + plotOutput("plotmajuroall", height="600px")) + )) + ), + tabPanel(title="Time series", + value="timeseries", + tags$span(title="Time series plots of various metrics for the stocks under the different HCRs. The envelope contains the 20-80 percentiles of the distribution. The dashed line is the median value. Some individual trajectories can be shown by selecting the 'Show spaghetti' option.", + fluidRow(column(12, plotOutput("plottimeseries", height="600px")))), + fluidRow(column(12, checkboxInput("spaghetti", "Show spaghetti", FALSE))) + ) + ) + ) + ) + ), + # Tab for choosing stock parameters, stock history, no. 
iterations etc + # Changing any of these will empty the basket + tabPanel("Settings", + sidebarLayout( + sidebarPanel(width=3, + br(), + img(src = "spc.png", height = 100), + br(), + br(), + "Note that changing any of these settings will reset the current stock and empty the basket.", + br() + ), + mainPanel(width=9, + fluidRow(column=12, + stoch_params_setterUI("stoch", init_prod_sigma=0.2, init_est_sigma=0.04, init_est_bias=0.0), + stock_params_setterUI("stock"), + # Total number of years (including historical years) + numericInput("nyears", "Number of years", value = 30, min=20, max=100, step=1), + # Number of iterations + numericInput("niters", "Number of iterations", value = 100, min=10, max=1000, step=10) + ) + ) + ) + ), + tabPanel("Information", + sidebarLayout( + sidebarPanel(width=3, + br(), + img(src = "spc.png", height = 100), + br(), + br(), + maintainer_and_licence() + #tags$footer("test") + ), + mainPanel(width=9, + h1("General idea"), + p("Choose HCRs from the drop-down menu and set the parameters."), + p("Project the stock forward under the chosen HCR by clicking on the ", strong("Project HCR"), " button. If you like the look of the output, add the HCR to the basket by clicking on the ", strong("Add HCR to basket"), " button."), + p("Keep adding HCRs to your basket until you are ready to compare them."), + h1("Performance indicators"), + p("There are 8 PIs in the table. ", em("SB/SBF=0"), " and ", em("Catch"), " are fairly self-explanatory. ", em("Effort (rel. 1999)"), " and ", em("CPUE (rel. 1999)"), " are the fishing effort and CPUE relative to their values in 1999 respectively. ", em("Prob. SB > LRP"), " is the probability of SB/SBF=0 being above the LRP. ", em("Catch variability"), ",", em("Effort variability"), " and ", em("CPUE variability"), " measure the variability in the catch, relative effort and relative CPUE respectively. The variability PIs measure the bumpiness over time. The higher the value, the more the value changes over time."), + p("It should be noted that these PIs don't all point the same way. It is generally thought that the higher the value of ", em("SB/SBF=0"), ",", em("Prob. SB > LRP"), ",", em("Catch"), " and ", em("CPUE (rel. 1999)"), " the better the HCR is performing. However, for ", em("Effort (rel. 1999)"), "and the ", em("variability"), " PIs, lower values are preferred. The higher the effort, the greater the costs. Stable catches and effort are preferred to catches and effort that vary strongly between years. Care must therefore be taken when using PIs to compare performance of HCRs."), + h1("Comparing performance"), + p("Choose the ", strong("Compare performance"), " tab for a range of plots and tables that allow the comparison of the performance of the HCRs through performance indicators and other metrics."), + p("The performance indicators and HCRs can be selected and deselected to help with the comparison.") + ) + ) + ) +) + +server <- function(input, output, session) { + # Global parameters + app_params <- list(initial_year = 1990, # Cosmetic only + last_historical_timestep = 10) + # Storage for stocks - not actually needed - we only use the tsstore and pistore - make both reactive + # Comment out but leave in case we decide to use it + #stockstore <- reactiveVal(list()) + # pitemp is used for the PI table on the front page - needed? 
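+  # Illustrative sketch (not part of the app logic): the "variability" PIs described
+  # in the Information tab are built in get_pis() (R/funcs.R) from year-to-year
+  # absolute differences, so a bumpier series scores higher. catch_demo is made up.
+  catch_demo <- c(100, 120, 90, 95)
+  abs(diff(catch_demo))           # 20 30 5 - larger values mean a less stable catch
+  median(abs(diff(catch_demo)))   # 20 - the kind of value summarised in the PI tables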
+ pitemp <- reactiveVal(NULL) + # Storage for the PIs + pistore <- reactiveVal(list()) + # Storage for the timeseries + tsstore <- reactiveVal(list()) + # It is not possible to store something until you have projected + OKtostore <- FALSE + quantiles <- c(0.05,0.2, 0.8, 0.95) + + # Modules for the stochasticity and MP parameters! + get_mp_params <- callModule(mp_params_setter, "mpparams") + get_stoch_params <- callModule(stoch_params_setter, "stoch") + get_lh_params <- callModule(stock_params_setter, "stock") + # Join stoch and lh together into a single object to be passed to funcs - bit clumsy as I will have to do this in all the servers + get_stock_params <- reactive({ + sp <- get_stoch_params() + lh <- get_lh_params() + out <- c(sp, lh) + return(out) + }) + + # The stock + stock <- create_stock() + # Use isolate else error (function tries to be reactive to changes in get_stock_params() and stock itself + isolate(reset_stock(stock=stock, stock_params = get_stock_params(), mp_params=get_mp_params(), app_params=app_params, initial_biomass=get_stock_params()$b0, nyears=input$nyears, niters=input$niters)) + + # Store the stock in the basket and create a new empty one + observeEvent(input$add_basket, { + # Check if project has been pressed first or you may store empty results + if(OKtostore == FALSE){ + return() + } + # Store the stock, stockparams and mpparams + stock_params <- get_stock_params() + mp_params <- get_mp_params() + stock_list <- list(stock = reactiveValuesToList(stock), + stock_params = stock_params, + mp_params = mp_params) + # Add stock and summary PIs to lists + name <- paste(length(pistore())+1, ". ",mp_params$name,sep="") + # Bit faffy to update the stores + temp <- pistore() + temp[[eval(name)]] <- pitemp() + pistore(temp) + temp <- tsstore() + temp[[eval(name)]] <- get_timeseries(stock=stock, stock_params=stock_params, app_params=app_params, quantiles=quantiles[c(2,3)]) + tsstore(temp) + + # Update the available PIs - although this doesn't really dynamically change + # It just saves having to maintain a list in the UI at the top AND in the PI calculation function + updateCheckboxGroupInput(session, "pichoice", + choices = unique(pistore()[[name]]$piqs$name), + selected = unique(pistore()[[name]]$piqs$name) + ) + # You can't store again until you project again + OKtostore <<- FALSE + }) + + # Reset the current stock if you touch the MP parameters + observeEvent(get_mp_params(),{ + stock_params <- get_stock_params() + reset_stock(stock=stock, stock_params=stock_params, mp_params=get_mp_params(), app_params=app_params, initial_biomass=stock_params$b0, nyears=input$nyears, niters=input$niters) + # Reset the show table variable + pitemp(NULL) + OKtostore <<- FALSE + }) + + # Update HCR choice in comparison tab (if HCR basket gets added to or emptied) + # Alternative is to make the stock store reactive - could get a bit tricky + observe({ + input$empty_basket + input$add_basket + selected <- NULL + choices <- character(0) + if(length(pistore())>0){ + selected <- names(pistore()) + choices <- names(pistore()) + } + updateCheckboxGroupInput(session, "hcrchoice", + choices = choices, + selected = selected + ) + }) + + # Empty the basket and reset the current stock + # Basket gets emptied when: + # stochasticity options change + # nyears changes + # niters changes + # The empty basket button is pushed + # Changing the MP parameters does not trigger it + observe({ + req(input$niters, input$nyears) # Can be NA due to numericInput + # React to + input$empty_basket + stock_params <- 
get_stock_params() # Includes LH and stoch options + niters <- input$niters + nyears <- input$nyears + # Do not react to changes in MP params + mp_params <- isolate(get_mp_params()) + # Reset the stock + isolate(reset_stock(stock=stock, stock_params=stock_params, mp_params=mp_params, app_params=app_params, initial_biomass=stock_params$b0, nyears=nyears, niters=niters)) + # Empty the storage + pitemp(NULL) + pistore(list()) + tsstore(list()) + OKtostore <<- FALSE + }) + + # Project all timesteps + observeEvent(input$project, { + # Could add iter loop to the project() function instead of doing it here? + for (iter in 1:dim(stock$biomass)[1]){ + stock_iter <- lapply(reactiveValuesToList(stock), '[', iter,,drop=FALSE) + stock_params <- get_stock_params() + out <- project(stock_iter, + timesteps=c((app_params$last_historical_timestep+1),dim(stock$biomass)[2]), + stock_params=stock_params, + mp_params=get_mp_params(), + app_params=app_params) + stock$biomass[iter,] <- out$biomass + stock$effort[iter,] <- out$effort + stock$hcr_ip[iter,] <- out$hcr_ip + stock$hcr_op[iter,] <- out$hcr_op + stock$catch[iter,] <- out$catch + } + # Get summary PIs + piqs <- get_summary_pis(stock=stock, stock_params=get_stock_params(), mp_params=get_mp_params(), app_params=app_params, + quantiles=quantiles) + pitemp(piqs) + OKtostore <<- TRUE + }) + + # Call the HCR plot function + output$plothcr <- renderPlot({ + plot_hcr(stock=stock, stock_params=get_stock_params(), mp_params=get_mp_params(), app_params=app_params, show_last = FALSE) + }) + + output$plotbiomasshisto <- renderPlot({ + plot_metric_with_histo(stock=stock, stock_params=get_stock_params(), mp_params=get_mp_params(), metric="biomass", show_last = FALSE, quantiles=quantiles[c(2,3)]) + }) + + output$plotcatchhisto <- renderPlot({ + plot_metric_with_histo(stock=stock, stock_params=get_stock_params(), mp_params=get_mp_params(), metric="catch", show_last = FALSE, quantiles=quantiles[c(2,3)]) + }) + + output$plotrelcpuehisto <- renderPlot({ + plot_metric_with_histo(stock=stock, stock_params=get_stock_params(), mp_params=get_mp_params(), metric="relcpue", app_params=app_params, show_last = FALSE, quantiles=quantiles[c(2,3)]) + }) + + output$plotmajuro <- renderPlot({ + plot_majuro_single_stock(stock=stock, stock_params = get_stock_params(),quantiles=quantiles[c(2,3)]) + }) + + output$nstoredstocks <- renderText({ + paste("Number of HCRs in basket: ", length(pistore()), sep="") + }) + + output$currenthcrpis <- renderTable({ + # Don't print table unless project has been pressed + if (is.null(pitemp())){ + return()} + # Use pitemp() to fill table + current_pi_table(pitemp()) + }, + rownames = TRUE, + caption= "Performance indicators", + auto=TRUE + ) + + # The mega table for the PIs - tricky + output$bigpitable <- renderTable({ + hcr_choices <- input$hcrchoice + pi_choices <- input$pichoice + # If no HCR or PI is selected then don't do anything + if(is.null(hcr_choices) | is.null(pi_choices)){ + return() + } + big_pi_table(pis=pistore(), hcr_choices=hcr_choices, pi_choices=pi_choices) + }, + rownames = TRUE, + caption= "Performance indicators", + auto=TRUE + ) + + # Ouputs for the PI plotting tab + # You can't have the same output ID but you can set up mutltiple output ids to have the same renderPlot() function + renderPIplot <- renderPlot({ + # Names of the HCRs and PIs in the pistore that we want to plot + hcr_choices <- input$hcrchoice + pi_choices <- input$pichoice + # If no HCR or PI is selected then don't do anything + if(is.null(hcr_choices) | 
is.null(pi_choices)){ + return() + } + plot_pi_choice(pis=pistore(), hcr_choices=hcr_choices, pi_choices=pi_choices, plot_choice=input$comparisontabs) + }) + output$plotpibox <- renderPIplot + output$plotpimed <- renderPIplot + output$plotpiradar <- renderPIplot + + output$plotmajuroall <- renderPlot({ + hcr_choices <- input$hcrchoice + if(is.null(hcr_choices)){ + return() + } + plot_majuro_all_stocks(timeseries=tsstore(), hcr_choices=hcr_choices, stock_params=get_stock_params()) + }) + + output$plottimeseries <- renderPlot({ + # Names of the HCRs that we want to plot + hcr_choices <- input$hcrchoice + # If no HCR is selected then don't do anything + if(is.null(hcr_choices)){ + return() + } + plot_timeseries(timeseries=tsstore(), hcr_choices=hcr_choices, stock_params=get_stock_params(), show_spaghetti=input$spaghetti) + }) + + # Termination script - needed when running from bat file + session$onSessionEnded(function() { + stopApp() + }) +} + +# Run the app +shinyApp(ui, server) diff --git a/MeasuringPerformance/www/spc.png b/MeasuringPerformance/www/spc.png new file mode 100644 index 0000000..950a3bd Binary files /dev/null and b/MeasuringPerformance/www/spc.png differ diff --git a/R/funcs.R b/R/funcs.R new file mode 100644 index 0000000..32d994e --- /dev/null +++ b/R/funcs.R @@ -0,0 +1,548 @@ +# Copyright 2018 OFP SPC MSE Team. Distributed under the GPL 3 +# Maintainer: Finlay Scott, OFP SPC + +# Functions and that + +# This looks pretty crummy +maintainer_and_licence <- function(){ + out <- tags$html( + tags$h1("AMPED"), + tags$p("Amazing Management Procedures Exploration Device"), + #tags$p(strong("A"),"mazing", strong("M"), "anagement ", strong("P"),"rocedure ", strong("E"),"xploration ", strong("D"),"evice"), + tags$footer( + tags$p("version 0.1.0 Rupert Trousers"), + tags$p("Copyright 2018 OFP SPC MSE Team."), + tags$p("Distributed under the GPL 3"), + tags$a("Soure code", href="https://github.com/PacificCommunity/ofp-sam-mse-popmodel-Shiny/tree/master/AMPED") + ) + ) + return(out) +} + +# Note +# F * q = effort? +# F = C / B +# Do we need an effort slot? +# We never report real effort - just relative so q gets lost +# Could probably streamline all this as effort is essentially a derived quantity - derived from catch and biomass +# Just have to be careful with how we are dealing with the effort based HCRs +# And always deal with relative CPUE and relative effort and relative F so we don't have to use q + +# Make the empty reactive stock object +create_stock <- function(){ + stock <- reactiveValues( + biomass = NULL, + hcr_ip = NULL, + hcr_op = NULL, + effort = NULL, + catch=NULL + ) + return(stock) +} + +# I think that stock is getting passed by reference as when these functions are called the return value is not stored but stock is updated +# But it does need to be passed in - i.e. 
it is not found in the calling environment +clear_stock <- function(stock, app_params, nyears, niters){ + initial_array <- array(NA, dim=c(niters, nyears), dimnames=list(iter=1:niters, year=app_params$initial_year:(app_params$initial_year+nyears-1))) + stock$biomass <- initial_array + stock$hcr_ip <- initial_array + stock$hcr_op <- initial_array + stock$catch <- initial_array + stock$effort <- initial_array +} + +get_catch_history <- function(stock_params, app_params, niters){ + # Return an array of catches to drive the initial dynamics of the stock + # It will depend on the stock_history parameter, r and k + # MSY = r K / 4 # BMSY = K / 2 # FMSY = r/2 + # Make one trajectory, then repeat with a load of noise on it + msy <- stock_params$r * stock_params$k / 4 + catch <- switch(stock_params$stock_history, + "under" = rep(2*msy/3, app_params$last_historical_timestep), + "fully" = rep(msy, app_params$last_historical_timestep), + #"over" = rep(3*msy/2, app_params$last_historical_timestep) + "over" = seq(from=3*msy/4, to=4*msy/3, length=app_params$last_historical_timestep) + ) + #catch <- seq(from=msy, to=msy/2,length=app_params$last_historical_timestep) + out <- array(NA,dim=c(niters, app_params$last_historical_timestep)) + out[] <- rep(catch, each=niters) + # Sling a load of noise on it + out <- out * rlnorm(prod(dim(out)),meanlog=0,sdlog=0.1) + return(out) +} + +fill_initial_stock <- function(stock, stock_params, mp_params, initial_biomass, app_params){ + stock$biomass[,1] <- initial_biomass + # First X years have history + # Need some way of setting the stock history - a choice + catch_history <- get_catch_history(stock_params, app_params, niters=dim(stock$biomass)[1]) + stock$catch[,1:app_params$last_historical_timestep] <- catch_history + + hcr_ip_yrs <- 1:(app_params$last_historical_timestep - mp_params$timelag + 1) # + 1 to match the biomass + hcr_op_yrs <- (1+mp_params$timelag):(app_params$last_historical_timestep + 1) # + 1 to match the biomass + # Need to set an initial HCR IP? + for (yr in 1:app_params$last_historical_timestep){ + # Should be able to handle multiple iters + next_biomass <- isolate(get_next_biomass(stock$biomass[,yr], stock$catch[,yr], stock_params)) + stock$biomass[,yr+1] <- next_biomass + } + # The IP and OP functions must work for iters + # yr argument is the timestep range of the HCR IP - i.e. lagging behind catch + # Should be able to handle multiple iters + stock$hcr_ip[,hcr_ip_yrs] <- get_hcr_ip(stock=stock, stock_params=stock_params, mp_params=mp_params, yr=hcr_ip_yrs) + stock$hcr_op[,hcr_op_yrs] <- get_hcr_op(stock=stock, stock_params=stock_params, mp_params=mp_params, yr=hcr_ip_yrs) + stock$effort <- stock$catch / (stock$biomass * stock_params[["q"]]) +} + +reset_stock <- function(stock, stock_params, mp_params, app_params, initial_biomass, nyears, niters){ + clear_stock(stock=stock, app_params=app_params, nyears, niters) + fill_initial_stock(stock=stock, stock_params=stock_params, mp_params=mp_params, initial_biomass=initial_biomass, app_params=app_params) +} + +# quantiles is of length 2, lower and upper +get_timeseries <- function(stock=stock, stock_params=stock_params, app_params=app_params, quantiles=quantiles, nspaghetti=5){ + # Make a data.frame with year, metric, quantile, value + # quantile are based on the lower and upper quantile + # Do we also want spaghetti? could do + # metrics are B/K, Catch, Rel. 
CPUE, F/FMSY + bk <- stock$biomass / stock_params$k + bkq <- get_quantiles(bk, quantiles) + bkq <- cbind(metric="bk", name="SB/SBF=0", level=c("lower","median","upper"),as.data.frame(bkq)) + + catchq <- get_quantiles(stock$catch, quantiles) + catchq <- cbind(metric="catch", name="Catch", level=c("lower","median","upper"),as.data.frame(catchq)) + + cpue <- stock$catch / stock$effort + rel_cpue <- sweep(cpue, 1, cpue[,app_params$last_historical_timestep], "/") + rel_cpueq <- get_quantiles(rel_cpue, quantiles) + rel_cpueq <- cbind(metric="relcpue", name="Rel. CPUE", level=c("lower","median","upper"),as.data.frame(rel_cpueq)) + + fmsy <- stock_params$r / 2 + f <- stock$catch / stock$biomass + ffmsy <- f / fmsy + ffmsyq <- get_quantiles(ffmsy, quantiles) + ffmsyq <- cbind(metric="ffmsy", name="F / FMSY", level=c("lower","median","upper"),as.data.frame(ffmsyq)) + + qout <- rbind(bkq, catchq, rel_cpueq, ffmsyq) + qout <- gather(qout, key="year", value="value",-level, -metric, -name) + qout <- cbind(qout, type="quantile") + + # Spaghetti + # Pick 5 iters at random + nspaghetti <- min(nspaghetti, dim(bk)[1]) + spaghetti_iters <- round(runif(nspaghetti, min=1, max=dim(bk)[1])) + bksp <- cbind(metric="bk", name="SB/SBF=0", level=paste("spag",as.character(spaghetti_iters),sep=""),as.data.frame(bk[spaghetti_iters,])) + catchsp <- cbind(metric="catch", name="Catch", level=paste("spag",as.character(spaghetti_iters),sep=""),as.data.frame(stock$catch[spaghetti_iters,])) + ffmsysp <- cbind(metric="ffmsy", name="F / FMSY", level=paste("spag",as.character(spaghetti_iters),sep=""),as.data.frame(ffmsy[spaghetti_iters,])) + rel_cpuesp <- cbind(metric="relcpue", name="Rel. CPUE", level=paste("spag",as.character(spaghetti_iters),sep=""),as.data.frame(rel_cpue[spaghetti_iters,])) + spout <- rbind(bksp, catchsp, rel_cpuesp, ffmsysp) + spout <- gather(spout, key="year", value="value",-level, -metric, -name) + spout <- cbind(spout, type="spaghetti") + + out <- rbind(qout,spout) + # Producing warnings about factors? + out$level <- as.character(out$level) + out$type <- as.character(out$type) + out$metric <- as.character(out$metric) + out$name <- as.character(out$name) + out$year <- as.numeric(out$year) + return(out) +} + + +# Observation error is a combination of bias and lognormally distributed noise +estimation_error <- function(input, sigma, bias){ + est_variability <- rlnorm(length(input),meanlog=0,sdlog=sigma) + output <- input * est_variability * (1 + bias) + return(output) + +} + +# The main projection function +# Projects the stock over the timesteps given and updates the biomass, observed biomass and catches +# params - named vector with r, k, p, q (catchability) +# timesteps - vector of length 2, the timesteps to project over (catch or biomass as we always get biomass in t+1?) +# biomass - array: niter * timesteps +# catch- array: niter * timesteps +# Project the population and catch over the timesteps +# We can always get the next biomass given last years catch (if there is space) +# Could just pass in whole stock object and an iteration number instead the bits +# biomass, biomass_obs and catch can have multiple iterations +# biomass, biomass_obs and catch are arrays [niters, nyears] +# timelag is lag between observed ip timing and HCR op timing, or between i.e. 
op in time t = f(ip in time t - timelag) +# A lag of 0 means that the catch in year Y is given by the biomass at the start of year Y (same element in the array) +# Biomass at the start of the timestep +# Biomass is one year ahead of catch +project <- function(stock, timesteps, stock_params, mp_params, app_params){ + #biomass <- stock$biomass + #biomass_obs <- stock$biomass_obs + #catch <- stock$catch + # Check timesteps - should be a range of two values + if (length(timesteps) == 1){ + timesteps <- rep(timesteps,2) + } + if (length(timesteps) != 2){ + stop("In project(). timesteps argument should be of length 2.") + } + if((timesteps[1] - mp_params$timelag) < 1){ + stop("In project(). Trying to access element in yr less than 1") + } + # Loop over the timesteps and update catch and biomass + # yr is the year we get catch for + # yr + 1 is the year we get true biomass for + for (yr in timesteps[1]:timesteps[2]){ + # Run the MP analysis to generate stock$hcr_ip, e.g. assessment to estimate B/K, or some kind of CPUE magic + # Turn the HCR op into catch - move to separate function? + #est_yr <- yr - mp_params$timelag + # Implementation function stuff + if (mp_params$output_type == "catch"){ + stock$catch[,yr] <- stock$hcr_op[,yr] + stock$effort[,yr] <- stock$catch[,yr] / (stock_params$q * stock$biomass[,yr]) + } + # Test this to death with different yr ranges etc + if (mp_params$output_type == "relative effort"){ + # HCR OP is relative to last historical effort + stock$effort[,yr] <- stock$effort[,app_params$last_historical_timestep] * stock$hcr_op[,yr] + stock$catch[,yr] <- stock$effort[,yr] * stock_params$q * stock$biomass[,yr] + } + # If room get the biomass in the next timestep (using previously set catch) + # Bt+1 = Bt + f(Bt) - Ct + # f(Bt) = r/p Bt * (1 - (Bt/k)^p) + if (yr < dim(stock$biomass)[2]){ + next_biomass <- get_next_biomass(biomass=stock$biomass[,yr], catch=stock$catch[,yr], stock_params=stock_params) + stock$biomass[,yr+1] <- next_biomass + # + stock$hcr_ip[,yr+1-mp_params$timelag] <- get_hcr_ip(stock=stock, stock_params=stock_params, mp_params=mp_params, yr=yr+1-mp_params$timelag) + # + stock$hcr_op[,yr+1] <- get_hcr_op(stock=stock, stock_params=stock_params, mp_params=mp_params, yr=yr+1-mp_params$timelag) + } + # Strictly we should do these separately so we can fill up the HCR IP as full as possible + # But it makes the last couple of years on the plots a bit tricky + # We lost the relationship between ip and op - (timelag is lost) - we could be stricter with the plots but this is better + # as we do not have an extra year of output + #if ((yr - mp_params$timelag) < dim(stock$biomass)[2]){ + # # Do assessment and set the catch (target) for the following year + # #stock$hcr_ip[,yr+1] <- get_hcr_ip(stock=stock, stock_params=stock_params, mp_params=mp_params, yr=yr+1-mp_params$timelag) + # stock$hcr_ip[,yr+1-mp_params$timelag] <- get_hcr_ip(stock=stock, stock_params=stock_params, mp_params=mp_params, yr=yr+1-mp_params$timelag) + #} + #if (yr < dim(stock$biomass)[2]){ + # stock$hcr_op[,yr+1] <- get_hcr_op(stock=stock, stock_params=stock_params, mp_params=mp_params, yr=yr+1-mp_params$timelag) + #} + } + return(stock) +} + +# The surplus production model +# General form: +# Bt+1 = Bt + f(Bt) - Ct +# Pella & Tomlinson +# f(Bt) r/p Bt * (1 - (Bt/k)^p +# set p = 1 to get Schaefer +# cpue = Ct / Et = qBt +# biomass, catch can have multiple iterations +# but this is one just timstep so we are dealing a with a vector of iterations + +# Nasty global variable - or generate all at the same 
time and reference it by iter and year? +corr_noise <- 0 +get_next_biomass<- function(biomass, catch, stock_params){ + fB <- (stock_params[["r"]] / stock_params[["p"]]) * biomass * (1 - (biomass / stock_params[["k"]])^stock_params[["p"]]) + # Apply lognormal noise to r + #process_variability <- rlnorm(length(biomass),meanlog=0,sdlog=stock_params[["biol_prod_sigma"]]) + #fB <- fB * process_variability + # A value of b = 0.5 is red noise, make redder by increasing (< 1) + # Currently not an input + corr_noise <<- next_corrnoise(corr_noise, b=0.5, sd=stock_params[["biol_prod_sigma"]]) + fB <- fB * (corr_noise + 1) + next_biomass <- biomass + fB - catch + # Biomass cannot be less than 1e-6 + next_biomass <- pmax(next_biomass,1e-6) + return(next_biomass) +} + +# Run the MP analyses function to generate the input to the HCR +# i.e. observed data +# e.g. observed biomass from an assessment +# analysis functions: "assessment" or NA +# yr is the timestep that use to generate the HCR IP in the current year +# e.g. look at biomass / k in year yr +get_hcr_ip <- function(stock, stock_params, mp_params, yr){ + # Check for NA in mp_analysis, if so return NA + if (is.na(mp_params$mp_analysis)){ + hcr_ip <- NA + } + # If not NA call the analysis function + else { + # Call HCR with correct inputs: + hcr_ip <- do.call(mp_params$mp_analysis, args=list(stock=stock, mp_params=mp_params, stock_params=stock_params, yr=yr)) + } + return(hcr_ip) +} + + +# Evaluates the HCR using do.call() and returns a catch for the biomass dynamic model +# Inputs to the HCRs have already been calculated and are stored in stock$hcr_ip +# Outputs from the HCR can be different (e.g. catch, catch multiplier, effort, effort multiplier) +# Evaluates one timestep (yr), multiple iterations (chance of multiple timesteps? 
maybe - need to be careful with dims) +# yr is the yr of the stock$hcr$input to be used +get_hcr_op <- function(stock, stock_params, mp_params, yr){ + # Shape is not NA + # Call HCR with the lagged input + #hcr_op <- do.call(mp_params$hcr_shape, args=list(stock=stock, mp_params=mp_params, stock_params=stock_params, yr=yr)) + hcr_op <- do.call(mp_params$hcr_shape, args=list(input=stock$hcr_ip[,yr,drop=FALSE], mp_params=mp_params, stock_params=stock_params, yr=yr)) + return(hcr_op) +} + +#------------------------------------------------------------------- +# HCR functions + +# Analysis functions: +# Called by get_hcr_ip() +# asessment: a stock assessment that estimates biomass / k +# The analysis assessment function +assessment <- function(stock, mp_params, stock_params, yr){ + # Return observed biomass + true_ip <- stock$biomass[,yr] / stock_params$k + est_ip <- estimation_error(input = true_ip, sigma = stock_params$biol_est_sigma, bias = stock_params$biol_est_bias) + est_ip <- pmin(est_ip, 1.0) + return(est_ip) +} + +# HCR shape functions +# Called by get_hcr_op() + +threshold <- function(input, mp_params, ...){ + output <- array(NA, dim=dim(input)) + # Below lim + output[input <= mp_params$params["lim"]] <- mp_params$params["min"] + # On the slope + grad <- (mp_params$params["max"] - mp_params$params["min"]) / (mp_params$params["elbow"] - mp_params$params["lim"]) + on_slope <- (input > mp_params$params["lim"]) & (input <= mp_params$params["elbow"]) + output[on_slope] <- ((input - mp_params$params["lim"]) * grad + mp_params$params["min"])[on_slope] + # Past the elbow + output[input > mp_params$params["elbow"]] <- mp_params$params["max"] + return(output) +} + +constant <- function(mp_params, ...){ + return(mp_params$params["constant_level"]) +} + +#------------------------------------------------------------------- +# Performance indicators + +# Other methods are available +transform_upside_down_pis <- function(x){ + #min_value <- 1e-6 + #adjuster <- -1.0001 + #x <- pmax(x, min_value) + #x <- log(x) + log(min_value) * adjuster # Now x is between just greater than 0 and something positive + #x <- (log(min_value) + log(min_value) * adjuster) / (x) + #return(x) + return(-x) # Just return the negative - simple! +} + +# Get pis by year and iter +# Stick into a single data.frame +get_pis <- function(stock, stock_params, mp_params, app_params){ + rel_year <- app_params$initial_year + app_params$last_historical_timestep - 1 + bkm <- stock$biomass / stock_params$k + # B/K + bk <- cbind(pi="bk", iter=1:nrow(bkm), as.data.frame(bkm), name="SB/SBF=0") + # Total catch + catch <- cbind(pi="catch", iter=1:nrow(stock$catch), as.data.frame(stock$catch), name="Catch") + # Variability in catch + diffcatch <- abs(t(apply(stock$catch, 1, diff))) # Need to transpose due to odd apply() behaviour + #diffcatch <- transform_upside_down_pis(diffcatch) + # Stick some NAs for the first column and name column + diffcatch <- cbind(NA, diffcatch) + colnames(diffcatch)[1] <- colnames(stock$catch)[1] + diffcatch <- cbind(pi="diffcatch", iter=1:nrow(diffcatch), as.data.frame(diffcatch), name="Catch variability") + # Relative effort + # Base effort is effort in the last historical timestep + releffort <- sweep(stock$effort, 1, stock$effort[,app_params$last_historical_timestep], "/") + releffort <- cbind(pi="releffort", iter=1:nrow(releffort), as.data.frame(releffort), name=paste("Effort (rel. 
", rel_year," )", sep="")) + # Variability of relative effort + diffeffort <- abs(t(apply(stock$effort, 1, diff))) # Need to transpose due to odd apply() behaviour + #diffeffort <- transform_upside_down_pis(diffeffort) + diffeffort <- cbind(NA, diffeffort) + colnames(diffeffort)[1] <- colnames(stock$effort)[1] + diffeffort <- cbind(pi="diffeffort", iter=1:nrow(diffeffort), as.data.frame(diffeffort), name="Effort variability") + # Relative CPUE + cpue <- stock$catch / stock$effort + relcpuem <- sweep(cpue, 1, cpue[,app_params$last_historical_timestep], "/") + relcpue <- cbind(pi="relcpue", iter=1:nrow(relcpuem), as.data.frame(relcpuem), name=paste("CPUE (rel. ", rel_year, " )", sep="")) + # Variability of relative CPUE + diffcpue <- abs(t(apply(relcpuem, 1, diff))) # Need to transpose due to odd apply() behaviour + #diffcpue <- transform_upside_down_pis(diffcpue) + diffcpue <- cbind(NA, diffcpue) + colnames(diffcpue)[1] <- colnames(stock$effort)[1] + diffcpue <- cbind(pi="diffcpue", iter=1:nrow(diffcpue), as.data.frame(diffcpue), name="CPUE variability") + + # Probabilities in each year + problrp <- apply(bkm > stock_params$lrp,2,mean) + # A bit of mucking about to get a data.frame + problrp <- cbind(pi="problrp", iter=1, as.data.frame(t(problrp)), name="Prob. SB > LRP") + + # Put altogether and reshape + out <- rbind(bk, problrp, catch, diffcatch, releffort, diffeffort, relcpue, diffcpue) + # Use tidyr or something else to put into a single df + out <- gather(out, key="year", value="value", -iter, -pi, -name) + + # F***ing factors! + out$name <- as.character(out$name) + return(out) +} + +get_time_periods <- function(app_params, nyears){ + nyears <- nyears - app_params$last_historical_timestep + term_length <- floor(nyears / 3) + short_term_length <- term_length + medium_term_length <- term_length + long_term_length <- term_length + extra_bit <- nyears - (term_length * 3) + if(extra_bit == 1){ + long_term_length <- long_term_length + 1 + } + if(extra_bit == 2){ + long_term_length <- long_term_length + 1 + medium_term_length <- medium_term_length + 1 + } + short_term <- (app_params$last_historical_timestep + 1):(app_params$last_historical_timestep + short_term_length) + medium_term <- (short_term[length(short_term)] + 1):(short_term[length(short_term)] + medium_term_length) + long_term <- (medium_term[length(medium_term)] + 1):(medium_term[length(medium_term)] + medium_term_length) + return(list(short_term=short_term, medium_term=medium_term, long_term=long_term)) +} + +# Calc the PIs +get_summary_pis <- function(stock, stock_params, mp_params, app_params, quantiles){ + years <- dimnames(stock$biomass)$year + # Average them over years? S, M, L? 
+ # Average over years - then calc quantiles + time_periods <- get_time_periods(app_params, nyears=dim(stock$biomass)[2]) + short_term <- years[time_periods[["short_term"]]] + medium_term <- years[time_periods[["medium_term"]]] + long_term <- years[time_periods[["long_term"]]] + + pis <- get_pis(stock=stock, stock_params=stock_params, mp_params=mp_params, app_params=app_params) + pis <- pis[pis$year %in% c(short_term, medium_term, long_term),] + + # Add timeperiod column - as.data.frame to use stringsAsFactors + pis <- as.data.frame(cbind(pis, term=as.character(NA), stringsAsFactors=FALSE)) + pis[pis$year %in% short_term,"term"] <- "short" + pis[pis$year %in% long_term,"term"] <- "long" + pis[pis$year %in% medium_term,"term"] <- "medium" + # Drop the rows that do not have a term + # This seems to take a long time + + # Take median value over the timeperiods + yearav <- pis %>% group_by(pi, iter, term, name) %>% summarise(value=median(value, na.rm=TRUE)) + # Take the quantiles - + qmed <- c(quantiles, 0.5) + piqs <- yearav %>% group_by(pi, term, name) %>% summarise(quantiles = list(paste("q",qmed*100,sep="")), + value = list(quantile(value, qmed)))%>% unnest + return(list(piqs=piqs, terms=list(short=short_term, medium=medium_term, long=long_term))) +} + +get_quantiles <- function(dat, quantiles){ + qs <- apply(dat, 2, function(x){quantile(x, probs=c(quantiles[1], 0.5, quantiles[2]), na.rm=TRUE)}) + return(qs) +} + +current_pi_table <- function(pis){ + pis$piqs + piqs <- pis$piqs + # Beat this into a table + # Each row is a PI + # 4 Columns: Name, short, medium, long + # median (quantile) + + signif <- 2 + upis <- as.character(unique(piqs$pi)) + uterm <- as.character(unique(piqs$term)) + out <- data.frame() + for (upi in upis){ + tempdat <- subset(piqs, term =="short" & pi==upi) + svalues <- signif(sort(subset(piqs, term =="short" & pi==upi)$value),signif) + mvalues <- signif(sort(subset(piqs, term =="medium" & pi==upi)$value),signif) + lvalues <- signif(sort(subset(piqs, term =="long" & pi==upi)$value),signif) + # If no uncertainty + if ((svalues[2] == svalues[1]) & (svalues[4] == svalues[5]) & + (mvalues[2] == mvalues[1]) & (mvalues[4] == mvalues[5]) & + (lvalues[2] == lvalues[1]) & (lvalues[4] == lvalues[5])){ + sval <- as.character(svalues[3]) + mval <- as.character(mvalues[3]) + lval <- as.character(lvalues[3]) + } + else { + sval <- paste(svalues[3], " (", svalues[2], ",", svalues[4],")", sep="") + mval <- paste(mvalues[3], " (", mvalues[2], ",", mvalues[4],")", sep="") + lval <- paste(lvalues[3], " (", lvalues[2], ",", lvalues[4],")", sep="") + } + out <- rbind(out, data.frame(name = tempdat$name[1], short = sval, medium=mval, long=lval)) + } + rownames(out) <- out$name + # Drop rownames column + out <- out[,colnames(out) != "name"] + term_years <- pis$term + colnames(out) <- c(paste("Short (", term_years$short[1], "-", term_years$short[length(term_years$short)],")", sep=""), + paste("Med. (", term_years$medium[1], "-", term_years$medium[length(term_years$medium)],")", sep=""), + paste("Long (", term_years$long[1], "-", term_years$long[length(term_years$long)],")", sep="")) + + return(out) +} + + +big_pi_table <- function(pis, hcr_choices, pi_choices){ + if (length(pis) == 0){ + return() + } + # Put into one big DF + piqs <- lapply(pis, function(x) return(x$piqs)) + piqs <- bind_rows(piqs, .id="hcr") + + # Sanity check - if all the hcr choices are not in the PI table then something has gone wrong + if (!all(hcr_choices %in% unique(piqs$hcr))){ + stop("In big_pi_table(). 
Not all hcr_choices are in the hcr list.\n") + } + signif <- 2 + # Big fuck-off table: PIs along top, HRCs in the rows + # Just the long term + # Just the median and two quantiles + # which quantiles do we want? drop the "q" from the front and order + qs <- sort(as.numeric(substring(unique(piqs$quantiles),2))) + qs <- paste("q", qs[2:4],sep="") + piqs <- subset(piqs, (hcr %in% hcr_choices) & (name %in% pi_choices) & (term == "long") & (quantiles %in% qs)) + piqs$value <- signif(piqs$value, signif) + dat <- spread(piqs, key="quantiles", value="value") + # Make the text values + dat$valtxt <- as.character(NA) + # Which rows have same qs - no uncertainty + certain_rows <- c((dat[,qs[2]] == dat[,qs[1]]) & (dat[,qs[2]] == dat[,qs[3]])) + # uncertain rows + uncertain_valtx <- paste(unlist(dat[,qs[2]])," (", + unlist(dat[,qs[1]]),",", + unlist(dat[,qs[3]]),")",sep="")[!certain_rows] + certain_valtx <- paste(unlist(dat[,qs[2]]))[certain_rows] + dat$valtxt[certain_rows] <- certain_valtx + dat$valtxt[!certain_rows] <- uncertain_valtx + + dat <- dat[,c("hcr","name","valtxt")] + out <- as.data.frame(dat %>% spread(key="name", value="valtxt")) + rownames(out) <- out$hcr + out <- out[,colnames(out) != "hcr"] + return(out) +} + +#------------------------------------------------------------------- +#' @references Ranta and Kaitala 2001 Proc. R. Soc. +#' vt = b * vt-1 + s * sqrt(1 - b^2) +#' s is normally distributed random variable with mean = 0 +#' b is the autocorrelation parameter > -1 and < 1 +#' e.g. -0.8 very blue, 0.8 red +#' @export +next_corrnoise <- function(x, b, sd=0.1){ + s <- rnorm(1,mean=0,sd=sd) + nextx <- b * x + s * sqrt(1 - b^2) + return(nextx) +} + diff --git a/R/modules.R b/R/modules.R new file mode 100644 index 0000000..24d8b3b --- /dev/null +++ b/R/modules.R @@ -0,0 +1,217 @@ +# Copyright 2018 OFP SPC MSE Team. 
Distributed under the GPL 3 +# Maintainer: Finlay Scott, OFP SPC + +# Shiny modules and other reusable functions +# Some of these should be moved to funcs - split between them is not clear at the moment + +# Option of separating the stochasticity and stock parameters into two modules +# Just means combining the two lists of values before dispatching them to the functions + +## Combining the stoch and stock options +## Gets fiddly and possibly restrictive - abandon +## Module for the biological process variability +#stoch_params_setterUI <- function(id, init_prod_sigma=0.0, init_est_sigma=0.0, init_est_bias=0.0, choose_stock=FALSE){ +# ns <- NS(id) +# show_var <- checkboxInput(ns("show_var"), label = "Show variability options", value = FALSE) +# vars <- conditionalPanel(condition="input.show_var == true", ns=ns, +# numericInput(ns("biol_prod_sigma"), label = "Biological productivity variability", value = init_prod_sigma, min=0, max=1, step=0.05), +# numericInput(ns("biol_est_sigma"), label = "Estimation error variability", value = init_est_sigma, min=0, max=1, step=0.01), +# numericInput(ns("biol_est_bias"), label = "Estimation error bias", value = init_est_bias, min=-0.5, max=0.5, step=0.05) +# ) +# stock_option <- NULL +# if (choose_stock == TRUE){ +# stock_option <- radioButtons("stock_choice", h3("Stock choice"), +# choices = list("Fast growth" = 1, "Medium growth" = 2, +# "Slow growth" = 3),selected = 2) +# } +# out <- tagList(show_var, vars, stock_option) +# return(out) +#} +# +##stoch_params_setter<- function(input, output, session, stock_params){ +#stoch_params_setter<- function(input, output, session){ +# get_stock_params <- reactive({ +# browser() +# +# # Stock option is fixed +# if (is.null(input$stock_choice)) +# +# out <- list( +# r = 0.32, k = 49275, b0 = 37050, p = 1, +# q = 1, # q is catchability so that CPUE = q * B. 
With effort creep this can change with time +# lrp=0.2, trp=0.5, # These are SB/SBF=0 based reference points - inline with WCPFC tuna stocks +# biol_prod_sigma=input$biol_prod_sigma, +# biol_est_sigma=input$biol_est_sigma, +# biol_est_bias=input$biol_est_bias +# ) +# return(out) +# }) +# return(get_stock_params) +#} + +# Module for the stochasticity options +stoch_params_setterUI <- function(id, init_prod_sigma=0.0, init_est_sigma=0.0, init_est_bias=0.0, show_var=TRUE){ + ns <- NS(id) + show_var <- checkboxInput(ns("show_var"), label = "Show variability options", value = show_var) + vars <- conditionalPanel(condition="input.show_var == true", ns=ns, + tags$span(title="Natural variability in the productivity of the stock through changes in growth and natural mortality", + numericInput(ns("biol_prod_sigma"), label = "Biological productivity variability", value = init_prod_sigma, min=0, max=1, step=0.05) + ), + tags$span(title="Simulating the difference between the 'true' biomass and the 'estimated' biomass used by the HCR by applying randomly generated noise", + numericInput(ns("biol_est_sigma"), label = "Estimation error variability", value = init_est_sigma, min=0, max=1, step=0.01) + ), + tags$span(title="Simulating the difference between the 'true' biomass and the 'estimated' biomass used by the HCR by applying a continuous bias (positive or negative)", + numericInput(ns("biol_est_bias"), label = "Estimation error bias", value = init_est_bias, min=-0.5, max=0.5, step=0.05) + ) + ) + out <- tagList(show_var, vars) + return(out) +} + +stoch_params_setter<- function(input, output, session){ + get_stoch_params <- reactive({ + out <- list(biol_prod_sigma=input$biol_prod_sigma, + biol_est_sigma=input$biol_est_sigma, + biol_est_bias=input$biol_est_bias + ) + return(out) + }) + return(get_stoch_params) +} + +# Choose if we want fast or slow growth etc +stock_params_setterUI <- function(id){ + ns <- NS(id) + stock_lh <- tags$span(title="Choose the life history of the stock: slow, medium or fast growing. Some HCRs are more appropriate for different life histories.", + radioButtons(ns("stock_lh"), label= "Stock life history", + choices = list("Slow growth" = "slow", "Medium growth" = "medium", + "Fast growth" = "fast"), selected = "medium") + ) + stock_history <- tags$span(title="Choose the history of the stock. Underexploited means that the stock could potentially be fished harder. Overexploited means that a recovery plan may be needed.", + radioButtons(ns("stock_history"), label= "Stock history", + choices = list("Underexploited" = "under", "Fully exploited" = "fully", + "Overexploited" = "over"), selected = "fully") + ) + return(tagList(stock_lh, stock_history)) +} + +# From Andre's original code +#Population <- list( +# list(K = 5964.506,r=0.6929,dmsy2=0.36788*2,PType="Fox"), +# list(K = 4868.650,r=1.1621,dmsy2=0.36788*2,PType="Fox"), +# list(K = 4241.026,r=1.5753,dmsy2=0.36788*2,PType="Fox") +# ) + +# Or use the stocks from Polacheck +# Fitting Surplus Production Models: Comparing Methods and Measuring Uncertainty +# Article in Canadian Journal of Fisheries and Aquatic Sciences · December 1993 +#Fitting Surplus Production Models: Comparing Methods and Measuring Uncertainty +#Tom Polacheck, , Ray Hilborn, and , Andre E. 
Punt +#Canadian Journal of Fisheries and Aquatic Sciences, 1993, 50(12): 2597-2607, https://doi.org/10.1139/f93-284 +# SA Albacore: r=0.328, k=239.6 +# New Zealand Rock Lobster: r=0.0659 k=129 +# Northern Namibian Hake: r=0.379 k= 2772.6 + +stock_params_setter <- function(input, output, session){ + get_stock_params <- reactive({ + # Set r and k depending on the stock choice radio button + # Set MSY to be a 100 for each stock + msy <- 100 + # MSY = rk/4 + r <- switch(input$stock_lh, + "slow" = 0.2, + "medium" = 0.6, + "fast" = 1.0) + k <- 4 * msy / r + out <- list( + r = r, k = k, b0 = k * 2/3, p = 1, + stock_history = input$stock_history, # to set up the initial trajectory + q = 1, # q is catchability so that CPUE = q * B. With effort creep this can change with time + lrp=0.2, trp=0.5 # These are SB/SBF=0 based reference points - inline with WCPFC tuna stocks + ) + return(out) + }) + return(get_stock_params) +} + +# Modules for setting the MP parameters +# Bit fiddly with the conditional panels inside a module +mp_params_setterUI <- function(id, mp_visible=NULL, init_thresh_max_catch=140){ + ns <- NS(id) + all_hcrs <- c("Constant catch" = "constant_catch", + "Constant effort" = "constant_effort", + "Threshold catch" = "threshold_catch", + "Threshold effort" = "threshold_effort") + # The MPs that are visible in the UI are controlled by mp_visible argument + # If not null it should have the names of the HCRs, matching those in the all_hcrs object + #if (is.null(mp_visible)){ + # hcr_choices <- all_hcrs + #} + #else { + + + # Same order as mp_visible + #hcr_choice_names <- names(all_hcrs)[names(all_hcrs) %in% mp_visible] + #hcr_choices <- all_hcrs[hcr_choice_names[order(mp_visible)]] + hcr_choices <- all_hcrs[mp_visible] + #hcr_choices <- all_hcrs[names(all_hcrs) %in% mp_visible] + #} + + # HCR options + hcr_type <- tags$span(title="Select the type of HCR you want to test.", + selectInput(ns("hcr_type"), "HCR type", choices = hcr_choices)) + # The various parameter options + ccpars <- conditionalPanel("input.hcr_type == 'constant_catch'", ns=ns, + tags$span(title="Ignores stock status (e.g. estimated bimoass) and sets a constant catch limit.", + sliderInput(ns("constant_catch_level"), "Constant catch level:", min = 0, max = 150, value = 100, step = 1)) + ) + cepars <- conditionalPanel("input.hcr_type == 'constant_effort'", ns=ns, + tags$span(title="Ignores stock status (e.g. 
estimated bimoass) and sets a constant effort limit.", + sliderInput(ns("constant_effort_level"), "Constant relative effort level:", min = 0, max = 3, value = 1, step = 0.01)) + ) + # Blim and Belbow used by threshold catch and threshold effort # || JavaScript OR + tctepars <- conditionalPanel("input.hcr_type == 'threshold_catch' || input.hcr_type == 'threshold_effort' ", ns=ns, + tags$span(title="The biomass levels that determine the shape of the HCR.", + sliderInput(ns("blim_belbow"), "Blim and Belbow:", min = 0, max = 1, value = c(0.2,0.5), step = 0.01)) + ) + tcpars <- conditionalPanel("input.hcr_type == 'threshold_catch'", ns=ns, + tags$span(title="The minimum and maximum catch limit levels output from the HCR.", + sliderInput(ns("cmin_cmax"), "Cmin and Cmax:", min = 0, max = 250, value = c(10, init_thresh_max_catch), step = 1)) + ) + tepars <- conditionalPanel("input.hcr_type == 'threshold_effort'", ns=ns, + tags$span(title="The minimum and maximum effort limit levels output from the HCR.", + sliderInput(ns("emin_emax"), "Emin and Emax:", min = 0, max = 3, value = c(0.1,1.0), step = 0.01)) + ) + out <- tagList(hcr_type, ccpars, cepars, tctepars, tcpars, tepars) + return(out) +} + +mp_params_setter <- function(input, output, session){ + get_mp_params <- reactive({ + # Messy setting of the parameters + out <- switch(input$hcr_type, + threshold_catch = list(hcr_shape = "threshold", mp_analysis = "assessment", + mp_type="model", output_type="catch", + name=paste("Thresh. catch: Blim=",input$blim_belbow[1],",Belbow=",input$blim_belbow[2],",Cmin=",input$cmin_cmax[1],",Cmax=",input$cmin_cmax[2], sep=""), + params = c(lim = input$blim_belbow[1], elbow = input$blim_belbow[2], + min = input$cmin_cmax[1], max = input$cmin_cmax[2])), + constant_catch = list(hcr_shape = "constant", mp_analysis = "assessment", + mp_type="model", output_type="catch", + name=paste("Const. catch: level=",input$constant_catch_level,sep=""), + params = c(constant_level = input$constant_catch_level)), + threshold_effort = list(hcr_shape = "threshold", mp_analysis = "assessment", + mp_type="model", output_type="relative effort", + name=paste("Thresh. effort: Blim=",input$blim_belbow[1],",Belbow=",input$blim_belbow[2],",Emin=",input$emin_emax[1],",Emax=",input$emin_emax[2], sep=""), + params = c(lim = input$blim_belbow[1], elbow = input$blim_belbow[2], + min = input$emin_emax[1], max = input$emin_emax[2])), + constant_effort = list(hcr_shape = "constant", mp_analysis = "assessment", mp_type="model", + output_type="relative effort", + name=paste("Const. effort: level=",input$constant_effort_level,sep=""), + params = c(constant_level = input$constant_effort_level)), + stop("In mp_params_setter. Unrecognised hcr_type.")) + out$timelag <- 0 # 2 + return(out) + }) + return(get_mp_params) +} + diff --git a/R/plots.R b/R/plots.R new file mode 100644 index 0000000..a24f3af --- /dev/null +++ b/R/plots.R @@ -0,0 +1,743 @@ +# Copyright 2018 OFP SPC MSE Team. Distributed under the GPL 3 +# Maintainer: Finlay Scott, OFP SPC + +# Plotting functions +# Globals - eesh! +# Plot params - could make arguments +# WTF? 
There are so many of them - reduce this madness + hcr_lwd <- 2 + hcr_lty <- 1 + hcr_col <- "red" + + true_col <- "black" + + last_col <- "blue" + last_cex <- 2 + last_pch <- 16 + last_lty <- 1 + last_lwd <- 2 + + guide_lty <- 2 + guide_lwd <- 2 + guide_col <- "blue" + + ref_lty <- 3 + ref_lwd <- 2 + ref_col <- "black" + + ghost_col <- "grey" + ghost_pch <- 16 + ghost_cex <- 2 + ghost_lty <- 1 + ghost_lwd <- 2 + + spaghetti_col <- "black" + spaghetti_lty <- 1 + spaghetti_lwd <- 1 + + ribbon_col <- "grey" + ribbon_lty <- 1 + ribbon_border_col="black" + ribbon_lwd = 2 + + median_col <- "blue" + median_lty <- 2 + median_lwd <- 2 + +# Called by a couple of plotting functions +# Better than maintaining same code in multiple places +get_catch_ymax <- function(catch, mp_params){ + # Sort out dimensions and labels + # Bit annoying that we have a switch + ymax <- switch(mp_params$hcr_shape, + constant = mp_params$params["constant_level"], + threshold = mp_params$params["max"] + ) + ymax <- max(c(ymax, c(catch)), na.rm=TRUE) * 1.1 + return(ymax) +} + +sideways_histogram <- function(dat, range, lhist=20, num.dnorm=5*lhist, dcol="blue"){ + yhist <- hist(dat, plot=FALSE, breaks=seq(from=range[1], to=range[2], length.out=lhist)) + # Use dnorm + yx <- seq(range[1], range[2], length.out=num.dnorm) + yy <- dnorm(yx, mean=mean(dat), sd=sd(dat)) + yy[is.infinite(yy)] <- 1.0 + parmar <- par()$mar + par(mar=c(parmar[1], 0, parmar[3], 0)) + barplot(yhist$density, axes=FALSE, xlim=c(0, max(yhist$density, yy)), space=0, horiz=TRUE, xaxs="i", yaxs="i") # barplot + lines(yy, seq(from=0, to=lhist-1, length.out=num.dnorm), col=dcol) # line + # Or use density + #dens <- density(dat) + #barplot(yhist$density, axes=FALSE, xlim=c(0, max(yhist$density, dens$y)), space=0, horiz=TRUE) # barplot + #lines(dens$y, seq(from=0, to=lhist-1, length.out=length(dens$y)), col=dcol) # line +} + +# Draw the HCR (output vs input) +# Input can be SB/SBF=0, or some output from empirical MP +# Try to keep it flexible +# type - are we stepping or projecting? Better way of doing this +# Use ... instead of passing in more args like timestep? +plot_hcr <- function(stock, stock_params, mp_params, app_params, timestep=NULL, show_last=TRUE, ...){ + + if (mp_params$output_type=="catch"){ + ymax <- get_catch_ymax(stock$catch, mp_params) + } + if (mp_params$output_type=="relative effort"){ + rel_effort <- sweep(stock$effort, 1, stock$effort[,app_params$last_historical_timestep], "/") + ymax <- max(c(rel_effort, mp_params$params["max"], mp_params$params["constant_level"]), na.rm=TRUE) * 1.1 + } + + ylab <- paste("Next ", mp_params$output_type, sep="") + yrange <- c(0, ymax) + # Need to set these depending on the HCR somehow + # From MP analysis type? Leave as B/K for now. Could set additional argument in mp_params like xlab? + xrange <- c(0, 1) + #xlab <- "Estimated biomass / K" + xlab <- "Estimated SB/SBF=0" + # Plot empty axes + plot(x=xrange,y=yrange,type="n",xlab=xlab, ylab=ylab, xaxs="i", yaxs="i", main="The HCR") + + # If model based MP (or NA) add the B/K based reference points + if (mp_params$mp_type %in% c(as.character(NA), "model")){ + lines(x=c(stock_params[["lrp"]],stock_params[["lrp"]]), y=c(0, ymax),lty=ref_lty, lwd=ref_lwd, col=ref_col) + lines(x=c(stock_params[["trp"]],stock_params[["trp"]]), y=c(0, ymax),lty=ref_lty, lwd=ref_lwd, col=ref_col) + } + # If empirical MP how are the reference points defined? Still B/K or some CPUE based measure? 
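For orientation, the red outline that `plot_hcr()` draws just below is the same piecewise rule implemented by `threshold()` in `R/funcs.R`. A quick worked sketch, using the IntroHCR default parameter values (*Blim* = 0.2, *Belbow* = 0.5, *Cmin* = 10, *Cmax* = 140) and a hand-built `mp_params` list shaped like the one `mp_params_setter()` returns:

```r
# Sketch: evaluate the threshold() shape at a few estimated SB/SBF=0 values.
# mp_params is built by hand here; in the app it comes from mp_params_setter().
mp_params <- list(params = c(lim = 0.2, elbow = 0.5, min = 10, max = 140))
bk_est <- array(c(0.1, 0.2, 0.35, 0.5, 0.8), dim = c(1, 5))
threshold(input = bk_est, mp_params = mp_params)
# 10 10 75 140 140 - constant at Cmin below Blim, rising linearly with
# gradient (Cmax - Cmin) / (Belbow - Blim) on the slope, constant at Cmax
# above Belbow
```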
+ + # Plot HCR outline - this is horrible because it depends on the HCR shape + # Constant catch + if (mp_params$hcr_shape == "constant"){ + lines(x=c(0, 1), y=c(mp_params$params["constant_level"], mp_params$params["constant_level"]), lwd=hcr_lwd, lty=hcr_lty, col=hcr_col) + } + # Threshold + else if (mp_params$hcr_shape == "threshold"){ + lines(x = c(0, mp_params$params["lim"], mp_params$params["elbow"], 1), + y = c(mp_params$params["min"], mp_params$params["min"], mp_params$params["max"], mp_params$params["max"]), lwd=hcr_lwd, lty=hcr_lty, col=hcr_col) + } + else { + stop("In plot_hcr(). Trying to plot the HCR shape but hcr_shape is not recognised.") + } + + # Add all HCR inputs and outputs so far + # Only show the HCR years (from last historical timestep) + # The years that the HCR has been active + hcr_ip_yrs <- ((app_params$last_historical_timestep+1):dim(stock$hcr_ip)[2]) - mp_params$timelag + hcr_op_yrs <- (app_params$last_historical_timestep+1):dim(stock$hcr_ip)[2] + # Show all iters and timesteps as ghosts + points(x=c(stock$hcr_ip[,hcr_ip_yrs]), y=c(stock$hcr_op[,hcr_op_yrs]), col=ghost_col, pch=ghost_pch, cex=ghost_cex) + + # Show iters 1:last_iter as ghosts + # Show last as last + # If there is a timestep argument, last is just that timestep + # Else, last is all of the last iter + if (show_last){ + last_iter <- dim(stock$hcr_ip)[1] + # No timestep so show all timesteps in the iter + if (is.null(timestep)){ + lastx <- c(stock$hcr_ip[last_iter,hcr_ip_yrs]) + lasty <- c(stock$hcr_op[last_iter,hcr_op_yrs]) + } + # If we have a timestep, just the last timestep of the last iter + else { + # Check that timestep has not gone beyond bounds + if (timestep > dim(stock$hcr_ip)[2]){ + timestep <- dim(stock$hcr_ip)[2] + } + # timestep is the catch to be + lastx <- c(stock$hcr_ip[last_iter,timestep-mp_params$timelag]) + lasty <- c(stock$hcr_op[last_iter,timestep]) + # Show the last point as lines + lines(x = c(lastx, lastx), y=c(0, lasty), lty=guide_lty, lwd=guide_lwd, col=guide_col) + lines(x = c(0, lastx), y=c(lasty, lasty), lty=guide_lty, lwd=guide_lwd, col=guide_col) + } + # Plot the last points + points(x=lastx, y=lasty, col=last_col, pch=last_pch, cex=last_cex) + } +} + +# Draw polygon and median +draw_ribbon <- function(x, y, quantiles){ + # Get quantiles + qs <- apply(y, 2, function(x){quantile(x, probs=c(quantiles[1], 0.5, quantiles[2]), na.rm=TRUE)}) + # Draw envelope using polygon - horrible + polyx <- c(x, rev(x)) + polyy <- c(qs[1,],rev(qs[3,])) + # Drop NAs + polyx <- polyx[!is.na(polyy)] + polyy <- polyy[!is.na(polyy)] + polygon(x=polyx, y=polyy, col=ribbon_col, border=NA) + # Add qlines + lines(x=x, y=qs[1,], lty=ribbon_lty, col=ribbon_border_col, lwd=ribbon_lwd) + lines(x=x, y=qs[3,], lty=ribbon_lty, col=ribbon_border_col, lwd=ribbon_lwd) + # Add median + lines(x=x, y=qs[2,], lty=median_lty, col=median_col, lwd=median_lwd) +} + +# Biomass / K +# Plot 'true' and only plot observed if model based MP +plot_biomass <- function(stock, stock_params, mp_params, timestep=NULL, show_last=TRUE, max_spaghetti_iters=50, quantiles, nspaghetti=5, ...){ + years <- as.numeric(dimnames(stock$biomass)$year) + # True bk + bk_true <- stock$biomass / stock_params[["k"]] + # Set up empty plot + plot(x=years, y= bk_true[1,], type="n", ylab="SB/SBF=0", xlab="Year", ylim=c(0,1),xaxs="i", yaxs="i", ...) 
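As a side note, `draw_ribbon()` defined above can be exercised on its own; a minimal sketch with made-up iterations (it relies on the ribbon and median plotting globals set at the top of `R/plots.R` being in scope):

```r
# Sketch: plot a 20th-80th percentile envelope plus median for a matrix of
# iterations (rows = iters, columns = years), as the stock plots here do.
years <- 1990:2019
y <- matrix(rlnorm(100 * length(years), meanlog = 4), nrow = 100,
            dimnames = list(iter = 1:100, year = years))
plot(x = range(years), y = c(0, max(y)), type = "n",
     xlab = "Year", ylab = "Value", xaxs = "i", yaxs = "i")
draw_ribbon(x = years, y = y, quantiles = c(0.2, 0.8))
```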
+ # Add LRP and TRP + lines(x=c(years[1], years[length(years)]),y=c(stock_params[["lrp"]],stock_params[["lrp"]]), lty=ref_lty, lwd=ref_lwd, col=ref_col) + lines(x=c(years[1], years[length(years)]),y=c(stock_params[["trp"]],stock_params[["trp"]]), lty=ref_lty, lwd=ref_lwd, col=ref_col) + # Get last iteration + last_iter <- dim(bk_true)[1] + + # If we have more than X iters, draw envelope of iters + if(last_iter > max_spaghetti_iters){ + # Draw ribbon + # Send the 2nd and 4th quantile + draw_ribbon(x=years, y=bk_true, quantiles=quantiles) + # Add spaghetti + for (iter in 1:nspaghetti){ + lines(x=years, y=bk_true[iter,], lty=spaghetti_lty, lwd=spaghetti_lwd, col=spaghetti_col) + } + } + # If we are not drawing the envelope, show all iters as ghosts + else { + if (last_iter > 1){ + for (i in 1:(last_iter - 1)){ + lines(x=years, y=bk_true[i,], col=ghost_col, lty=ghost_lty, lwd=ghost_lwd) + } + } + } + + # Show the last iter as an illustration + if(show_last){ + # Plot the true B/K - plotted first so that the Intro to HCR shows it + lines(x=years, y=bk_true[last_iter,], col=true_col, lwd=last_lwd, lty=last_lty) + # If we have a model based MP, show the HCR IP as it is the estimated biomass too + if (mp_params$mp_type == "model"){ + lines(x=years, y=stock$hcr_ip[last_iter,], col=last_col, lty=last_lty, lwd=last_lwd) + # And if we have obs error + if ((stock_params$biol_est_sigma != 0) | (stock_params$biol_est_bias != 0)){ + legend(x="bottomleft", legend=c("True","Estimated"), lwd=2,col=c(true_col, last_col)) + } + } + } +} + +# If we have timestep we also need app_params +# quantiles of length 2 +plot_catch <- function(stock, stock_params, mp_params, app_params=NULL, timestep=NULL, show_last=TRUE, max_spaghetti_iters=50, quantiles, nspaghetti=5, ...){ + years <- as.numeric(dimnames(stock$biomass)$year) + # Set Ylim - use same as HCR plot + ymax <- get_catch_ymax(stock$catch, mp_params) + yrange <- c(0, ymax) + # Empty axis + plot(x=years, y=years, type="n", ylim=c(0, ymax), ylab="Catch", xlab="Year", xaxs="i", yaxs="i",...) + # Get last iteration + last_iter <- dim(stock$catch)[1] + + # If we have more than X iters, draw envelope of iters + if(last_iter > max_spaghetti_iters){ + # Draw ribbon + draw_ribbon(x=years, y=stock$catch, quantiles=quantiles) + # Add spaghetti + for (iter in 1:nspaghetti){ + lines(x=years, y=stock$catch[iter,], lty=spaghetti_lty, lwd=spaghetti_lwd, col=spaghetti_col) + } + } + # Else plot individual catch iters, depending on timestep argument + else{ + # If we have a timestep then plot horizontal lines showing the catches in the last timestep + if (!is.null(timestep)){ + if (timestep > app_params$last_historical_timestep){ + for (yr in (app_params$last_historical_timestep+1):timestep){ + lines(x=c(years[1], years[length(years)]), y=rep(stock$catch[last_iter, yr],2), lty=guide_lty, col=ghost_col, lwd=guide_lwd) + } + } + # Also plot the proposed catch from the HCR + # Careful with the timelag here? 
Should be OK + if (timestep < dim(stock$hcr_op)[2]){ + lines(x=c(years[1], years[length(years)]), y=rep(stock$hcr_op[last_iter, timestep+1],2),lty=guide_lty, col=guide_col, lwd=guide_lwd) + } + } + # Plot all iters as ghosts + if (last_iter > 1){ + for (i in 1:last_iter){ + lines(x=years, y=stock$catch[i,], col=ghost_col, lwd=ghost_lwd, lty=ghost_lty) + } + } + } + # Current iteration + if(show_last){ + lines(x=years, y=stock$catch[last_iter,], col=true_col, lwd=last_lwd, lty=last_lty) + } +} + +# Relative CPUE +plot_relcpue <- function(stock, stock_params, mp_params, app_params=NULL, timestep=NULL, show_last=TRUE, max_spaghetti_iters=50, quantiles, nspaghetti=5, ...){ + years <- as.numeric(dimnames(stock$biomass)$year) + cpue <- stock$catch / stock$effort + rel_cpue <- sweep(cpue, 1, cpue[,app_params$last_historical_timestep], "/") + # Set Ylim - use same as HCR plot + #ymax <- max(rel_cpue, na.rm=TRUE) + ymax <- max(c(rel_cpue * 1.1, 1.0), na.rm=TRUE) + + yrange <- c(0, ymax) + # Empty axis + plot(x=years, y=years, type="n", ylim=c(0, ymax), ylab="Relative CPUE", xlab="Year", xaxs="i", yaxs="i",...) + # Add 1 line + lines(x=years,y=rep(1,length(years)), lty=2) + # Get last iteration + last_iter <- dim(stock$catch)[1] + + # If we have more than X iters, draw envelope of iters + if(last_iter > max_spaghetti_iters){ + # Draw ribbon + draw_ribbon(x=years, y=rel_cpue, quantiles=quantiles) + # Add spaghetti + for (iter in 1:nspaghetti){ + lines(x=years, y=rel_cpue[iter,], lty=spaghetti_lty, lwd=spaghetti_lwd, col=spaghetti_col) + } + } + # Else plot individual iters, depending on timestep argument + else{ + # Plot all iters as ghosts + if (last_iter > 1){ + for (i in 1:last_iter){ + lines(x=years, y=rel_cpue[i,], col=ghost_col, lwd=ghost_lwd, lty=ghost_lty) + } + } + } + # Current iteration + if(show_last){ + lines(x=years, y=rel_cpue[last_iter,], col=true_col, lwd=last_lwd, lty=last_lty) + } +} + + +plot_hcr_intro_arrow <- function(stock, timestep){ + # Arrow from B/K to HCR + btimestep <- min(timestep, dim(stock$hcr_ip)[2]) + last_hcr_ip <- stock$hcr_ip[1, btimestep] + npoints <- 100 + theta <- seq(from=0, to=pi/2, length=npoints) + x <- sin(theta) * last_hcr_ip + y <- -1 * cos(theta) * (1-last_hcr_ip) + # Set up plot + plot(x=x, y=y, xlim=c(0,1), ylim=c(-1,0), type="n", xaxt="n", yaxt="n",xlab="", ylab="", axes=FALSE,xaxs="i", yaxs="i") + lines(x=x,y=y,col=guide_col, lwd=guide_lwd) + # Add an arrow + arrows(x0=x[length(x)-1], y0=y[length(y)-1], x1=x[length(x)], y1=y[length(y)],col=guide_col,lwd=guide_lwd) +} + +# This works but... +# It's hard to get a single plot (2x2) to be the right dims. 
+# It looks better plotting each plot separately +plot_hcr_intro <- function(stock, stock_params, mp_params, app_params, timestep){ + # Main plot for the Intro to HCR app + # 2 x 2 panel + # Catch | HCR + # ---------------- + # B/K | connecting arrow + + par(mfrow=c(2,2)) + #par(mfrow=c(2,2), pty="s") # make square - looks bad + # Catch + # timestep is the timestep of the catch + plot_catch(stock=stock, stock_params=stock_params, mp_params=mp_params, app_params=app_params, timestep=timestep) + + # HCR + # Add one more onto the timestep because the timestep coming in is for the catch and biomass, hcr_ip and hcr_op are 1 step ahead + plot_hcr(stock=stock, stock_params=stock_params, mp_params=mp_params, app_params=app_params, timestep=timestep+1) + + # B/K + plot_biomass(stock=stock, stock_params=stock_params, mp_params=mp_params, timestep=timestep+1) + + # Arrow from B/K to HCR + btimestep <- min(timestep+1, dim(stock$hcr_ip)[2]) + last_hcr_ip <- stock$hcr_ip[1, btimestep] + npoints <- 100 + theta <- seq(from=0, to=pi/2, length=npoints) + x <- sin(theta) * last_hcr_ip + y <- -1 * cos(theta) * (1-last_hcr_ip) + # Set up plot + plot(x=x, y=y, xlim=c(0,1), ylim=c(-1,0), type="n", xaxt="n", yaxt="n",xlab="", ylab="", axes=FALSE,xaxs="i", yaxs="i") + lines(x=x,y=y,col=guide_col, lwd=guide_lwd) + # Add an arrow + # Just last two points looks weird + arrows(x0=x[length(x)-1], y0=y[length(y)-1], x1=x[length(x)], y1=y[length(y)],col=guide_col,lwd=guide_lwd) +} + +# Could combine with function above into a single +# quantiles of length 2: lower and upper +plot_metric_with_histo <- function(stock, stock_params, mp_params, metric, app_params=NULL, show_last=TRUE, quantiles=c(0.2,0.8)){ + # Plot the metric with an extra sideways histogram + layout(matrix(c(1,2), ncol=2), widths=c(6/7, 1/7)) + ospc <- 0.5 # outer space + pext <- 4 # par extension down and to the left + bspc <- 1 # space between scatter plot and bar plots + par. <- par(mar=c(pext, pext, bspc, bspc), oma=rep(ospc, 4)) # plot parameters + if (metric == "biomass"){ + # The timeseries of biomass in the big window + plot_biomass(stock=stock, stock_params=stock_params, mp_params=mp_params, show_last=show_last, quantiles=quantiles) + # The histogram should be the true B/K + final_yr <- dim(stock$biomass)[2] + dat <- stock$biomass[,final_yr] / stock_params$k + range <- c(0,1) + } + else if (metric == "catch"){ + # The timeseries of biomass in the big window + plot_catch(stock=stock, stock_params=stock_params, mp_params=mp_params, show_last=show_last, quantiles=quantiles) + final_yr <- dim(stock$catch)[2] + dat <- stock$catch[,final_yr] + range <- c(0, get_catch_ymax(stock$catch, mp_params)) + } + else if (metric == "relcpue"){ + plot_relcpue(stock=stock, stock_params=stock_params, mp_params=mp_params, app_params=app_params, show_last=show_last, quantiles=quantiles) + cpue <- stock$catch / stock$effort + rel_cpue <- sweep(cpue, 1, cpue[,app_params$last_historical_timestep], "/") + final_yr <- dim(rel_cpue)[2] + dat <- rel_cpue[,final_yr] + range <- c(0, max(c(rel_cpue * 1.1, 1.0), na.rm=TRUE)) + } + else { + stop("In plot_metric_with_histo(). Unknown metric\n") + } + # Adapted from: https://stackoverflow.com/questions/11022675/rotate-histogram-in-r-or-overlay-a-density-in-a-barplot + if((nrow(stock$hcr_ip) > 1) & (!all(is.na(dat)))){ + # Get all iters and right timesteps (including timelag) + sideways_histogram(dat=dat, range=range) + # restore parameters + } + #par(par.) 
+} + +#--------------------------------------------------------------- +# Majuoro plotting functions +# Plot a single stock using the stock object +# Plotting all the stocks using the time series results +plot_majuro_all_stocks <- function(timeseries, hcr_choices, stock_params){ + # Put into one big DF + timeseries <- bind_rows(timeseries, .id="hcr") + # Get the colour palette and add colour to the dataframe + hcrcols <- get_hcr_colours(hcr_names=unique(timeseries$hcr), chosen_hcr_names=hcr_choices) + hcrcolsdf <- data.frame(hcr=names(hcrcols), colour=unname(hcrcols), stringsAsFactors=FALSE) + # Subset metrics and types + timeseries <- subset(timeseries, (hcr %in% hcr_choices) & (type=="quantile") & (metric %in% c("bk", "ffmsy"))) + # Add in the colours + timeseries <- merge(timeseries, hcrcolsdf) + # Add Legend name through the the HCR number + hcrno <- unlist(lapply(strsplit(timeseries$hcr, "\\."),"[",1)) + timeseries$hcrlegend <- paste("HCR ", hcrno, sep="") + plot_majuro(timeseries, stock_params) +} + +plot_majuro_single_stock <- function(stock, stock_params, quantiles){ + # Get the medians and percentiles + fmsy <- stock_params$r / 2 + f <- stock$catch / stock$biomass + ffmsy <- f / fmsy + ffmsyq <- get_quantiles(ffmsy, quantiles=quantiles) + ffmsyq <- cbind(metric="ffmsy", name="F / FMSY", level=c("lower","median","upper"),as.data.frame(ffmsyq)) + bk <- stock$biomass / stock_params$k + bkq <- get_quantiles(bk, quantiles=quantiles) + bkq <- cbind(metric="bk", name="B / K", level=c("lower","median","upper"),as.data.frame(bkq)) + dat <- rbind(bkq, ffmsyq) + dat <- gather(dat, key="year", value="value",-level, -metric, -name) + dat$hcr <- "Current" + dat$colour <- "black" + dat$hcrlegend <- NA + plot_majuro(dat, stock_params) +} + +# Dat is a data.frame with columns: +# HCR, Quantiles (lower, median, upper), FFMSY, BK, colour +plot_majuro <- function(dat, stock_params){ + ymax <- max(c(2.0, 1.1*subset(dat, metric=="ffmsy" & level=="upper")$value), na.rm=TRUE) + ymax <- min(ymax, 5.0) # In case of stock collapse + # Set up the axes + plot(x=c(0,1), y=c(0,2), type="n", xlim=c(0,1), ylim=c(0,ymax), xlab = "B / K", ylab = "F / FMSY", xaxs="i", yaxs="i") + # Set the colour panels + # The big red one + rect(0.0,0.0,stock_params$lrp,ymax, border="black", col="red", lty=1, lwd=1) + # The orange one + rect(stock_params$lrp,1.0,1.0,ymax, border="black", col="orange", lty=1, lwd=1) + # The other one - leave white for now + rect(stock_params$lrp,0.0,1.0,1.0, border="black", col="white", lty=1, lwd=1) + # Loop over HCRs + hcrs <- unique(dat$hcr) + for (hcrcount in hcrs){ + hcrdat <- subset(dat, hcr==hcrcount) + colour <- hcrdat$colour[1] + # Add the medians as points and a line to tell the story + medbk <- subset(hcrdat, metric == "bk" & level=="median")$value + medffmsy <- subset(hcrdat, metric == "ffmsy" & level=="median")$value + points(x=medbk, y=medffmsy, pch=16, col=colour) + # Do a thick black line then overlay the real line + lines(x=medbk, y=medffmsy, lty=1, lwd=4, col="black") + lines(x=medbk, y=medffmsy, lty=1, lwd=3, col=colour) + # Add quantile lines + lowerbk <- subset(hcrdat, metric == "bk" & level=="lower")$value + lowerffmsy <- subset(hcrdat, metric == "ffmsy" & level=="lower")$value + upperbk <- subset(hcrdat, metric == "bk" & level=="upper")$value + upperffmsy <- subset(hcrdat, metric == "ffmsy" & level=="upper")$value + # We always have one more B value so use FFMsy for final year + finalyr <- max(which(!is.na(medffmsy))) + for (yr in 1:finalyr){ + lines(x=c(medbk[yr],medbk[yr]), 
y=c(lowerffmsy[yr],upperffmsy[yr]), lty=1, lwd=4, col="black") + lines(x=c(medbk[yr],medbk[yr]), y=c(lowerffmsy[yr],upperffmsy[yr]), lty=1, lwd=3, col=colour) + lines(x=c(lowerbk[yr], upperbk[yr]),y=c(medffmsy[yr], medffmsy[yr]), lty=1, lwd=4, col="black") + lines(x=c(lowerbk[yr], upperbk[yr]),y=c(medffmsy[yr], medffmsy[yr]), lty=1, lwd=3, col=colour) + } + # Blob at the end + finalbk <- medbk[finalyr] + finalffmsy <- medffmsy[finalyr] + points(x=finalbk, y=finalffmsy, pch=21, col=colour, bg="white") + } + #browser() + # Add legend + # Adapt HCR name text + # This is wrong - numbers not necessarily 1:X + #if (!("Current" %in% hcrs )){ # Match single stock name in above function - bit dodgy + if(!any(is.na(dat$hcrlegend))){ + hcrnames <- unique(dat$hcrlegend) + legend(x="bottomright", legend=hcrnames, lty=1, lwd=4, col=unique(dat$colour), cex=1.0) + } +} + + +# Plot the majuro plot +# quantiles of length 2 - lower and upper +old_plot_majuro <- function(stock, stock_params, quantiles){ + # F / FMSY against + # B / K + # Plot medians and percentiles + # Just final point? + bk <- stock$biomass / stock_params$k + fmsy <- stock_params$r / 2 + f <- stock$catch / stock$biomass + ffmsy <- f / fmsy + # Get the medians and percentiles + ffmsyqs <- get_quantiles(ffmsy, quantiles=quantiles) + bkqs <- get_quantiles(bk, quantiles=quantiles) + + # Set up the axes + ymax <- max(c(2.0, ffmsyqs * 1.1), na.rm=TRUE) + plot(x=c(0,1), y=c(0,2), xlim=c(0,1), ylim=c(0,ymax), xlab = "B / K", ylab = "F / FMSY") + # And the colour panels + # The big red one + rect(0.0,0.0,stock_params$lrp,ymax, border="black", col="red", lty=1, lwd=1) + # The orange one + rect(stock_params$lrp,1.0,1.0,ymax, border="black", col="orange", lty=1, lwd=1) + # The other one - leave white for now + rect(stock_params$lrp,0.0,1.0,1.0, border="black", col="white", lty=1, lwd=1) + + # Add the medians as points and a line to tell the story + points(x=bkqs[2,], y=ffmsyqs[2,], pch=16, col="black") + lines(x=bkqs[2,], y=ffmsyqs[2,], lty=1, lwd=1) + # And the percentiles as crosses + for (yr in 1:dim(bkqs)[2]){ + if (!is.na(bkqs[1,yr]) & !is.na(bkqs[3,yr]) & !is.na(ffmsyqs[1,yr]) & !is.na(ffmsyqs[3,yr])){ + lines(x=c(bkqs[1,yr], bkqs[3,yr]), y=c(ffmsyqs[2,yr], ffmsyqs[2,yr]), lty=1, lwd=1) + lines(x=c(bkqs[2,yr], bkqs[2,yr]), y=c(ffmsyqs[1,yr], ffmsyqs[3,yr]), lty=1, lwd=1) + } + } + # Show the final year as a big yellow blob + final_yr <- dim(bkqs)[2] + points(x=bkqs[2,final_yr], y=ffmsyqs[2,final_yr], pch=16, cex=1.5, col="yellow") +} + +# Generate vector of colours the same length as the total no of hcrs +# Then subset out the hcrs in hcr_choices +# see for more names: display.brewer.all(colorblindFriendly=TRUE) +# Check the max number of colours in the palette brewer.pal.info +# No HCRs is the total number of HCRs - not the number selected +get_hcr_colours <- function(hcr_names, chosen_hcr_names){ + #allcols <- colorRampPalette(brewer.pal(11,"PiYG"))(length(hcr_names)) + #allcols <- colorRampPalette(brewer.pal(12,"Paired"))(length(hcr_names)) + allcols <- colorRampPalette(brewer.pal(8,"Dark2"))(length(hcr_names)) + names(allcols) <- hcr_names + hcrcols <- allcols[chosen_hcr_names] + return(hcrcols) +} + +#------------------------------------------------------------ +# PI plots + +plot_pi_choice <- function(pis, hcr_choices, pi_choices, plot_choice){ + if (length(pis) == 0){ + return() + } + # Put into one big DF + piqs <- lapply(pis, function(x) return(x$piqs)) + piqs <- bind_rows(piqs, .id="hcr") + + # Sanity check - if all the hcr choices are 
not in the PI table then something has gone wrong + if (!all(hcr_choices %in% unique(piqs$hcr))){ + stop("In plot_pi_choice(). Not all hcr_choices are in the hcr list.\n") + } + + # Capitalise first letter of term column (add years?) + piqs$term <- paste(toupper(substring(piqs$term, 1,1)), substring(piqs$term, 2), sep="") + # Reorder term column: Short, Medium, Long + piqs$term <- as.factor(piqs$term) + piqs$term <- factor(piqs$term,levels(piqs$term)[c(3,2,1)]) + + # Add Legend name through the the HCR number + hcrno <- unlist(lapply(strsplit(piqs$hcr, "\\."),"[",1)) + piqs$hcrlegend <- paste("HCR ", hcrno, sep="") + # Get the colour palette - based on the hcr legend rather than the full name + hcr_choices2 <- unlist(lapply(strsplit(hcr_choices, "\\."),"[",1)) + hcr_choices2 <- paste("HCR ", hcr_choices2, sep="") + hcrcols <- get_hcr_colours(hcr_names=unique(piqs$hcrlegend), chosen_hcr_names=hcr_choices2) + #hcrcols <- get_hcr_colours(hcr_names=unique(piqs$hcr), chosen_hcr_names=hcr_choices) + + # Drop out unwanted HCRs and PIs + piqs <- subset(piqs, (hcr %in% hcr_choices) & (name %in% pi_choices)) + + # Transform medians for upside down ones - not sure about this + piqs[piqs$value < 1e-9, "value"] <- 1e-9 + piqs_normal <- subset(piqs, pi %in% c("bk", "problrp", "catch", "relcpue")) + piqs_usdown <- subset(piqs, pi %in% c("diffcatch","diffeffort","diffcpue","releffort")) + # Doesn't make sense to transform quantiles + piqs_usdown <- piqs_usdown %>% group_by(pi,term,name,quantiles) %>% mutate(scvalue = (value / max(value, na.rm=TRUE))) + piqs_usdown <- piqs_usdown %>% group_by(pi,term,name,quantiles) %>% mutate(value = (min(scvalue, na.rm=TRUE)/scvalue)) + piqs_transform <- rbind(piqs_normal, piqs_usdown) + # Rename + piqs_transform[piqs_transform$pi == "diffcatch", "name"] <- "Relative catch stability" + piqs_transform[piqs_transform$pi == "diffeffort", "name"] <- "Relative effort stability" + piqs_transform[piqs_transform$pi == "diffcpue", "name"] <- "Relative CPUE stability" + piqs_transform[piqs_transform$pi == "releffort", "name"] <- "Rescaled effort" + + # Plot the medians as a bar chart + if (plot_choice == "PImeds"){ + # The plots - facet on name + # Plot type one - bar chart on q50 + #dat <- subset(piqs, (quantiles=="q50") & (hcr %in% hcr_choices) & (name %in% pi_choices)) + dat <- subset(piqs_transform, (quantiles=="q50")) + p <- ggplot(dat, aes(x=term, y=value, fill=hcrlegend)) + p <- p + geom_bar(stat="identity", position="dodge", colour="black", width=0.7) + p <- p + scale_fill_manual(values=hcrcols) + p <- p + facet_wrap(~name, scales="free") + p <- p + theme(legend.position="bottom", legend.title=element_blank()) + #p <- p + ylim(0, NA) # If -ve variability + return(p) + } + else if (plot_choice == "PIbox"){ + #dat <- subset(piqs) + dat <- piqs + # Spread out the quantiles columns + dat <- dat %>% spread(key="quantiles", value="value") + p <- ggplot(dat, aes(x=term)) + p <- p + geom_boxplot(aes(ymin=q5, ymax=q95, lower=q20,middle=q50,upper=q80, fill=hcrlegend), stat="identity") + p <- p + xlab("Time period") + p <- p + scale_fill_manual(values=hcrcols) + p <- p + theme(legend.position="bottom", legend.title=element_blank()) + p <- p + facet_wrap(~name, scales="free") + #p <- p + ylim(0, NA) # Can be -ve variability + return(p) + } + # What is this shit? 
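A small numeric illustration (values made up) of the rescaling applied above to the "upside down" indicators, i.e. those where a smaller raw value is better (catch, effort and CPUE variability, relative effort): within each group the medians are first scaled to the worst HCR, then inverted so that the best-performing HCR scores 1.

```r
# Sketch of the "upside down" PI transform, across three hypothetical HCRs
# with median catch variability of 2, 4 and 8 (smaller is better):
x <- c(2, 4, 8)
scvalue <- x / max(x)      # 0.25 0.50 1.00  (scaled to the worst HCR)
min(scvalue) / scvalue     # 1.00 0.50 0.25  (best HCR now scores highest)
```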
+ else if (plot_choice == "PIradar"){ + # Want a DF with column of HCR, PI, value + # Use rescaled medians - upside down + dat <- subset(piqs_transform, (quantiles=="q50")) + #dat <- subset(piqs, (quantiles=="q50")) + # Rescale the median to the maximum + # Be careful with 0 values, scaling by max can stuff it up if all PIs have 0 + dat[dat$value == 0,"value"] <- 1e-9 + dat <- dat %>% group_by(term, pi, name) %>% mutate(scvalue = value / max(abs(value), na.rm=TRUE)) + #browser() + # Move variability PIs to + ve scale + #dat[dat$pi %in% c("diffcatch", "diffeffort", "diffcpue"), "scvalue"] <- dat[dat$pi %in% c("diffcatch", "diffeffort", "diffcpue"), "scvalue"] + 1 + # Drop variability PIs as negative scale is a problem + #dat <- dat[!(dat$pi %in% c("diffcatch", "diffeffort", "diffcpue")),] + ## Also drop effort (also points the wrong way) + #dat <- dat[!(dat$pi %in% c("releffort")),] + + # Order is important, rbinding above made a mess + dat <- dat[order(dat$name),] + # ggproto magic taken from: + # From: http://web-r.org/board_ISVF22/8270 + # See: http://web-r.org/board_ISVF22/8271 + # inherits from CoordPolar and sets other arguments? + # coord_polar on its own is a bit weird - I'm guessing that the is_linear argument is the new thing + p <- ggplot(data=dat, aes(x=name, y=scvalue,fill=hcrlegend, group=hcrlegend, colour=hcrlegend)) + #p <- p + geom_point(size=3) + p <- p + geom_polygon(alpha=0.6) + p <- p + xlab("") + ylab("") + theme(legend.position="bottom", legend.title=element_blank()) + p <- p + facet_wrap(~term) + p <- p + scale_fill_manual(values=hcrcols) + p <- p + scale_colour_manual(values=hcrcols) + p <- p + ggproto("CoordRadar", CoordPolar, theta = "x", r = "y", start = 0, direction = 1, is_linear = function(coord) TRUE) + p <- p + theme(axis.text.y=element_blank(), axis.ticks.y=element_blank()) # Remove y axis + p <- p + ylim(0,1) + #p + coord_polar() # For a weird fat look! 
+ return(p) + } + else { + return() + } +} + +plot_timeseries <- function(timeseries, hcr_choices, stock_params, show_spaghetti=FALSE){ + if (length(timeseries) == 0){ + return() + } + # Put into one big DF + timeseries <- bind_rows(timeseries, .id="hcr") + + # Add Legend name through the the HCR number + hcrno <- unlist(lapply(strsplit(timeseries$hcr, "\\."),"[",1)) + timeseries$hcrlegend <- paste("HCR ", hcrno, sep="") + # Get the colour palette - based on the hcr legend rather than the full name + hcr_choices2 <- unlist(lapply(strsplit(hcr_choices, "\\."),"[",1)) + hcr_choices2 <- paste("HCR ", hcr_choices2, sep="") + hcrcols <- get_hcr_colours(hcr_names=unique(timeseries$hcrlegend), chosen_hcr_names=hcr_choices2) + #hcrcols <- get_hcr_colours(hcr_names=unique(piqs$hcr), chosen_hcr_names=hcr_choices) + # Get the colour palette + #hcrcols <- get_hcr_colours(hcr_names=unique(timeseries$hcr), chosen_hcr_names=hcr_choices) + + qdat <- subset(timeseries, (type=="quantile") & (hcr %in% hcr_choices)) + qdat <- qdat %>% spread(key="level", value="value") + # For adding the LRP line to the biomass plot + bkdat <- subset(qdat, metric=="bk") + bkdat$lrp <- stock_params$lrp + bkdat$trp <- stock_params$trp + # For adding the F/FMSY = 1 line + ffmsydat <- subset(qdat, metric=="ffmsy") + ffmsydat$ffmsyref <- 1.0 + + + p <- ggplot(qdat,aes(x=year)) + # Add ribbons + p <- p + geom_ribbon(aes(ymin=lower, ymax=upper, fill=hcrlegend), alpha=0.6) + # Add upper and lower lines + p <- p + geom_line(aes(y=lower, colour=hcrlegend)) + p <- p + geom_line(aes(y=upper, colour=hcrlegend)) + # Add median line + p <- p + geom_line(aes(y=median, colour=hcrlegend), linetype=2, size=1) + + # Add LRP and TRP + p <- p + geom_line(data=bkdat, aes(y=lrp), colour="black", linetype=2, size=0.5) + p <- p + geom_line(data=bkdat, aes(y=trp), colour="black", linetype=2, size=0.5) + + p <- p + facet_wrap(~name, scales="free") + p <- p + xlab("Year") + theme(legend.position="bottom", legend.title=element_blank()) + p <- p + scale_fill_manual(values=hcrcols) + p <- p + scale_colour_manual(values=hcrcols) + p <- p + ylim(0, NA) + + # Add F/FMSY=1 + p <- p + geom_line(data=ffmsydat, aes(y=ffmsyref), colour="black", linetype=2, size=0.5) + + # Add spaghetti + if (show_spaghetti==TRUE){ + spdat <- subset(timeseries, (type=="spaghetti") & (hcr %in% hcr_choices)) + for(ihcr in hcr_choices){ + p <- p + geom_line(data=subset(spdat, hcr == ihcr), aes(y=value, group=level, colour=hcrlegend), size=0.5) + } + } + return(p) +} + + + diff --git a/README.md b/README.md index f147670..0d5631b 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,24 @@ -# ofp-amped -Collection of Shiny applications for exploring harvest control rules. +# Amazing Management Procedure Exploration Device (AMPED) +- Version: 0.1.0 Rupert Trousers +- Date: 2018-10-12 +- Author: Finlay Scott, SPC +- Maintainer: Finlay Scott + +## Overview + +A bunch of Shiny apps with a common set of background functions to explore: + +* What is an HCR? +* Introduction to uncertainty +* Measuring Performance + +## Acknowledgements + +With thanks to André Punt. + +## Licence and copyright + +Copyright 2018 OFP SPC MSE Team. 
Distributed under the GPL 3 + + + diff --git a/tutorials/figures/IntroHCR_Bbias.png b/tutorials/figures/IntroHCR_Bbias.png new file mode 100644 index 0000000..adc79f3 Binary files /dev/null and b/tutorials/figures/IntroHCR_Bbias.png differ diff --git a/tutorials/figures/IntroHCR_Bnoise.png b/tutorials/figures/IntroHCR_Bnoise.png new file mode 100644 index 0000000..e87adaf Binary files /dev/null and b/tutorials/figures/IntroHCR_Bnoise.png differ diff --git a/tutorials/figures/IntroHCR_Ebias.png b/tutorials/figures/IntroHCR_Ebias.png new file mode 100644 index 0000000..adc79f3 Binary files /dev/null and b/tutorials/figures/IntroHCR_Ebias.png differ diff --git a/tutorials/figures/IntroHCR_Enoise.png b/tutorials/figures/IntroHCR_Enoise.png new file mode 100644 index 0000000..24b74ea Binary files /dev/null and b/tutorials/figures/IntroHCR_Enoise.png differ diff --git a/tutorials/figures/IntroHCR_HCR2.png b/tutorials/figures/IntroHCR_HCR2.png new file mode 100644 index 0000000..7f6c552 Binary files /dev/null and b/tutorials/figures/IntroHCR_HCR2.png differ diff --git a/tutorials/figures/introHCR_HCR1.png b/tutorials/figures/introHCR_HCR1.png new file mode 100644 index 0000000..2fe8d18 Binary files /dev/null and b/tutorials/figures/introHCR_HCR1.png differ diff --git a/tutorials/figures/introHCR_HCR3.png b/tutorials/figures/introHCR_HCR3.png new file mode 100644 index 0000000..931dcce Binary files /dev/null and b/tutorials/figures/introHCR_HCR3.png differ diff --git a/tutorials/figures/introHCR_end.png b/tutorials/figures/introHCR_end.png new file mode 100644 index 0000000..27acf29 Binary files /dev/null and b/tutorials/figures/introHCR_end.png differ diff --git a/tutorials/figures/introHCR_start.png b/tutorials/figures/introHCR_start.png new file mode 100644 index 0000000..02d93f4 Binary files /dev/null and b/tutorials/figures/introHCR_start.png differ diff --git a/tutorials/figures/introUncertainty1.png b/tutorials/figures/introUncertainty1.png new file mode 100644 index 0000000..c7632da Binary files /dev/null and b/tutorials/figures/introUncertainty1.png differ diff --git a/tutorials/figures/introUncertainty2.png b/tutorials/figures/introUncertainty2.png new file mode 100644 index 0000000..431894f Binary files /dev/null and b/tutorials/figures/introUncertainty2.png differ diff --git a/tutorials/figures/introUncertainty_PIs.png b/tutorials/figures/introUncertainty_PIs.png new file mode 100644 index 0000000..37dd1fb Binary files /dev/null and b/tutorials/figures/introUncertainty_PIs.png differ diff --git a/tutorials/figures/introUncertainty_PIs2.png b/tutorials/figures/introUncertainty_PIs2.png new file mode 100644 index 0000000..7f3f067 Binary files /dev/null and b/tutorials/figures/introUncertainty_PIs2.png differ diff --git a/tutorials/figures/introUncertainty_start.png b/tutorials/figures/introUncertainty_start.png new file mode 100644 index 0000000..b336794 Binary files /dev/null and b/tutorials/figures/introUncertainty_start.png differ diff --git a/tutorials/figures/measuringPerformanceHCR1.png b/tutorials/figures/measuringPerformanceHCR1.png new file mode 100644 index 0000000..de5f73c Binary files /dev/null and b/tutorials/figures/measuringPerformanceHCR1.png differ diff --git a/tutorials/figures/measuringPerformance_boxPlot.png b/tutorials/figures/measuringPerformance_boxPlot.png new file mode 100644 index 0000000..095a635 Binary files /dev/null and b/tutorials/figures/measuringPerformance_boxPlot.png differ diff --git a/tutorials/figures/measuringPerformance_medianBar.png 
b/tutorials/figures/measuringPerformance_medianBar.png new file mode 100644 index 0000000..79ab56e Binary files /dev/null and b/tutorials/figures/measuringPerformance_medianBar.png differ diff --git a/tutorials/figures/measuringPerformance_radar.png b/tutorials/figures/measuringPerformance_radar.png new file mode 100644 index 0000000..e9a0084 Binary files /dev/null and b/tutorials/figures/measuringPerformance_radar.png differ diff --git a/tutorials/figures/measuringPerformance_start.png b/tutorials/figures/measuringPerformance_start.png new file mode 100644 index 0000000..db31944 Binary files /dev/null and b/tutorials/figures/measuringPerformance_start.png differ diff --git a/tutorials/introHCR.Rmd b/tutorials/introHCR.Rmd new file mode 100644 index 0000000..b46e206 --- /dev/null +++ b/tutorials/introHCR.Rmd @@ -0,0 +1,174 @@ +--- +title: "Using the *Introduction to HCRs* Shiny app" +author: "Finlay Scott - OFP, SPC" +date: "2018-10-10" +output: + html_document: + keep_md: yes + pdf_document: default +--- + + + + +# Introduction to HCRs + +This is a very quick overview of the *Introduction to HCRs* Shiny app. + +# Quick overview + +To get started double-click on the **IntroHCR** file in the AMPED directory. +A black window *should* appear, followed by the app opening in a browser window. +If this does not happen, something has gone wrong. Sorry... + +If things have gone well you should see three plotting windows and a blue arrow. +Your plots might look slightly different due to variations in the historic catches. + + + + +![](figures/introHCR_start.png) + +The two plots on the left-hand side show the history of the catch and SB/SBF=0. At the moment, there are only 10 years of history from 1990 to 1999. + +The HCR is shown in the top-right panel. +The shape of the HCR is determined by 4 parameters: *Blim*, *Belbow*, *Cmin* and *Cmax*. + +The purpose of this particular HCR is to set the catch limit in the following year. The HCR uses the current estimated value of SB/SBF=0 to set the catch limit in the following year. + +The shape of the HCR is that when the estimated SB/SBF=0 is greater than *Belbow* the catch limit is set at *Cmax*. +When SB/SBF=0 is less than *Blim* the catch limit is set at *Cmin*. +When SB/SBF=0 is betwen *Blim* and *Belbow*, the catch limit is set according to the slope. +The controls for the HCR are on the left-hand side of the screen. + +The way the HCR operates can be seen by following the blue arrow from the SB/SBF=0 plot to the HCR plot at the bottom right of the screen. The current estimated value of SB/SBF=0 is shown on the HCR plot as the blue dashed vertical line. +The catch limit in the following year is set by reading the corresponding catch limit from the shape of the HCR. +This is shown by the blue dashed horizontal line on the HCR plot and also the catch limit. + +## Projecting through time + +We can see how the HCR works by projecting the stock forward by a year. +The catch limit that has been set by the HCR is shown by the blue dashed horizontal line on the catch plot. +As we go forward in time, the stock is fished by this catch limit and the population abundance responds accordingly. +The new estimate of the current level of SB/SBF=0 is then used by the HCR to set the catch limit in the following year. + +To see this in action, set the parameters of the HCR to: *Blim* = 0.2, *Belbow* = 0.5, *Cmin* = 10 and *Cmax* = 140 (these are the initial values). Press the **Advance** buton. 
You should see that the catch in the next year (year 2000) hits the previous catch limt (where the blue dashed line was), the SB/SBF=0 plot updates by a year as a result of being fished, and the HCR uses the new estimate of SB/SBF=0 to set the catch limit in the next year. + +Keep pressing the **Advance** button and you should see the cycle of how the HCR sets the catch limit, the catch limit affects the stock abundance, and the stock abundance is used by the HCR. +You should see that eventually the system settles down to a steady catch limit and stock abundance. + + +![](figures/introHCR_HCR1.png) + +## Try other HCRs + +The parameters of the HCR that we just tried are: *Blim* = 0.2, *Belbow* = 0.5, *Cmin* = 10 and *Cmax* = 140. + + +Try setting up a different HCR. Change *Belbow* to be 0.3 and *Cmax* to 120. Keep the other parameters the same. +You should see that the HCR plot has been updated to show the new shape of the HCR. +This HCR has lower maximum catches than the first HCR and responds more slowly (the catches don't start decreasing until SB/SBF=0 is less than 0.3) + +Repeatedly press the **Advance** button and follow the evolution of the stock. +Note how the behaviour and final values are different to the initial example. + + +![](figures/introHCR_HCR2.png) + +As another example set *Belbow* to 0.7 and *Cmax* to 160. This means that the maximum catches will be higher than the other examples. The HCR will respond more quickly to changes in the stock abundance and the catches will start to decrease when SB/SBF=0 falls below 0.7. +Again, follow the new evolution of the stock and catch limits by repeatedly pressing the **Advance** button and note the behaviour and final values. + + +![](figures/introHCR_HCR3.png) + +You should have seen that different parameterisations of the HCR lead to different stock dynamics and final settled values. + +# Introducing uncertainty + +In the real world, fisheries have a lot of uncertainty associated with them. +However, the projections we have run so far have not considered uncertainty. +This means that the outcome of the projections will always be the same (they are *deterministic* simulations). + +In this Shiny app we can include two sources of uncertainty: variability in the biological productivity and estimation error. +Both of these sources of uncertainty can affect the dynamics of the fishery and the performance of the HCR. + +Click on the **Show variability option** to show the uncertainty options. + +## Biological productivity variability + +Biological productivity variability reflects the natural variability in the stock dynamics, for example through variability in the recruitment, growth and natural mortality. Fisheries managers have no control over this source of uncertainty. +As such it is very important than an adopted HCR is robust to this uncertainty. + +We saw in the previous examples without uncertainty that eventually the stock abundance settles down and is perfectly flat. +What happens we include natural variability? + +Set the HCR parameters back to their original values (*Blim* = 0.2, *Belbow* = 0.5, *Cmin* = 10, *Cmax* = 140). +Increase the **Biological productivity variability** to 0.2 and project forward through time using the **Advance** button. + + +![](figures/introHCR_Bnoise.png) + +You should see that the SB/SBF=0 now bounces around and is not perfectly flat. As the HCR is driven by the estimate of SB/SBF=0, it means the the catch limit set by the HCR also bounces around. This then goes onto affect the stock and so on. 
+ +If you press the **Reset** button and run the projection again you should see that you get a different trajectory (uncertainty in action!). + + +## Estimation error + +In the real world we do not know the true stock abundance. This means that the HCR is not driven by the true value of SB/SBF=0. Instead it is driven by an *estimated* value. +For example, it can be estimated by using a stock assessment model. +The catch limit that is then set by the HCR is therefore based on estimated values of the stock abundance, not the true values. +The difference between the estimated and true value of SB/SBF=0 is called the estimation error and it can have an important impact on the performance of a HCR. + +Here estimation error can be set using two ways: variability and bias. These can be combined. + +To demonstrate these turn the **Biological productivty variability** back to 0 and increase the **Estimation error variability** to 0.04 (leave **bias** as 0 for now). +Project the stock forward several times using the **Advance** button. + + +![](figures/introHCR_Enoise.png) + +You should see that the SB/SBF=0 plot now shows two lines. The black one is the *true* abundance and the blue one is the *estimated* abundance. It is the estimated abundance that is used by the HCR to set the catch limit. +You should see that the stock and catch bounce around. This variability is not caused by any biological variability (you have turned that off) but from the HCR using the *estimated* value of SB/SBF=0 to set the catch limit. The catch limit, of course, affects the *real* stock abundance. + +Now turn the **Estimation error variability** back to 0 and set the **Estimation error bias** to 0.1. +This means that the estimated value of SB/SBF=0 is always 10% higher than the true value, i.e. we are always overestimating the stock abduncance. +Project forward and see what happens. Is the final value of the true SB/SBF=0 higher or lower than when there is no bias? + + +![](figures/introHCR_Ebias.png) + + +## Combining uncertainty + +Now turn on all the sources of uncertainty. +Set **Biological productivity variability** to 0.2, **Estimation error variability** to 0.04 and the **Estimation error bias** to 0.1. +Keep the HCR values the same as the initial values (*Blim* = 0.2, *Belbow* = 0.5, *Cmin* = 10 and *Cmax* = 140). + +Project this forward. How are the results different to the first projection we ran which had the same HCR parameters but no uncertainty? + +![](figures/introHCR_end.png) + +If you have the time and enthusiasm, you can rerun the other two HCRs we looked at with the uncertainty turned on to see how they perform. + +# Summary + +A HCR is a rule for setting fishing opportunities. +In this example the input to the rule is *estimated* stock abundance (SB/SBF=0) and the output is the catch limit in the following year. +Different HCR parameterisations give different performances. + +Uncertainty is a big concern in fisheries management. +Here we looked at biological and estimation uncertainty. We have seen that they can change the performance of the fishery. +It is very important that an HCR is robust to uncertainty. +A HCR that performs well in the absence of uncertainty may not perform as well when uncertainty is present. + +How do we know which HCR to use? How do we consider uncertainty? +See the next tutorial! 
+ + + + + + diff --git a/tutorials/introHCR.html b/tutorials/introHCR.html new file mode 100644 index 0000000..625495b --- /dev/null +++ b/tutorials/introHCR.html @@ -0,0 +1,430 @@ + + + + + + + + + + + + + + + +Using the Introduction to HCRs Shiny app + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + +
+

Introduction to HCRs

+

This is a very quick overview of the Introduction to HCRs Shiny app.

+
+
+

Quick overview

+

To get started double-click on the IntroHCR file in the AMPED directory. A black window should appear, followed by the app opening in a browser window. If this does not happen, something has gone wrong. Sorry…

+

If things have gone well you should see three plotting windows and a blue arrow. Your plots might look slightly different due to variations in the historic catches.

+ + +

+

The two plots on the left-hand side show the history of the catch and SB/SBF=0. At the moment, there are only 10 years of history from 1990 to 1999.

+

The HCR is shown in the top-right panel. The shape of the HCR is determined by 4 parameters: Blim, Belbow, Cmin and Cmax.

+

The purpose of this particular HCR is to set the catch limit in the following year, using the current estimated value of SB/SBF=0 as its input.

+

The HCR works as follows: when the estimated SB/SBF=0 is greater than Belbow the catch limit is set at Cmax. When SB/SBF=0 is less than Blim the catch limit is set at Cmin. When SB/SBF=0 is between Blim and Belbow, the catch limit is set according to the slope. The controls for the HCR are on the left-hand side of the screen.
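The rule described above can be sketched in a few lines of R. This is an illustrative reimplementation, not the app's own code; the function name threshold_hcr and the default values are simply taken from the tutorial text.

```r
# Illustrative sketch of the threshold-catch HCR described in the text.
# Not the app's own implementation; defaults follow the tutorial values.
threshold_hcr <- function(biomass, blim = 0.2, belbow = 0.5, cmin = 10, cmax = 140) {
  if (biomass >= belbow) {
    cmax                                  # above the elbow: maximum catch
  } else if (biomass <= blim) {
    cmin                                  # below the limit: minimum catch
  } else {
    # between Blim and Belbow: linear interpolation along the slope
    cmin + (cmax - cmin) * (biomass - blim) / (belbow - blim)
  }
}
threshold_hcr(0.6)   # 140
threshold_hcr(0.35)  # 75, halfway up the slope
```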

+

The way the HCR operates can be seen by following the blue arrow from the SB/SBF=0 plot at the bottom left of the screen to the HCR plot at the top right. The current estimated value of SB/SBF=0 is shown on the HCR plot as the blue dashed vertical line. The catch limit in the following year is set by reading the corresponding catch limit from the shape of the HCR. This is shown by the blue dashed horizontal line on the HCR plot and also on the catch plot.

+
+

Projecting through time

+

We can see how the HCR works by projecting the stock forward by a year. The catch limit that has been set by the HCR is shown by the blue dashed horizontal line on the catch plot. As we go forward in time, the stock is fished by this catch limit and the population abundance responds accordingly. The new estimate of the current level of SB/SBF=0 is then used by the HCR to set the catch limit in the following year.
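A rough sketch of this feedback cycle is shown below. The biomass update is a toy logistic model with an arbitrary catch scaling, chosen only to illustrate the loop; it is not the app's population model.

```r
# Hedged sketch of the Advance cycle: the HCR sets the catch limit,
# the stock is fished at that level and responds, and the new biomass
# feeds back into the HCR. Toy dynamics and scaling, for illustration only.
hcr <- function(b) if (b >= 0.5) 140 else if (b <= 0.2) 10 else 10 + 130 * (b - 0.2) / 0.3
advance <- function(b, catch, r = 0.5, scale = 1000) {
  max(b + r * b * (1 - b) - catch / scale, 0.01)  # logistic growth minus scaled catch
}
b <- 0.6
for (year in 2000:2005) {
  catch <- hcr(b)          # catch limit set from the current biomass estimate
  b <- advance(b, catch)   # stock responds to being fished at that level
  cat(year, "catch =", round(catch), " SB/SBF=0 =", round(b, 2), "\n")
}
```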

+

To see this in action, set the parameters of the HCR to: Blim = 0.2, Belbow = 0.5, Cmin = 10 and Cmax = 140 (these are the initial values). Press the Advance button. You should see that the catch in the next year (year 2000) hits the previous catch limit (where the blue dashed line was), the SB/SBF=0 plot updates by a year as a result of being fished, and the HCR uses the new estimate of SB/SBF=0 to set the catch limit in the next year.

+

Keep pressing the Advance button and you should see the cycle of how the HCR sets the catch limit, the catch limit affects the stock abundance, and the stock abundance is used by the HCR. You should see that eventually the system settles down to a steady catch limit and stock abundance.

+ +

+
+
+

Try other HCRs

+

The parameters of the HCR that we just tried are: Blim = 0.2, Belbow = 0.5, Cmin = 10 and Cmax = 140.

+

Try setting up a different HCR. Change Belbow to be 0.3 and Cmax to 120. Keep the other parameters the same. You should see that the HCR plot has been updated to show the new shape of the HCR. This HCR has lower maximum catches than the first HCR and responds more slowly (the catches don’t start decreasing until SB/SBF=0 is less than 0.3).

+

Repeatedly press the Advance button and follow the evolution of the stock. Note how the behaviour and final values are different to the initial example.

+ +

+

As another example set Belbow to 0.7 and Cmax to 160. This means that the maximum catches will be higher than the other examples. The HCR will respond more quickly to changes in the stock abundance and the catches will start to decrease when SB/SBF=0 falls below 0.7. Again, follow the new evolution of the stock and catch limits by repeatedly pressing the Advance button and note the behaviour and final values.

+ +

+

You should have seen that different parameterisations of the HCR lead to different stock dynamics and final settled values.

+
+
+
+

Introducing uncertainty

+

In the real world, fisheries have a lot of uncertainty associated with them. However, the projections we have run so far have not considered uncertainty. This means that the outcome of the projections will always be the same (they are deterministic simulations).

+

In this Shiny app we can include two sources of uncertainty: variability in the biological productivity and estimation error. Both of these sources of uncertainty can affect the dynamics of the fishery and the performance of the HCR.

+

Click on the Show variability options box to show the uncertainty options.

+
+

Biological productivity variability

+

Biological productivity variability reflects the natural variability in the stock dynamics, for example through variability in the recruitment, growth and natural mortality. Fisheries managers have no control over this source of uncertainty. As such it is very important that an adopted HCR is robust to this uncertainty.

+

We saw in the previous examples without uncertainty that eventually the stock abundance settles down and is perfectly flat. What happens when we include natural variability?

+

Set the HCR parameters back to their original values (Blim = 0.2, Belbow = 0.5, Cmin = 10, Cmax = 140). Increase the Biological productivity variability to 0.2 and project forward through time using the Advance button.

+ +

+

You should see that the SB/SBF=0 now bounces around and is not perfectly flat. As the HCR is driven by the estimate of SB/SBF=0, the catch limit set by the HCR also bounces around. This then goes on to affect the stock, and so on.
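One common way to represent this kind of variability, assumed here rather than taken from the app's code, is to multiply the annual surplus production by lognormal noise:

```r
# Sketch: multiplicative lognormal noise on a toy surplus production term.
# sigma = 0.2 mirrors the "Biological productivity variability" slider,
# but the app's exact noise model is an assumption here.
set.seed(1)
b <- 0.5
sigma <- 0.2
production <- 0.5 * b * (1 - b)                              # deterministic toy production
noisy_production <- production * rlnorm(1, meanlog = 0, sdlog = sigma)
c(deterministic = production, with_noise = noisy_production)
```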

+

If you press the Reset button and run the projection again you should see that you get a different trajectory (uncertainty in action!).

+
+
+

Estimation error

+

In the real world we do not know the true stock abundance. This means that the HCR is not driven by the true value of SB/SBF=0. Instead it is driven by an estimated value. For example, it can be estimated by using a stock assessment model. The catch limit that is then set by the HCR is therefore based on estimated values of the stock abundance, not the true values. The difference between the estimated and true value of SB/SBF=0 is called the estimation error and it can have an important impact on the performance of a HCR.

+

Here estimation error can be set in two ways: variability and bias. These can be combined.

+

To demonstrate these, turn the Biological productivity variability back to 0 and increase the Estimation error variability to 0.04 (leave bias as 0 for now). Project the stock forward several times using the Advance button.

+ +

+

You should see that the SB/SBF=0 plot now shows two lines. The black one is the true abundance and the blue one is the estimated abundance. It is the estimated abundance that is used by the HCR to set the catch limit. You should see that the stock and catch bounce around. This variability is not caused by any biological variability (you have turned that off) but comes from the HCR using the estimated value of SB/SBF=0 to set the catch limit. The catch limit, of course, affects the real stock abundance.

+

Now turn the Estimation error variability back to 0 and set the Estimation error bias to 0.1. This means that the estimated value of SB/SBF=0 is always 10% higher than the true value, i.e. we are always overestimating the stock abundance. Project forward and see what happens. Is the final value of the true SB/SBF=0 higher or lower than when there is no bias?
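A plausible way to combine the two, assuming lognormal variability and a simple multiplicative bias (the app's estimation model may differ in detail):

```r
# Sketch: estimated biomass = true biomass * lognormal noise * (1 + bias).
# The functional form is an assumption, not the app's documented behaviour.
set.seed(2)
true_b   <- 0.45
est_sd   <- 0.04   # "Estimation error variability"
est_bias <- 0.10   # "Estimation error bias": a 10% overestimate on average
estimated_b <- true_b * rlnorm(1, meanlog = 0, sdlog = est_sd) * (1 + est_bias)
c(true = true_b, estimated = estimated_b)
```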

+ +

+
+
+

Combining uncertainty

+

Now turn on all the sources of uncertainty. Set Biological productivity variability to 0.2, Estimation error variability to 0.04 and the Estimation error bias to 0.1. Keep the HCR values the same as the initial values (Blim = 0.2, Belbow = 0.5, Cmin = 10 and Cmax = 140).

+

Project this forward. How are the results different to the first projection we ran which had the same HCR parameters but no uncertainty?

+

+

If you have the time and enthusiasm, you can rerun the other two HCRs we looked at with the uncertainty turned on to see how they perform.

+
+
+
+

Summary

+

A HCR is a rule for setting fishing opportunities. In this example the input to the rule is estimated stock abundance (SB/SBF=0) and the output is the catch limit in the following year. Different HCR parameterisations give different performances.

+

Uncertainty is a big concern in fisheries management. Here we looked at biological and estimation uncertainty. We have seen that they can change the performance of the fishery. It is very important that an HCR is robust to uncertainty. A HCR that performs well in the absence of uncertainty may not perform as well when uncertainty is present.

+

How do we know which HCR to use? How do we consider uncertainty? See the next tutorial!

+ +
+ + + + +
+ + + + + + + + diff --git a/tutorials/introHCR.pdf b/tutorials/introHCR.pdf new file mode 100644 index 0000000..79dc4f8 Binary files /dev/null and b/tutorials/introHCR.pdf differ diff --git a/tutorials/introUncertainty.Rmd b/tutorials/introUncertainty.Rmd new file mode 100644 index 0000000..9976e74 --- /dev/null +++ b/tutorials/introUncertainty.Rmd @@ -0,0 +1,147 @@ +--- +title: "Using the *Introduction to uncertainty* Shiny app" +author: "Finlay Scott - OFP, SPC" +date: "2018-10-10" +output: + html_document: + keep_md: yes + pdf_document: default +--- + + + + +# Introducing uncertainty + +In the previous tutorial (*Introduction to HCRs*) we ran projections by going forward one step at a time. +This hopefully allowed us to see the basic ideas of how a HCR worked. +We also looked at two sources of uncertainty (*biological productivity variability* and *estimation error*) and saw how they can affect the performance of a HCR. +In this tutorial we build on this by running full projections with uncertainty. + +Start by double-clicking on the **IntroUncertainty** file in the AMPED directory. +A black window *should* appear, followed by the app opening in a browser window. +If this does not happen, something has gone wrong. Sorry... + +# Simple introduction + +The layout of the app should show the HCR with three time series plots of SB/SBF=0, catch and CPUE (relative to the CPUE in 1999). The parameters of the HCR can be set using the controls on the left-hand side. + + +![](figures/introUncertainty_start.png) + +The initial values of the HCR parameters are: *Blim* = 0.2, *Belbow* = 0.5, *Cmin* = 10 and *Cmax* = 140. +Click on the **Run projection** button to perform a 20 year projection (from 2000 to 2020). + + +![](figures/introUncertainty1.png) + +You should see that the time series plots now show the time series until 2020. +The HCR plot should show blue points on the parts of the HCR that were active during the projection. + +Click **Run projection** again. Nothing seems to happen except weird bars at the end of the time series plots. +You may notice that the **Number of iterations** counter under the HCR plot also increased by one. +Keep clicking **Run projection**. Nothing much happens. This is boring. + +What is happening? +Each time you click **Run projection** we are running a new projection. +However, we have no uncertainty in the projection so the result is exactly the same. + +This is not very interesting. + +# Turn on the variability + +We introduced uncertainty in the previous tutorial (*Introduction to HCRs*). In this app there are two sources of uncertainty: *Biological productivity variability* and *Estimation error*. + +Click on the **Show variability options** to show the uncertainty options. +Set **Biological productivity variability** to 0.2, **Estimation error variability** to 0.04 and leave **Estimation error bias** at 0. +Keep the HCR parameters the same. + +Click **Run projection**. You should see that now your projection is bumpy. +Click **Run projection** again. You should get another time series but it is different to the previous one (the previous one is in grey, the new one in black). +This second projection has the same stock and the same HCR as the first one but the outcome is different. +The difference is a result of the uncertainty in the biological productivity and the estimation error. +The different projections are known as iterations. + +The black line shows the current iteration (projection), the grey line the past iterations. 
+This is the same on the HCR plot - the grey dots show the past iterations. + +Click **Run projection** again and again. More lines will appear. +The bars at the end of the time series plots are histograms of the values in the final year of the projection. +These histograms will fill in the more projections you run. + +Keep clicking **Run projection** until you get 30 or more iterations. +You can see the distribution of the final values of the projection start to settle down. +This means that we are starting to capture the uncertainty in our projected values. +As you keep clicking this distribution will settle down. When this happens we are more certain about the impact of uncertainty and get estimates of the distributions of values. + + +![](figures/introUncertainty2.png) + + +When testing a HCR through simulation we don't run just one projection, we run lots of them. +How many depends on the sources of uncertainty, the life history of the stock etc. We need the distribution to settle down. + +These types of projections with uncertainty are different to running a single deterministic (non-random) projection. +If we want to understand how uncertainty may affect the performance of a HCR in the real world, running a deterministic projection is not enough. + +We need to select a HCR that is robust to the uncertainty. +The big question is how to choose HCR a preferred HCR? + +You need some way of comparing performance. So how do you mention performance? + +# Introducing performance indicators + +Press the **Reset** button. +We are interested in measuring how well the HCR is performing. +To do this we can use Performance Indicators (PIs) to measure different metrics. Lots of different PIs are available that measure different things, for example, catch levels, changes in effort, probability of SB/SBF=0 being above the LRP etc. +The PIs should relate to the management objectives for the fishery and allow you to measure how well the fishery is performing in relation to those objectives. Some HCRs will perform well for some PIs and poorly for others. This is where the ideas of prioritising PIs and evaluating trade-offs come in. + + + +Click on the **Show performance indicators** button. Nothing happens - yet. +With the HCR and uncertainty parameters set as before, cLick the **Run projection** button. +You should see that a table has appeared with various PIs over different time periods. +The PIs are in the rows in the table. +The different time periods (short-, medium- and long-term) are the columns. +The average value of the PIs is calculated over the differnt time periods. + +At the time of writing there are 8 indicators. Don't worry about all of them for the moment. +Just look at *SB/SBF=0*, *Prob. SB > LRP* (the probability of SB/SBF=0 being above the LRP), *Catch* and *CPUE (rel. 1999)* for now. + + +Click the **Run projection** button again so that we have two iterations. Notice that the values in the table have changed. +We now have two extra values for each PI in (). +The single value is the median of the two iterations. The values in the () are the 20 and 80 percentile. +It doesn't make much sense to calculate this for just 2 iterations. + +Keep clicking **Run projection** until you have about 30 iterations. +You should see that the numbers update in the table but start to settle down (in the same way that the histogram settles down). + +![](figures/introUncertainty_PIs.png) + +Take note of the long-term values of *SB/SBF=0*, *Prob. SB > LRP*, *catch* and *CPUE (rel. 1999)*. 
+ + + +# Another HCR + +We can do this again for a different HCR. +Set these parameters: *Blim* = 0.2, *Belbow* = 0.7, *Cmin* = 10 and *Cmax* = 160. +Keep the same uncertainty options as before. + +Click **Run projection** about 30 times and note down the long-term behaviour of SB/SBF=0, catch and relative CPUE. + +![](figures/introUncertainty_PIs2.png) + + + +The catches and probability of being above the LRP look about the same, but SB/SBF=0 and CPUE are higher for the second HCR. +This means if we only considered these four PIs in the long-term you would probably pick the second HCR instead of the first one. + +# Summary + +When we simulate the performance of a HCR we don't run just one projection, we run lots. How many depends on the sources of uncertainty, the life history of the stock etc. We need the distribution to settle down. + +This means we don't have just one value for the results but a distribution of values. +We need to summarise these. Here we use the median and the 20, 80 percentile. + diff --git a/tutorials/introUncertainty.html b/tutorials/introUncertainty.html new file mode 100644 index 0000000..b2a54f0 --- /dev/null +++ b/tutorials/introUncertainty.html @@ -0,0 +1,406 @@ + + + + + + + + + + + + + + + +Using the Introduction to uncertainty Shiny app + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + +
+

Introducing uncertainty

+

In the previous tutorial (Introduction to HCRs) we ran projections by going forward one step at a time. This hopefully allowed us to see the basic ideas of how a HCR worked. We also looked at two sources of uncertainty (biological productivity variability and estimation error) and saw how they can affect the performance of a HCR. In this tutorial we build on this by running full projections with uncertainty.

+

Start by double-clicking on the IntroUncertainty file in the AMPED directory. A black window should appear, followed by the app opening in a browser window. If this does not happen, something has gone wrong. Sorry…

+
+
+

Simple introduction

+

The layout of the app should show the HCR with three time series plots of SB/SBF=0, catch and CPUE (relative to the CPUE in 1999). The parameters of the HCR can be set using the controls on the left-hand side.

+ +

+

The initial values of the HCR parameters are: Blim = 0.2, Belbow = 0.5, Cmin = 10 and Cmax = 140. Click on the Run projection button to perform a 20 year projection (from 2000 to 2020).

+ +

+

You should see that the time series plots now show the time series until 2020. The HCR plot should show blue points on the parts of the HCR that were active during the projection.

+

Click Run projection again. Nothing seems to happen except weird bars at the end of the time series plots. You may notice that the Number of iterations counter under the HCR plot also increased by one. Keep clicking Run projection. Nothing much happens. This is boring.

+

What is happening? Each time you click Run projection we are running a new projection. However, we have no uncertainty in the projection so the result is exactly the same.
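The same point can be made with a toy projection in R; the dynamics below are invented for illustration and are not the app's model.

```r
# Without noise (sigma = 0) a projection is deterministic: repeated runs are
# identical. With noise, every run differs. Toy biomass dynamics only.
project <- function(nyears = 20, b0 = 0.5, sigma = 0) {
  b <- b0
  for (y in seq_len(nyears)) {
    catch <- if (b >= 0.5) 0.14 else if (b <= 0.2) 0.01 else 0.01 + 0.13 * (b - 0.2) / 0.3
    b <- max(b + 0.5 * b * (1 - b) * rlnorm(1, 0, sigma) - catch, 0.01)
  }
  b  # final SB/SBF=0
}
identical(project(sigma = 0), project(sigma = 0))   # TRUE: same result every time
c(project(sigma = 0.2), project(sigma = 0.2))       # two different outcomes
```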

+

This is not very interesting.

+
+
+

Turn on the variability

+

We introduced uncertainty in the previous tutorial (Introduction to HCRs). In this app there are two sources of uncertainty: Biological productivity variability and Estimation error.

+

Click on the Show variability options to show the uncertainty options. Set Biological productivity variability to 0.2, Estimation error variability to 0.04 and leave Estimation error bias at 0. Keep the HCR parameters the same.

+

Click Run projection. You should see that now your projection is bumpy. Click Run projection again. You should get another time series but it is different to the previous one (the previous one is in grey, the new one in black). This second projection has the same stock and the same HCR as the first one but the outcome is different. The difference is a result of the uncertainty in the biological productivity and the estimation error. The different projections are known as iterations.

+

The black line shows the current iteration (projection) and the grey lines show the past iterations. This is the same on the HCR plot - the grey dots show the past iterations.

+

Click Run projection again and again. More lines will appear. The bars at the end of the time series plots are histograms of the values in the final year of the projection. These histograms will fill in as you run more projections.

+

Keep clicking Run projection until you get 30 or more iterations. You can see the distribution of the final values of the projection start to settle down. This means that we are starting to capture the uncertainty in our projected values. As you keep clicking this distribution will settle down. When this happens we are more certain about the impact of uncertainty and get estimates of the distributions of values.
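The settling down of the distribution can be sketched very simply. Here the 'final values' are just random draws rather than real projection output, but the principle is the same: summary statistics stabilise as the number of iterations grows.

```r
# Sketch: the 20th, 50th and 80th percentiles of the final values become more
# stable as more iterations are added. Random draws stand in for projections.
set.seed(3)
final_values <- function(n) rlnorm(n, meanlog = log(0.4), sdlog = 0.2)
round(quantile(final_values(5),   probs = c(0.2, 0.5, 0.8)), 3)
round(quantile(final_values(30),  probs = c(0.2, 0.5, 0.8)), 3)
round(quantile(final_values(500), probs = c(0.2, 0.5, 0.8)), 3)
```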

+ +

+

When testing a HCR through simulation we don’t run just one projection, we run lots of them. How many depends on the sources of uncertainty, the life history of the stock etc. We need the distribution to settle down.

+

These types of projections with uncertainty are different to running a single deterministic (non-random) projection. If we want to understand how uncertainty may affect the performance of a HCR in the real world, running a deterministic projection is not enough.

+

We need to select a HCR that is robust to the uncertainty. The big question is: how do we choose a preferred HCR?

+

You need some way of comparing performance. So how do you measure performance?

+
+
+

Introducing performance indicators

+

Press the Reset button. We are interested in measuring how well the HCR is performing. To do this we can use Performance Indicators (PIs) to measure different metrics. Lots of different PIs are available that measure different things, for example, catch levels, changes in effort, probability of SB/SBF=0 being above the LRP etc. The PIs should relate to the management objectives for the fishery and allow you to measure how well the fishery is performing in relation to those objectives. Some HCRs will perform well for some PIs and poorly for others. This is where the ideas of prioritising PIs and evaluating trade-offs come in.

+

Click on the Show performance indicators button. Nothing happens - yet. With the HCR and uncertainty parameters set as before, click the Run projection button. You should see that a table has appeared with various PIs over different time periods. The PIs are in the rows of the table. The different time periods (short-, medium- and long-term) are the columns. The average value of the PIs is calculated over the different time periods.

+

At the time of writing there are 8 indicators. Don’t worry about all of them for the moment. Just look at SB/SBF=0, Prob. SB > LRP (the probability of SB/SBF=0 being above the LRP), Catch and CPUE (rel. 1999) for now.

+

Click the Run projection button again so that we have two iterations. Notice that the values in the table have changed. We now have two extra values for each PI, shown in parentheses. The single value is the median of the iterations. The values in parentheses are the 20th and 80th percentiles. It doesn’t make much sense to calculate these for just 2 iterations.

+

Keep clicking Run projection until you have about 30 iterations. You should see that the numbers update in the table but start to settle down (in the same way that the histogram settles down).

+

+

Take note of the long-term values of SB/SBF=0, Prob. SB > LRP, catch and CPUE (rel. 1999).

+ +
+
+

Another HCR

+

We can do this again for a different HCR. Set these parameters: Blim = 0.2, Belbow = 0.7, Cmin = 10 and Cmax = 160. Keep the same uncertainty options as before.

+

Click Run projection about 30 times and note down the long-term behaviour of SB/SBF=0, catch and relative CPUE.

+

+ +

The catches and probability of being above the LRP look about the same, but SB/SBF=0 and CPUE are higher for the second HCR. This means if we only considered these four PIs in the long-term you would probably pick the second HCR instead of the first one.

+
+
+

Summary

+

When we simulate the performance of a HCR we don’t run just one projection, we run lots. How many depends on the sources of uncertainty, the life history of the stock etc. We need the distribution to settle down.

+

This means we don’t have just one value for the results but a distribution of values. We need to summarise these. Here we use the median and the 20th and 80th percentiles.

+
+ + + + +
+ + + + + + + + diff --git a/tutorials/introUncertainty.pdf b/tutorials/introUncertainty.pdf new file mode 100644 index 0000000..335ba91 Binary files /dev/null and b/tutorials/introUncertainty.pdf differ diff --git a/tutorials/measuringPerformance.Rmd b/tutorials/measuringPerformance.Rmd new file mode 100644 index 0000000..2097da0 --- /dev/null +++ b/tutorials/measuringPerformance.Rmd @@ -0,0 +1,163 @@ +--- +title: "Using the *Measuring performance* Shiny app" +author: "Finlay Scott OFP, SPC" +date: "2018-10-10" +output: + html_document: + keep_md: yes + pdf_document: default +--- + + + + +# Measuring performance + +In the previous tutorial (*Introduction to uncertainty*) we ran projections one iteration at a time. +We included two sources of uncertainty (*biological productivity variability* and *estimation error*) and began to look how to compare the performance of different HCRs using Performance Indicators (PIs). +In this tutorial we build on this by assembling a basket of candidate HCRs and comparing their performance in a number of ways. + +Start by double-clicking on the **MeasuringPerformance** file in the AMPED directory. +A black window *should* appear, followed by the app opening in a browser window. +If this does not happen, something has gone wrong. Sorry... + + +![](figures/measuringPerformance_start.png) + +You should be presented with the familiar HCR plot on the left of the main panel, and three time series plots on the right of the main panel which show SB/SBF=0, catch and CPUE relative to the CPUE in the year 1999. +The uncertainty is already switched on and the time series plots already have multiple iterations (the default is 100 iterations). +The grey envelopes contain the 20-80 percentile values and the blue, dashed line is the median. Some individual trajectories are also shown (known as spaghetti). + +We are going to run a series of projections with multiple iterations to test the performance of various HCRs +The basic process we will follow here is: + +* Set up a HCR using the HCR parameters on the left-hand side; +* Project the stock forward in time under that HCR (by pressing the **Project HCR** button); +* Have a quick check of the resulting time series plots and PI values; +* If you like the HCR, add it to the basket of candidate HCRs (by pressing the **Add HCR to basket** button); +* When you have several HCRs in the basket, go to the **Compare performance** tab and take a look at relative performance of them. + +# A quick example + +## Setting up HCRs, running projections and adding to the basket + +The initial values of the HCR parameters are: *Blim* = 0.2, *Belbow* = 0.5, *Cmin* = 10 and *Cmax* = 140. +Press the **Project HCR** button to run the projection. +This runs a projection with 100 iterations. The results can be seen in the time series plots and the table of PIs. + +There are 8 PIs in the table. *SB/SBF=0* and *Catch* are fairly self explanatory. *Effort (rel. 1999)* and *CPUE (rel. 1999)* are the fishing effort and CPUE relative to their values in 1999 respectively. *Prob. SB > LRP* is the probability of of SB/SBF=0 being above the LRP. *Catch variability*, *Effort variability* and *CPUE variability* measure the variability in the catch, relative effort and relative CPUE respectively. They measure how much the catch etc. change over the time (the bumpiness in the plots). The higher the value, the more the value changes over time. + +It should be noted that the PIs don't all point the same way. 
+It is generally thought that the higher the value of *SB/SBF=0*, *Prob. SB > LRP*, *Catch* and *CPUE (rel. 1999)* the better the HCR is performing. +However, for *Effort (rel. 1999)* and the *variability* PIs, lower values are preferred. The higher the effort, the greater the costs. Stable catches and effort are preferred to catches and effort that varying strongly between years. +Care must therefore be taken when using PIs to compare performance of HCRs. + +Click on the **Add HCR to basket** button to add the HCR to the basket of candidate HCRs. You should see that the counter **Number of HCRs in basket** increases by 1. + + +![](figures/measuringPerformanceHCR1.png) + +Repeat this process (set up, run, add) for two other HCRs. +Use the following parameters: + +HCR2: *Blim* = 0.2, *Belbow* = 0.7, *Cmin* = 10 and *Cmax* = 160. + +HCR3: *Blim* = 0.2, *Belbow* = 0.3, *Cmin* = 10 and *Cmax* = 120. + +You should now have three HCRs in your basket (check the counter). + +## Comparing performance + +We can now compare the performance of the three HCRs. Select the **Compare performance** tab at the top of the app window. +You should see bar plots of the median values of the PIs (in each panel), of each HCR in the basket (the different colours) in the three different time periods (short-, medium- and long-term). + + +![](figures/measuringPerformance_medianBar.png) + +It should be noted that in this plot the 'upside down' PIs (*Effort (rel. 1999)* and the *variability* PIs) have been transformed so that the higher the value the better. The variability PIs now measure the relative stability. The higher the value, the more stable the values. + +There is a lot of information presented here. PIs that are thought to be unimportant (perhaps they do not measure anything related to your management objectives) can be dropped by deselecting them from the list in the left panel. +Similarly, HCRs can be deselected if they are thought to be of no interest. + + + +For example, we might think that the effort variability and CPUE variability do not matter to us. +Deselect them from the list on the left. +This gives us 6 PIs left to consider. + +Which of these three HCRs is performing better? +Considering the medium-term period only, HCR2 has the highest CPUE but HCR3 has the highest catches. +HCR3 has the highest effort, which means higher costs, which might make it less desirable. +HCR2 has the highest catch stability. + +The different HCRs have different values for SB / SBF=0 but the probability of being above the LRP is 1 for all of them so does measuring SB / SBF=0 matter? + +## Other methods of comparison + +The **Compare performance** tab has six different sub-tabs at the top that allow you to explore the performance of the HCRs in different ways. Looking at the bar plots of the medians does not tell us about the distribution of values and effectively ignores the uncertainty. You can see the uncertainty by looking at the **Performance indicators - boxplots** tab. + +![](figures/measuringPerformance_boxPlot.png) + +You can see that the different HCRs have different levels of uncertainty in the PIs. For example, HCR3 has the highest values of the CPUE but it also has the widest distribution of values meaning it is less certain. This might be worth considering when selecting a preferred HCR. + +Note that on the box plot, the 'upside down' PIs (*Effort (rel. 1999)* and the *variability* PIs) have not been transformed. This means that lower values of variability and effort are thought to be better. 
This does make comparison a bit tricky. Sorry. + +You can also look at **Performance indicators - radar** for radar plots (if you like them...). +The 'upside down' PIs have been transformed for the radar plot so the larger the area, the better the HCR is thought to be performing. + +![](figures/measuringPerformance_radar.png) + +**Performance indicators - table** has a table of PI values in the long-term (he 'upside down' PIs have not been transformed so that lower variability and effort values are better). +**Majuro plots** has a Majuro plot with the different stock trajectories on. +**Time series** has time series plots of various metrics. + +With all of them you can select and deselect the HCRs to help compare the performances. + +If you return to the *HCR Selection* tab and press the *Empty basket* you will empty all the HCRs from your basket. + +You should decide which PIs are the most important to you (i.e. have the highest priority) and then try to find a HCR that performs the best. + +# Summary + +Choosing a preferred HCR is not a trivial task. It is possible to calculate many different indicators to meaure their performance. The distribution of these indicators should be considered as well as their central (median or average) value. Additionally, you can have different time periods to consider. + +It may not be possible to find a HCR that performs well for all the chosen PIs. In this case PIs should be considered in order of their priority and trade-offs will need to be evaluated. + + + + + + + + + + + + diff --git a/tutorials/measuringPerformance.html b/tutorials/measuringPerformance.html new file mode 100644 index 0000000..a1276cf --- /dev/null +++ b/tutorials/measuringPerformance.html @@ -0,0 +1,440 @@ + + + + + + + + + + + + + + + +Using the Measuring performance Shiny app + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + +
+

Measuring performance

+

In the previous tutorial (Introduction to uncertainty) we ran projections one iteration at a time. We included two sources of uncertainty (biological productivity variability and estimation error) and began to look at how to compare the performance of different HCRs using Performance Indicators (PIs). In this tutorial we build on this by assembling a basket of candidate HCRs and comparing their performance in a number of ways.

+

Start by double-clicking on the MeasuringPerformance file in the AMPED directory. A black window should appear, followed by the app opening in a browser window. If this does not happen, something has gone wrong. Sorry…

+ +

+

You should be presented with the familiar HCR plot on the left of the main panel, and three time series plots on the right of the main panel which show SB/SBF=0, catch and CPUE relative to the CPUE in the year 1999. The uncertainty is already switched on and the time series plots already have multiple iterations (the default is 100 iterations). The grey envelopes contain the 20-80 percentile values and the blue, dashed line is the median. Some individual trajectories are also shown (known as spaghetti).
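If you want to reproduce this kind of summary yourself, a minimal sketch is shown below, assuming the projection results are held as an iterations-by-years matrix (the data here are made up):

```r
# Sketch: the median line and the 20-80 percentile envelope, by year.
# 'biomass' is iterations (rows) by years (columns); values are fake.
set.seed(4)
niters <- 100
years  <- 2000:2020
biomass <- matrix(rlnorm(niters * length(years), meanlog = log(0.4), sdlog = 0.15),
                  nrow = niters, dimnames = list(NULL, as.character(years)))
envelope <- apply(biomass, 2, quantile, probs = c(0.2, 0.5, 0.8))
envelope[, 1:3]   # 20th percentile, median and 80th percentile for the first years
```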

+

We are going to run a series of projections with multiple iterations to test the performance of various HCRs. The basic process we will follow here is:

+
    +
  • Set up a HCR using the HCR parameters on the left-hand side;
  • +
  • Project the stock forward in time under that HCR (by pressing the Project HCR button);
  • +
  • Have a quick check of the resulting time series plots and PI values;
  • +
  • If you like the HCR, add it to the basket of candidate HCRs (by pressing the Add HCR to basket button);
  • +
  • When you have several HCRs in the basket, go to the Compare performance tab and take a look at their relative performance.
  • +
+
+
+

A quick example

+
+

Setting up HCRs, running projections and adding to the basket

+

The initial values of the HCR parameters are: Blim = 0.2, Belbow = 0.5, Cmin = 10 and Cmax = 140. Press the Project HCR button to run the projection. This runs a projection with 100 iterations. The results can be seen in the time series plots and the table of PIs.

+

There are 8 PIs in the table. SB/SBF=0 and Catch are fairly self-explanatory. Effort (rel. 1999) and CPUE (rel. 1999) are the fishing effort and CPUE relative to their values in 1999 respectively. Prob. SB > LRP is the probability of SB/SBF=0 being above the LRP. Catch variability, Effort variability and CPUE variability measure the variability in the catch, relative effort and relative CPUE respectively. They measure how much the catch etc. change over time (the bumpiness in the plots). The higher the value, the more the value changes over time.
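Below is a hedged sketch of how a few of these PIs could be calculated from projection output; the LRP value of 0.2 and the use of a coefficient of variation for the variability PIs are assumptions, not necessarily the app's definitions.

```r
# Sketch: a few PIs computed from iterations-by-years matrices of fake output.
set.seed(5)
sb    <- matrix(rlnorm(100 * 21, meanlog = log(0.4), sdlog = 0.15), nrow = 100)
catch <- matrix(rlnorm(100 * 21, meanlog = log(120), sdlog = 0.10), nrow = 100)
pis <- c(
  `SB/SBF=0`          = median(sb),
  `Prob. SB > LRP`    = mean(sb > 0.2),                                       # assumed LRP = 0.2
  `Catch`             = median(catch),
  `Catch variability` = median(apply(catch, 1, function(x) sd(x) / mean(x)))  # CV, assumed
)
round(pis, 3)
```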

+

It should be noted that the PIs don’t all point the same way. It is generally thought that the higher the value of SB/SBF=0, Prob. SB > LRP, Catch and CPUE (rel. 1999) the better the HCR is performing. However, for Effort (rel. 1999) and the variability PIs, lower values are preferred. The higher the effort, the greater the costs. Stable catches and effort are preferred to catches and effort that vary strongly between years. Care must therefore be taken when using PIs to compare the performance of HCRs.

+

Click on the Add HCR to basket button to add the HCR to the basket of candidate HCRs. You should see that the counter Number of HCRs in basket increases by 1.

+ +

+

Repeat this process (set up, run, add) for two other HCRs. Use the following parameters:

+

HCR2: Blim = 0.2, Belbow = 0.7, Cmin = 10 and Cmax = 160.

+

HCR3: Blim = 0.2, Belbow = 0.3, Cmin = 10 and Cmax = 120.

+

You should now have three HCRs in your basket (check the counter).

+
+
+

Comparing performance

+

We can now compare the performance of the three HCRs. Select the Compare performance tab at the top of the app window. You should see bar plots of the median values of the PIs (one PI per panel) for each HCR in the basket (the different colours) in the three different time periods (short-, medium- and long-term).

+ +

+

It should be noted that in this plot the ‘upside down’ PIs (Effort (rel. 1999) and the variability PIs) have been transformed so that the higher the value the better. The variability PIs now measure the relative stability. The higher the value, the more stable the values.
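One simple transformation that would achieve this is sketched below; the exact transform used by the app is an assumption here.

```r
# Sketch: flip an 'upside down' PI so that bigger is better, e.g. turning
# catch variability into catch stability. Values are made up.
variability <- c(HCR1 = 0.15, HCR2 = 0.08, HCR3 = 0.22)
stability   <- 1 / (1 + variability)
round(stability, 2)   # HCR2, the least variable, now scores highest
```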

+

There is a lot of information presented here. PIs that are thought to be unimportant (perhaps they do not measure anything related to your management objectives) can be dropped by deselecting them from the list in the left panel. Similarly, HCRs can be deselected if they are thought to be of no interest.

+ +

For example, we might think that the effort variability and CPUE variability do not matter to us. Deselect them from the list on the left. This gives us 6 PIs left to consider.

+

Which of these three HCRs is performing best? Considering the medium-term period only, HCR2 has the highest CPUE but HCR3 has the highest catches. HCR3 has the highest effort, which means higher costs and might make it less desirable. HCR2 has the highest catch stability.

+

The different HCRs have different values for SB/SBF=0, but the probability of being above the LRP is 1 for all of them, so does measuring SB/SBF=0 matter?

+
+
+

Other methods of comparison

+

The Compare performance tab has six different sub-tabs at the top that allow you to explore the performance of the HCRs in different ways. Looking at the bar plots of the medians does not tell us about the distribution of values and effectively ignores the uncertainty. You can see the uncertainty by looking at the Performance indicators - boxplots tab.

+

+

You can see that the different HCRs have different levels of uncertainty in the PIs. For example, HCR3 has the highest values of the CPUE but it also has the widest distribution of values meaning it is less certain. This might be worth considering when selecting a preferred HCR.

+

Note that on the box plot, the ‘upside down’ PIs (Effort (rel. 1999) and the variability PIs) have not been transformed. This means that lower values of variability and effort are thought to be better. This does make comparison a bit tricky. Sorry.

+

You can also look at Performance indicators - radar for radar plots (if you like them…). The ‘upside down’ PIs have been transformed for the radar plot so the larger the area, the better the HCR is thought to be performing.

+

+

Performance indicators - table has a table of PI values in the long-term (the ‘upside down’ PIs have not been transformed, so lower variability and effort values are better). Majuro plots has a Majuro plot with the different stock trajectories on it. Time series has time series plots of various metrics.

+

With all of them you can select and deselect the HCRs to help compare the performances.

+

If you return to the HCR Selection tab and press the Empty basket button, you will empty all the HCRs from your basket.

+

You should decide which PIs are the most important to you (i.e. have the highest priority) and then try to find a HCR that performs the best.

+
+
+
+

Summary

+

Choosing a preferred HCR is not a trivial task. It is possible to calculate many different indicators to measure their performance. The distribution of these indicators should be considered as well as their central (median or average) value. Additionally, you can have different time periods to consider.

+

It may not be possible to find a HCR that performs well for all the chosen PIs. In this case PIs should be considered in order of their priority and trade-offs will need to be evaluated.

+ + +
+ + + + +
+ + + + + + + + diff --git a/tutorials/measuringPerformance.pdf b/tutorials/measuringPerformance.pdf new file mode 100644 index 0000000..8ee16dc Binary files /dev/null and b/tutorials/measuringPerformance.pdf differ