diff --git a/.Rbuildignore b/.Rbuildignore
new file mode 100644
index 0000000000000000000000000000000000000000..1e05c5746714b35fff3b992d50f0e19c30fe7a3f
--- /dev/null
+++ b/.Rbuildignore
@@ -0,0 +1,21 @@
+^.*\.Rproj$
+^\.Rproj\.user$
+^vignettes/onlineforecasting_pdf_source
+^vignettes/building-heat-load-forecasting_cache-rls
+^vignettes/building-heat-load-forecasting_files
+^vignettes/setup-and-use-models_cache
+^vignettes/setup-and-use-models_files
+^vignettes/setup-and-use-models.html
+^vignettes/setup-and-use-models.Rmd
+^vignettes/setup-and-use-models.R
+^vignettes/setup-data_cache
+^vignettes/setup-data_files
+^vignettes/setup-data.html
+^vignettes/setup-data.Rmd
+^vignettes/setup-data.R
+^vignettes/cache
+^vignettes/tmp-output/
+^tests
+^vignettes/literature.bib
+^misc-R$
+^.*\.Rhistory$
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..64b396e91c92c78623f57f6919f62124c04298aa
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,35 @@
+.Rproj.user
+.Rhistory
+.RData
+.Ruserdata
+
+NAMESPACE
+
+*.o
+src/onlineforecast\.so
+inst/doc
+
+modifications_old_notstaged/
+
+
+cache/
+
+man/
+
+misc-R/*cache*
+
+vignettes/*cache*
+vignettes/*genfig*
+vignettes/*_files*
+vignettes/tmp-output/
+vignettes/setup-data_cache/
+vignettes/solar-forecasting_cache-rls/
+vignettes/building-heat-load-forecasting_cache/
+vignettes/onlineforecasting_pdf_source/onlineforecasting\.tex
+
+vignettes/onlineforecasting_pdf_source/*cache*
+vignettes/onlineforecasting_pdf_source/*genfig*
+vignettes/onlineforecasting_pdf_source/onlineforecasting-tikzDictionary
+vignettes/onlineforecasting_pdf_source/onlineforecasting.log
+vignettes/onlineforecasting_pdf_source/onlineforecasting.pdf
+
diff --git a/DESCRIPTION b/DESCRIPTION
new file mode 100644
index 0000000000000000000000000000000000000000..cb28706a6a679af8f34b6a46d2d7c459ae672b24
--- /dev/null
+++ b/DESCRIPTION
@@ -0,0 +1,30 @@
+Package: onlineforecast
+Type: Package
+Title: Forecast Modelling for Online Applications
+Version: 1.0.0
+Description: A package for fitting adaptive forecasting models. Provides a way to use forecasts as input to models, e.g. weather forecasts for energy-related forecasting. The models can be fitted recursively and can easily be set up for updating parameters when new data arrives.
+License: GPL-3
+Encoding: UTF-8
+LazyData: true
+Authors@R: c(
+    person("Peder", "Bacher", email="pbac@dtu.dk", role = c("aut", "cre")),
+    person("Hjorleifur G.", "Bergsteinsson", email="hgbe@dtu.dk", role = "aut"))
+Depends: R (>= 3.0)
+Imports:
+    Rcpp (>= 0.12.18),
+    R6 (>= 2.2.2),
+    splines (>= 3.1.1),
+    plotly,
+    digest
+LinkingTo: Rcpp, RcppArmadillo
+Suggests:
+    knitr,
+    rmarkdown,
+    R.rsp,
+    testthat (>= 2.1.0)
+VignetteBuilder:
+    knitr,
+    R.rsp
+RoxygenNote: 7.1.0
+URL: http://onlineforecasting.org
+BugReports: https://lab.compute.dtu.dk/pbac/onlineforecasting/-/issues
\ No newline at end of file
diff --git a/R/AR.R b/R/AR.R
new file mode 100644
index 0000000000000000000000000000000000000000..080a158e2c80ab7cd9f0c7313d4aeab521aeb984
--- /dev/null
+++ b/R/AR.R
@@ -0,0 +1,99 @@
+## Do this in a separate file to see the generated help:
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?AR
+
+#' Generate auto-regressive (AR) inputs in a model
+#'
+#' The AR function can be used in an onlineforecast model formulation. It
+#' creates the input matrices for including AR inputs in a model during the
+#' transformation stage. It takes the values of the model output from the provided data
+#' and does the needed lagging.
+#'
+#' The lags must be given according to the one-step ahead model, e.g.:
+#'
+#' \code{AR(lags=c(0,1))} will give: Y_{t+1|t} = \eqn{\phi_1} y_{t-0} + \eqn{\phi_2} y_{t-1} + \eqn{\epsilon}_{t+1}
+#'
+#' and:
+#'
+#' \code{AR(lags=c(0,3,12))} will give: Y_{t+1|t} = \eqn{\phi}_1 y_{t-0} + \eqn{\phi}_2 y_{t-3} + \eqn{\phi}_3 y_{t-12} + \eqn{\epsilon}_{t+1}
+#'
+#' Note that for k>1 the coefficients will be fitted individually for each horizon, e.g.:
+#' 
+#' \code{AR(lags=c(0,1))} will be the multi-step AR: Y_{t+k|t} = \eqn{\phi}_{1,k} y_{t-0} + \eqn{\phi}_{2,k} y_{t-1} + \eqn{\epsilon}_{t+k|t}
+#'
+#' See the details in ??(ref to vignette).
+#' 
+#' @title Auto-Regressive (AR) input
+#' @param lags integer vector: The lags of the AR to include.
+#' @return A list of matrices, one for each lag in lags, each with columns according to model$kseq.
+#' @examples
+#'
+#' # Set up data and a model for the example
+#' D <- Dbuildingheatload
+#' model <- forecastmodel$new()
+#' model$output = "heatload"
+#' # Use the AR in the transformation stage
+#' model$add_inputs(AR = "AR(c(0,1))")
+#' # Regression parameters
+#' model$add_regprm("rls_prm(lambda=0.9)")
+#' # kseq must be added
+#' model$kseq <- 1:4
+#' # In the transformation stage the AR input will be generated
+#' # See that it generates two input matrices, simply with the lagged heat load at t for every k
+#' model$transform_data(subset(D, 1:10))
+#'
+#' # Fit with recursive least squares (no parameters prm in the model)
+#' fit <- rls_fit(c(lambda=0.99), model, D, returnanalysis=TRUE)
+#'
+#' # Plot the result, see "?plot_ts.rls_fit"
+#' plot(fit, xlim=c(asct("2010-12-20"),max(D$t)))
+#' # Plot for a short period with peaks
+#' plot(fit, xlim=c("2011-01-05","2011-01-07"))
+#'
+#' # For online updating, see ??ref{vignette}:
+#' # the needed lagged output values are stored in the model for next time new data is available
+#' model$yAR
+#' # The maximum lag needed is also kept
+#' model$maxlagAR
+#'
+#' @export
+
+AR <- function(lags){
+    # Just sort them first
+    lags <- sort(lags)
+    # Get the data and the model from an environment above (this way has worked until now, not exactly sure why the environments are in this order)
+    data <- parent.env(parent.frame())$data
+    model <- parent.env(parent.frame())$self$model
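+    # Note (an assumption about the call chain, kept as a sketch): AR() is meant to be evaluated
+    # from within an input's evaluate() call during the transformation stage, so 'data' and
+    # 'self' (the input object holding a reference to the model) are found in the enclosing
+    # environments; calling AR() directly outside that context will not work.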
+    
+    # Remember the max lag for later, only if bigger than current (should make set function doing this check)
+    if(is.na(model$maxlagAR) | max(lags) > model$maxlagAR){
+        model$maxlagAR <- max(lags)
+    }
+
+    # Setup the AR inputs, one matrix for each lag
+    retval <- lapply(lags, function(lag){
+        # Check if saved output values for AR exists
+    	if(is.na(model$yAR[1])){
+            # First time its called, so just use output values from data
+            val <- matrix(lag(data[[model$output]], lag), nrow=length(data$t), ncol=length(model$kseq))
+    	}else{
+            y <- c(model$yAR, data$y)
+            # Find the seq for the new y lagged vector
+            iend <- (length(y)-lag)
+            istart <- iend - length(data$y) + 1
+            # Take the sequence
+            y <- y[istart:iend]
+            # Insert in a matrix with column for each k
+            val <- matrix(y, nrow=length(data$t), ncol=length(model$kseq))
+        }
+        # Name the columns and return
+    	nams(val) <- pst("k", model$kseq)
+    	return(val)
+    })
+    names(retval) <- pst("lag", lags)
+    return(retval)
+}
diff --git a/R/RcppExports.R b/R/RcppExports.R
new file mode 100644
index 0000000000000000000000000000000000000000..9bd609395c449dba4099a290a498b10dd87ab940
--- /dev/null
+++ b/R/RcppExports.R
@@ -0,0 +1,38 @@
+# Generated by using Rcpp::compileAttributes() -> do not edit by hand
+# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
+
+#' Low pass filtering of a vector.
+#' 
+#' This function returns a vector which is x through a unity gain first-order low-pass filter.
+#'
+#' @name lp_vector_cpp
+#' @param x A numeric vector
+#' @param a1 the first order low-pass filter coefficient
+NULL
+
+lp_vector_cpp <- function(x, a1) {
+    .Call('_onlineforecast_lp_vector_cpp', PACKAGE = 'onlineforecast', x, a1)
+}
+
+#' Calculating k-step recursive least squares estimates
+#' 
+#' This function applies the k-step recursive least squares scheme to estimate
+#' parameters in a linear regression model.
+#'
+#' @name rls_update_cpp
+#' @param y Vector of observation
+#' @param X Matrix of input variables (design matrix)
+#' @param theta Vector of parameters (initial value)
+#' @param P Covariance matrix (initial value)
+#' @param lambda Forgetting factor
+#' @param k Forecast horizon
+#' @param n Length of the input
+#' @param np Dimension of P (np x np)
+#' @param istart Start index
+#' @param kmax Keep only the last kmax rows for next time
+NULL
+
+rls_update_cpp <- function(y, X, theta, P, lambda, k, n, np, istart, kmax) {
+    .Call('_onlineforecast_rls_update_cpp', PACKAGE = 'onlineforecast', y, X, theta, P, lambda, k, n, np, istart, kmax)
+}
+
diff --git a/R/as.data.list.R b/R/as.data.list.R
new file mode 100644
index 0000000000000000000000000000000000000000..2539e73a016d49cac70e97b8b9e76efc0f1fe5e5
--- /dev/null
+++ b/R/as.data.list.R
@@ -0,0 +1,104 @@
+# Do this in a separate file to see the generated help:
+# library(devtools)
+# document()
+# load_all(as.package("../../onlineforecast"))
+# ?as.data.list
+# ?as.data.list.data.frame
+
+
+#' These functions will convert the object into a data.list.
+#'
+#' A data.list is simply a list of vectors and data.frames. For use in the
+#' onlineforecast package the following format must be kept:
+#' 
+#'   - t: A vector of time.
+#' 
+#'   - vectors with same length as t: Holds observations and values synced to time t.
+#' 
+#'   - data.frames with the same number of rows as t: Holds forecasts in each column named by \code{kxx} where \code{xx} is the
+#'                                                horizon, e.g. \code{k0} is synced as observations, and \code{k1} is one-step ahead.
+#'
+#' @title Convert to data.list class
+#' @param object The object to be converted into a data.list
+#' @param ... Arguments to be passed to methods.
+#' @return a value of class data.list
+#' @seealso For specific detailed info see the children, e.g. \code{\link{as.data.list.data.frame}}.
+#' @family as.data.list
+#' 
+#' @export
+as.data.list <- function(object, ...){
+    UseMethod("as.data.list")
+}
+
+
+
+
+#' Convert a data.frame into a data.list
+#'
+#' The convention is that columns with forecasts are postfixed with \code{.kxx} where
+#' \code{xx} is the horizon. See the examples.
+#'
+#' @title Conversion of data.frame into data.list
+#' @param object The data.frame to be converted.
+#' @return a data.list
+#' @seealso as.data.list
+#' @family as.data.list
+#' @examples
+#' # Convert a dataframe with time and two observed variables
+#' X <- data.frame(t=1:10, x=1:10, y=1:10)
+#' as.data.list(X)
+#'
+#' # Convert a dataframe with time, forecast and an observed variable
+#' X <- data.frame(t=1:10, x.k1=1:10, x.k2=10:1, y.obs=1:10, y.k1=1:10, y.k2=1:10)
+#' as.data.list(X)
+#'
+#' # Can be converted back and forth
+#' X
+#' as.data.frame(as.data.list(X))
+#'
+#' @export
+as.data.list.data.frame <- function(object) {
+    X <- object
+    #TEST
+    #grep("\\.[hk][[:digit:]]+$", c("Ta.k1","Ta.k2","I.h1"))
+    # Check which columns hold forecasts and must be returned as data.frames in the data.list
+    inmsfor <- grep("\\.[hk][[:digit:]]+$", names(X))
+    #
+    if(length(inmsfor) > 0){
+        # Find the names of them
+        nmsfor <- unique(unlist(
+            getse(strsplit(names(X)[inmsfor], "\\."), 1)
+        ))
+        # Group all in a list
+        # Note that "u.k2y" is matched, but it maybe shouldn't be: grep("\\.k[:digit:]*", c("ij.k1","i","u.k2y"))
+        L <- lapply(nmsfor, function(nm){
+            return(inmsfor[grep(pst("^",nm), names(X)[inmsfor])])
+        })
+        names(L) <- nmsfor
+        # The vectors (time t, and observations)
+        Lobs <- as.list((1:ncol(X))[-inmsfor])
+        names(Lobs) <- names(X)[-inmsfor]
+    }else{
+        # No forecasts found
+        L <- list()
+        # The vectors (time t, and observations)
+        Lobs <- as.list((1:ncol(X)))
+        names(Lobs) <- names(X)
+    }
+    #
+    # Combine and sort like the order they came in
+    L <- c(L, Lobs)
+    L <- L[order(unlist(getse(L, 1)))]
+    #
+    # Make the data.list
+    val <- lapply(L, function(i) {
+        tmp <- X[ ,i]
+        if(!is.null(dim(tmp))){
+            # Its a forecast, hence a data.frame, remove "name." from the column names
+            names(tmp) <- getse(strsplit(names(tmp), "\\."), 2)
+        }
+        return(tmp)
+    })
+    class(val) <- "data.list"
+    return(val)
+}
diff --git a/R/asct.R b/R/asct.R
new file mode 100644
index 0000000000000000000000000000000000000000..e4aeb3dcc9e75c1b041bfa8883258092a1b7469e
--- /dev/null
+++ b/R/asct.R
@@ -0,0 +1,135 @@
+# Do this in a separate file to see the generated help:
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?asct
+#?asct.default
+
+#' The object is converted into POSIXct with tz="GMT".
+#'
+#' A simple helper, which wraps \code{\link{as.POSIXct}} and sets the time zone to "GMT" per default.
+#' 
+#' @title Conversion to POSIXct
+#' @param object The object to convert; can be character, numeric, POSIXct or POSIXlt.
+#' @param tz Timezone. If set, then the time zone of the object will be changed.
+#' @param duplicatedadd Seconds to add to duplicated time stamps, e.g. to mitigate duplicated timestamps at the shift to winter time (only used by the POSIXct and POSIXlt methods).
+#' @param ... Arguments to be passed to methods.
+#' @return An object of class POSIXct
+#' @section Methods:
+#' @examples
+#'
+#'
+#' # Create a POSIXct with tz="GMT"
+#' asct("2019-01-01")
+#' class(asct("2019-01-01"))
+#' asct("2019-01-01 01:00:05")
+#'
+#' # Convert to POSIXct from POSIXlt
+#' x <- asct("2019-01-01")
+#' class(asct(as.POSIXlt(x)))
+#'
+#' # To seconds and back again
+#' asct(as.numeric(x, units="sec"))
+#'
+#' # --------
+#' # Convert character of time which has summer time leaps
+#' # Example from CET (with CEST which is winter time)
+#' # 
+#' # The point of shifting to and from summer time:
+#' # DST Start (Clock Forward)	DST End (Clock Backward)
+#' # Sunday, March 31, 02:00	Sunday, October 27, 03:00
+#'
+#' # --------
+#' # From winter time to summer time
+#' txt <- c("2019-03-31 01:00",
+#'          "2019-03-31 01:30",
+#'          "2019-03-31 03:00",
+#'          "2019-03-31 03:30")
+#' x <- asct(txt, tz="CET")
+#' x
+#' asct(x, tz="GMT")
+#'
+#' # BE AWARE that timestamps between 02:00 and 02:59:59 (which do not exist at the shift) will lead to a wrong conversion
+#' txt <- c("2019-03-31 01:30",
+#'          "2019-03-31 02:00",
+#'          "2019-03-31 03:30")
+#' x <- asct(txt, tz="CET")
+#' x
+#' asct(x, tz="GMT")
+#' # This can be detected with a diff on the time, since not all steps are equal
+#' plot(diff(asct(x, tz="GMT")))
+#' 
+#' # --------
+#' # Shift to winter time is more problematic
+#' # It works like this 
+#' txt <- c("2019-10-27 01:30",
+#'          "2019-10-27 02:00",
+#'          "2019-10-27 02:30",
+#'          "2019-10-27 03:00",
+#'          "2019-10-27 03:30")
+#' x <- asct(txt, tz="CET")
+#' x
+#' asct(x, tz="GMT")
+#'
+#' # However, timestamps can also be given like this
+#' txt <- c("2019-10-27 01:30",
+#'          "2019-10-27 02:00",
+#'          "2019-10-27 02:30",
+#'          "2019-10-27 02:00",
+#'          "2019-10-27 02:30",
+#'          "2019-10-27 03:00",
+#'          "2019-10-27 03:30")
+#' x <- asct(txt, tz="CET")
+#' x
+#' asct(x, tz="GMT")
+#' # Again this can be detected, since not all steps are equal
+#' plot(diff(asct(x, tz="GMT")))
+#' # This can be fixed with the duplicatedadd argument (note that it can go wrong, e.g. with gaps around the conversion etc.)
+#' asct(x, tz="GMT", duplicatedadd=3600)
+#'
+#' @export
+
+asct <- function(object, tz, ...){
+    UseMethod("asct")
+}
+
+
+#' @rdname asct
+#' @section Methods:
+#'     - asct.character: Simply a wrapper for \code{as.POSIXct} with default \code{tz}
+#' @export
+asct.character <- function(object, tz = "GMT", ...){
+    as.POSIXct(object, tz=tz, ...)
+}
+
+#' @rdname asct
+#' @section Methods:
+#'     - asct.POSIXct: Changes the time zone of the object if \code{tz} is given.
+#' @export
+asct.POSIXct <- function(object, tz = NA, duplicatedadd = NA){
+    if(!is.na(tz)){
+        attr(object, "tzone") <- tz
+    }
+    if(!is.na(duplicatedadd)){
+        # To mitigate the problem of duplicated timestamps at the shift to winter time
+        # then shift the time of duplicated time stamps with the seconds given
+        object[which(duplicated(object))] <- object[which(duplicated(object))] + duplicatedadd
+    }
+    return(object)
+}
+
+#' @rdname asct
+#' @section Methods:
+#'     - asct.POSIXlt: Converts to POSIXct.
+#' @export
+asct.POSIXlt <- function(object, tz = NA, duplicatedadd = NA){
+    as.POSIXct(asct.POSIXct(object, tz, duplicatedadd))
+}
+
+#' @rdname asct
+#' @section Methods:
+#'     - asct.numeric: Converts from UNIX time in seconds to POSIXct with \code{tz} as GMT.
+#' @export
+asct.numeric <- function(object){
+    ISOdate(1970, 1, 1, 0) + object
+}
diff --git a/R/aslt.R b/R/aslt.R
new file mode 100644
index 0000000000000000000000000000000000000000..75d51defab257413ac0720479797a4029343d3b6
--- /dev/null
+++ b/R/aslt.R
@@ -0,0 +1,73 @@
+# Do this in a separate file to see the generated help:
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?aslt
+#?aslt.default
+
+#' The argument is converted into POSIXlt with tz="GMT".
+#'
+#' A simple helper, which wraps \code{\link{as.POSIXlt}} and sets the time zone to "GMT" per default.
+#' 
+#' @title Conversion to POSIXlt
+#' @param object The object to convert; can be character, numeric, POSIXct or POSIXlt.
+#' @param tz Timezone. If set, then the time zone of the object will be changed.
+#' @param ... Arguments to be passed to methods.
+#' @return An object of class POSIXlt
+#' @section Methods:
+#' @examples
+#' 
+#' # Create a POSIXlt with tz="GMT"
+#' aslt("2019-01-01")
+#' class(aslt("2019-01-01"))
+#' aslt("2019-01-01 01:00:05")
+#'
+#' # Convert between time zones
+#' x <- aslt("2019-01-01", tz="CET")
+#' aslt(x,tz="GMT")
+#'
+#' # To seconds and back again
+#' aslt(as.numeric(x, units="sec"))
+#' 
+#' @export
+aslt <- function(object, tz, ...){
+    UseMethod("aslt")
+}
+
+#' @rdname aslt
+#' @section Methods:
+#'     - aslt.character: Simply a wrapper for \code{as.POSIXlt}
+#' @export
+aslt.character <- function(object, tz = "GMT", ...){
+    as.POSIXlt(object, tz = tz, ...)
+}
+
+#' @rdname aslt
+#' @section Methods:
+#'     - aslt.POSIXct: Converts to POSIXlt (changes the time zone first if \code{tz} is given).
+#' @export
+aslt.POSIXct <- function(object, tz = NA){
+    if(!is.na(tz)){
+        attr(object, "tzone") <- tz
+    }
+    as.POSIXlt(object)
+}
+
+#' @rdname aslt
+#' @section Methods:
+#'     - aslt.POSIXlt: Changes the time zone of the object if tz is given.
+#' @export
+aslt.POSIXlt <- function(object, tz = NA){
+    if(!is.na(tz)){
+        attr(object, "tzone") <- tz
+    }
+    return(object)
+}
+
+#' @rdname aslt
+#' @section Methods:
+#'     - aslt.numeric: Converts from UNIX time in seconds to POSIXlt.
+#' @export
+aslt.numeric <- function(object){
+    as.POSIXlt(ISOdate(1970, 1, 1, 0) + object)
+}
diff --git a/R/bspline.R b/R/bspline.R
new file mode 100644
index 0000000000000000000000000000000000000000..92848d62976602fe026da41c5caefdf82fbff67e
--- /dev/null
+++ b/R/bspline.R
@@ -0,0 +1,91 @@
+# # Do this in a separate file to see the generated help:
+# library(devtools)
+# document()
+# load_all(as.package("../../onlineforecast"))
+# ?bspline
+
+
+#' Compute base splines of a variable using the R function \code{splines::bs}, use in the transform stage.
+#'
+#' Simply wraps the \code{splines::bs}, such that it can be used in the transformation stage.
+#'
+#' See the help for all arguments with \code{?splines::bs}. NOTE that two arguments have different default values.
+#'
+#' For more examples of use see ??ref(solar forecast vignette).
+#' 
+#' @family Transform stage functions
+#' 
+#' @param X data.frame (as part of data.list) with horizons as columns named \code{kxx} (i.e. one for each horizon)
+#' @param Boundary.knots Default is NA, in which case the boundaries are set to the range of each horizon (each column in X). See \code{?splines::bs}
+#' @param intercept Default is TRUE: in an onlineforecast model there is no intercept per default (it can be added with \code{ones()}). See \code{?splines::bs}
+#' @param df See \code{?splines::bs}
+#' @param knots See \code{?splines::bs}
+#' @param degree See \code{?splines::bs}
+#' @return List of data frames with the computed base splines, each with columns for the same horizons as in X
+#' @examples
+#'
+#' # How to make a diurnal curve using splines
+#' # Select first 54 hours from the load data
+#' D <- subset(Dbuildingheatload, 1:54, kseq=1:4)
+#' # Make the hour of the day as a forecast input
+#' D$tday <- make_tday(D$t, kseq=1:4)
+#' D$tday
+#' 
+#' # Calculate the base splines for each column in tday
+#' L <- bspline(D$tday)
+#'
+#' # Now L holds a data.frame for each base spline
+#' str(L)
+#' # Hence this will result in four inputs for the regression model
+#'
+#' # Plot (note that the splines period starts at tday=0)
+#' plot(D$t, L$bs1$k1, type="s")
+#' for(i in 2:length(L)){
+#'   lines(D$t, L[[i]]$k1, col=i, type="s")
+#' }
+#'
+#' # In a model formulation it will be:
+#' model <- forecastmodel$new()
+#' model$add_inputs(mutday = "bspline(tday)")
+#' # Such that at the transform stage will give the same as above
+#' model$transform_data(D)
+#'
+#'
+#' @export
+bspline <- function(X, Boundary.knots = NA, intercept = TRUE, df = NULL, knots = NULL, degree = 3) {
+    # If a list, then call on each element
+    if (class(X) == "list") {
+        # Call again for each element
+        val <- lapply(1:length(X), function(i) {
+            bspline(X[[i]], df = df, knots = knots, degree = degree, intercept = intercept, 
+                    Boundary.knots = Boundary.knots)
+        })
+        nams(val) <- nams(X)
+        return(val)
+    }
+    # X is a data.frame or matrix
+    # First find the horizons, they are used in the end
+    nms <- nams(X)
+    # Run for each horizon and calculate the basis splines
+    L <- lapply(1:ncol(X), function(i) {
+      if (is.na(Boundary.knots[1])) {
+          Boundary.knots <- range(X[, i], na.rm=TRUE)
+      }
+      spls <- splines::bs(X[, i], Boundary.knots = Boundary.knots, degree = degree, df = df,
+                          knots = knots, intercept = intercept)
+      return(spls)
+    })
+    # Now we have a bs value in L for each horizon
+    # Separate each basespline in a data.frame with all horizons
+    L <- lapply(1:ncol(L[[1]]), function(i) {
+      tmp <- lapply(L, function(x) {
+        x[ ,i]
+      })
+      tmp <- data.frame(do.call("cbind", tmp))
+      nams(tmp) <- nms
+      return(tmp)
+    })
+    # Set the extra name
+    nams(L) <- pst("bs", 1:length(L))
+    return(L)
+}
diff --git a/R/cache_name.R b/R/cache_name.R
new file mode 100644
index 0000000000000000000000000000000000000000..5085150a4cf2a42df17cf9d880d335fbcf692919
--- /dev/null
+++ b/R/cache_name.R
@@ -0,0 +1,95 @@
+# Do this in a separate file to see the generated help:
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?cache_name
+
+#' Caching of the value returned by a function
+#'
+#' Use it in the beginning of a function which runs a time-consuming calculation, like fitting a model using optimization.
+#'
+#' It makes a cache name, which can be used to save a unique cache file (see \code{\link{cache_save}()}).
+#'
+#' The \code{cache_name} function must receive all the objects (in \code{...}) which influence the value of the function. It simply calculates a checksum using the \code{digest} package.
+#' 
+#' Further, it finds the name of the calling function and its definition, such that if anything changes in the function definition, then the cache file name changes too.
+#'
+#' @title Generation of a name for a cache file for the value of a function.
+#' @param ... The objects from which to calculate cache file name.
+#' If no objects given, then all the objects of the calling function are used for generating the checksum for the file name.
+#' @param cachedir Path for saving the cache, i.e. prefixed to the generated name, remember to end with '/' to make a directory.
+#' @return A generated cache file name.
+#' @examples 
+#' # A function for demonstrating the use of caching
+#' fun <- function(x, y){
+#'     # Generate the cache name (no argument given, so both x and y is used)
+#'     nm <- cache_name()
+#'     # If the result is cached, then just return it
+#'     if(file.exists(nm)){ return(readRDS(nm)) }
+#'     # Do the calculation
+#'     res <- x^2 + y + 1
+#'     # Wait 1 sec
+#'     Sys.sleep(1)
+#'     # Save for cache
+#'     cache_save(res, nm)
+#'     # Return
+#'     return(res)
+#' }
+#' 
+#' # First time it takes at least 1 sec.
+#' fun(x=2,y=2)
+#' # Second time it loads the cache and is much faster
+#' fun(x=2,y=2)
+#' # Try changing the arguments (x,y) and run again
+#'
+#' # See the cache file(s)
+#' dir("cache")
+#' # Delete the cache
+#' file.remove(dir("cache", full.names=TRUE))
+#'
+#' # Demonstrate how cache_name() is functioning
+#' # Cache using all the objects given in the calling function, i.e. both x and y
+#' fun <- function(x,y){
+#'     x^2 + y + 1
+#'     return(cache_name())
+#' }
+#' # These are the same (same values)
+#' fun(x=1,y=2)
+#' fun(1,2)
+#' fun(y=2,x=1)
+#' # But this one is different
+#' fun(x=2,y=1)
+#' \dontshow{
+#' # Testing
+#' if(fun(1,2) != fun(x=1,y=2) | fun(1,2) != fun(y=2,x=1)){ stop("A problem with cache_name() exists.") }
+#' }
+#'
+#' # Test: cache using the values specified in the cache_name call
+#' fun2 <- function(x,y){
+#'     x^2 + y + 1
+#'     return(cache_name(x))
+#' }
+#'
+#' # So now it's only the x value that changes the name
+#' fun2(1,2)
+#' fun2(1,3)
+#' # But this one is different 
+#' fun2(3,3)
+#' # And the function name also changed the cache name (fun2 instead of fun)
+#'
+#'
+#' @export
+cache_name <- function(..., cachedir = "cache"){
+    # Get the name, definition and arguments of the function from which cache_name was called
+    funname <- strsplit(deparse(sys.calls()[[sys.nframe()-1]]), "\\(")[[1]][1]
+    fundef <- digest::digest(attr(eval(parse(text=funname)), "srcref"))
+    # If no arguments were given, then use the arguments of the function from which cache_name was called
+    if(length(list(...)) == 0){
+        funargs <- digest::digest(as.list( match.call(def = sys.function( -1 ), call = sys.call(-1)))[-1])
+    }else{
+        funargs <- digest::digest(list(...))
+    }
+    # Create the md5 checksum filename with digest
+    filename <- paste0(funname,"_",fundef,"_",funargs,".RDS")
+    return(gsub("//","/",paste0(cachedir,"/",filename)))
+}
diff --git a/R/cache_save.R b/R/cache_save.R
new file mode 100644
index 0000000000000000000000000000000000000000..72c68f2764605a3e2f1480f64a29f05e3ff7e078
--- /dev/null
+++ b/R/cache_save.R
@@ -0,0 +1,17 @@
+#' Saves the object as an .RDS file with the filename
+#'
+#' See the examples for \code{\link{cache_name}()}.
+#'
+#' @title Save a cache file (name generated with \code{cache_name()})
+#' @param object The object to cache (i.e. the value of the evaluating function).
+#' @param filename The cache file name (i.e. use the one generated by cache_name, see examples).
+#' @return NULL
+#'
+#' @export
+cache_save <- function(object, filename){
+    dir.create(dirname(filename), showWarnings=FALSE, recursive=TRUE)
+    saveRDS(object, filename)
+}
+
+
+
diff --git a/R/data.list.R b/R/data.list.R
new file mode 100644
index 0000000000000000000000000000000000000000..e4c0379c4cb414a1e7ffb7b8b094dcc3062224de
--- /dev/null
+++ b/R/data.list.R
@@ -0,0 +1,456 @@
+# Do this in a separate tmp.R file to check the documentation
+# library(devtools)
+# document()
+# load_all(as.package("../../onlineforecast"))
+# ?as.data.list
+# ?data.list
+#?as.data.list.data.frame
+
+
+#' Make a data.list of the vectors and data.frames given.
+#'
+#' See the vignette ??{setup-data} on how a data.list must be set up.
+#' 
+#' It's simply a list of class \code{data.list} holding:
+#'   - vector \code{t}
+#'   - vector(s) of observations
+#'   - data.frames (or matrices) of forecast inputs
+#' 
+#' 
+#' @title Make a data.list
+#' @param ... Should hold: time t, observations as vectors and forecasts as data.frames
+#' @return a data.list.
+#' @examples
+#' # Put together a data.list
+#' # The time vector
+#' time <- seq(asct("2019-01-01"),asct("2019-01-02"),by=3600)
+#' # Observations time series (as vector)
+#' x.obs <- rnorm(length(time))
+#' # Forecast input as data.frame
+#' X <- data.frame(matrix(rnorm(length(time)*3), ncol=3))
+#' names(X) <- pst("k",1:3)
+#' 
+#' D <- data.list(t=time, x.obs=x.obs, X=X)
+#'
+#' # Check it
+#' check(D)
+#' 
+#' @export
+data.list <- function(...) {
+    structure(list(...), class = "data.list")
+}
+
+
+#' Take a subset of a data.list.
+#'
+#' Different arguments can be given to select the subset. See the examples.
+#' 
+#' @title Take a subset of a data.list.
+#' @param x The data.list to take a subset of.
+#' @param subset Given as the integer indexes or a logical vector, or alternatively \code{c(tstart,tend)}, where tstart and tend are either as POSIX or characters.
+#' @param nms The names of the variables in \code{x} to be included.
+#' @param kseq The k horizons of forecasts to be included.
+#' @param lagforecasts Should the forecasts be lagged k steps (thus useful for plotting etc.).
+#' @param pattern Regex pattern applied to select the variables in x to be included.
+#' @return a data.list with the subset.
+#' @examples
+#' # Use the data.list with building heat load 
+#' D <- Dbuildingheatload
+#' # Take a subset for the example
+#' D <- subset(D, 1:10, nms=c("t","Ta.obs","Ta","I.obs","I"), kseq=1:3)
+#' 
+#' # Take subset index 2:4
+#' subset(D, 2:4)
+#' 
+#' # Take subset for a period
+#' subset(D, c("2010-12-15 02:00","2010-12-15 04:00"))
+#' 
+#' # Cannot request a variable not there (throws an error)
+#' \dontrun{subset(D, nms=c("x","Ta"))}
+#' 
+#' # Take specific horizons
+#' subset(D, nms=c("I","Ta"), kseq = 1:2)
+#' subset(D, nms=c("I","Ta"), kseq = 1)
+#' 
+#' # Lag the forecasts such that they are aligned in time with observations
+#' subset(D, nms=c("Ta.obs","Ta"), kseq = 2:3, lagforecasts = TRUE)
+#' 
+#' # The order follows the order in nms
+#' subset(D, nms=c("Ta","I"), kseq = 2)
+#' 
+#' # Return variables matching a regex
+#' subset(D, kseq=2, pattern="^I")
+#' 
+#' # Take data for Ta and lag the forecasts (good for plotting and fitting a model)
+#' X <- subset(Dbuildingheatload, 1:1000, pattern="^Ta", kseq = 10, lagforecasts = TRUE)
+#' 
+#' # A scatter plot between the forecast and the observations (try lagforecasts = FALSE and see the difference)
+#' plot(X$Ta$k10, X$Ta.obs)
+#'
+#' # Fit a model for the 10-step horizon
+#' abline(lm(Ta.obs ~ Ta.k10, X), col=2)
+#'
+#' @export
+subset.data.list <- function(x, subset = NA, nms = NA, kseq = NA, lagforecasts = FALSE, pattern = NA) {
+    D <- x
+    # --------------------------------
+    # Set nms if needed (find the columns to take)
+    if(is.na(nms[1])){
+        nms <- names(D)
+    }
+    # If a pattern is given then find the columns
+    if(!is.na(pattern[1])){
+        # If the pattern has an or "|", then split on it to get the right order of the names
+        nms <- unlist(sapply(strsplit(pattern, "\\|")[[1]], function(pat){
+            grep(pat, names(D), value=TRUE)
+        }))
+    }
+    # --------------------------------
+    # Input checks
+    # Check if all variables are in nms
+    if(!all(nms %in% names(D))){ stop(pst("The variable ",pst(nms[!(nms %in% names(D))],collapse=", ")," is not in D"))}
+    #
+    if(!is.na(kseq)[1]){
+        lapply(1:length(nms), function(i){
+            X <- D[[nms[i]]]
+            if(class(X)[1] == "data.frame" ){
+                # Check if holds forecasts by checking if any name is "kxx"
+                if(length(grep("^k[[:digit:]]+$", names(X))) > 0){
+                    # If it holds forecasts, check that they are all there
+                    if( !all(pst("k",kseq) %in% names(X)) ){
+                        warning(pst("The variable ",nms[i]," contains ",pst(names(X),collapse=",")," hence doesn't contain all k in kseq = ",pst(kseq,collapse=",")))
+                    }
+                }
+            }
+        })
+    }
+    # --------------------------------
+    # If subset is NA then set it
+    if(is.na(subset[1])){
+        if(is.null(dim(D[[1]]))){
+            subset <- 1:length(D[[1]])
+        }else{
+            subset <- 1:dim(D[[1]])[1]
+        }
+    }else if(length(subset) == 2){
+        if(any(class(subset) %in% c("character","POSIXlt","POSIXct","POSIXt"))){
+            # Start and end of a period is given
+            subset <- in_range(subset[1], D$t, subset[2])
+        }
+    }else{
+        # Check if a non-meaningful subset is given
+        if(any(class(subset) == "character")){
+            stop("subset cannot be a character, except if it is of length 2 and can be converted in a POSIX, e.g. subset=c('2020-01-01','2020-01-10'. ")
+        }
+    }
+    # Take all horizons k?
+    if(is.na(kseq[1])){
+        val <- lapply(D[nms], function(X) {
+            if (any(class(X) == "data.frame")) {
+                return(X[subset, , drop=FALSE]) # drop = FALSE needed in case data frame only has 1 column, otherwise this does not return a data frame
+            } else {
+                return(X[subset])
+            }
+        })
+    }else{
+        # kseq is given, so take only the specified horizons
+        val <- lapply(D[nms], function(X) {
+            if (any(class(X) == "data.frame")) {
+                # Check if holds forecasts by checking if any name is "kxx"
+                if(length(grep("^k[[:digit:]]+$", names(X))) > 0){
+                    return(X[subset,pst("k",kseq), drop=FALSE])
+                }else{
+                    return(X[subset, , drop=FALSE])
+                }
+            } else {
+                return(X[subset])
+            }
+        })
+    }
+    # Lag the forecasts k if specified
+    if(lagforecasts){
+        val <- lapply(val, function(X){
+            if(any(class(X) == "data.frame") & length(grep("^k[[:digit:]]+$",names(X))) > 0) {
+                return(lag.data.frame(X, lag="+k"))
+            }else{
+                return(X)
+            }
+        })
+    }
+    class(val) <- "data.list"
+    return(val)
+}
+
+
+#' Converts a data.list to a data.frame.
+#'
+#' The forecasts in the data.list will result in columns named \code{varname.kxx} in the data.frame.
+#' 
+#' @title Convert to data.frame
+#' @param x The data.list to be converted.
+#' @return A data.frame
+#' @examples
+#'
+#' # Use the data.list with building heat load 
+#' D <- Dbuildingheatload
+#' # Take a subset
+#' D <- subset(D, 1:5, nms=c("t","Ta.obs","Ta","I.obs","I"), kseq=1:3)
+#'
+#' # Convert to a data.frame, note the names of the forecasts are appended .kxx (i.e. for Ta and I)
+#' as.data.frame(D)
+#'
+#' @export
+as.data.frame.data.list <- function(x, row.names=NULL, optional=FALSE, ...){
+    # Then convert into a data.frame
+    val <- do.call("cbind", x)
+    if(inherits(val, "matrix")){
+        val <- as.data.frame(val)
+    }
+    # Fix names of data.frames (i.e. forecasts, their names are now "kxx", but should be X.kxx)
+    i <- grep("^k[[:digit:]]+$", names(val))
+    if(length(i) > 0){
+        names(val)[i] <- pst(names(x)[i],".",names(val)[i])
+    }
+    return(val)
+}
+
+
+#' Generate a pairs plot for the vectors in the data.list.
+#'
+#' A very useful plot for checking what is in the forecasts, how they are synced and match the observations.
+#' 
+#' @title Generation of pairs plot for a data.list.
+#' @param x The data.list from which to plot.
+#' @param subset The subset to be included, passed to \code{subset.data.list}.
+#' @param nms The names of the variables to be included, passed to \code{subset.data.list}.
+#' @param kseq The horizons of forecasts to be included, passed to \code{subset.data.list}.
+#' @param lagforecasts Lag the forecasts such that they are synced with observations?
+#' @param pattern Regex pattern to select the variables to be included, passed to \code{subset.data.list}.
+#' @param lower.panel Passed to pairs().
+#' @param panel Passed to pairs().
+#' @param pch Passed to pairs().
+#' @param cex Passed to pairs().
+#' @param ... Passed to pairs().
+#' @examples
+#' # Take a subset for the example
+#' D <- subset(Dbuildingheatload, c("2010-12-15","2011-01-15"), pattern="^Ta|^I", kseq=1:3)
+#' pairs(D)
+#'
+#' # If the forecasts and the observations are not aligned in time it is easy to see by comparing to the previous plot.
+#' pairs(D, lagforecasts=FALSE)
+#' # Especially for the solar I, synchronization is really important!
+#' # Hence if the forecasts were not synced properly, then it can be detected using this type of plot.
+#'
+#' # Alternatively, lag when taking the subset
+#' D <- subset(Dbuildingheatload, c("2010-12-15","2011-01-15"), pattern="^Ta|^I", kseq=1:3, lagforecasts=TRUE)
+#' pairs(D, lagforecasts=FALSE)
+#' 
+#' @export
+pairs.data.list <- function(x, subset = NA, nms = NA, kseq = NA, lagforecasts = TRUE, pattern = NA, lower.panel=NULL, panel=panel.smooth, pch=20, cex=0.7, ...){
+    # First take the subset
+    X <- as.data.frame(subset(x, subset = subset, nms = nms, kseq = kseq, lagforecasts = lagforecasts, pattern = pattern))
+    #
+    pairs(X, lower.panel=lower.panel, panel=panel, pch=pch, cex=cex, ...)
+}
+
+
+
+#' Checking the object for appropriate form. 
+#'
+#' Prints on table form the result of the check.
+#' 
+#' @title Checking the object for appropriate form. 
+#' @param object The object to be checked.
+#' @return The tables generated.
+#'
+#' @examples
+#' # Check a data.list (see \code{?\link{check.data.list}})
+#' check(Dbuildingheatload)
+#' 
+#' @export
+check <- function(object){
+    UseMethod("check")
+}
+
+#' Checking the data.list for appropriate form. 
+#'
+#' Prints a check of the time vector t, which must have equidistant time points and no NAs.
+#'
+#' Then the results of checking vectors (observations):
+#'   - ok: A 'V' indicates a successful check
+#'   - NAs: proportion of NAs
+#'   - length: printed if not the same as the 't' vector
+#'   - class: the class
+#' 
+#' Then the results of checking data.frames and matrices (forecasts):
+#'   - ok: a 'V' indicates a successful check
+#'   - maxNAs: the proportion of NAs for the horizon (i.e. column) with the highest proportion of NAs
+#'   - meanNAs: the proportion of NAs of the entire data.frame
+#'   - nrow: printed if not the same as the 't' vector length
+#'   - colnames: columns must be named 'kxx', where 'xx' is the horizon
+#'   - sameclass: 'X' if not all columns are the same class
+#'   - class: prints the class of the columns if they are all the same
+#' 
+#' @title Checking the data.list for appropriate form. 
+#' @param object The object to be checked.
+#' @return The tables generated.
+#'
+#' @examples
+#' # Check a data.list
+#' check(Dbuildingheatload)
+#'
+#' # Vector with observations not same length as t
+#' D <- Dbuildingheatload
+#' D$heatload <- D$heatload[1:10]
+#' check(D)
+#'
+#' # Some NAs in k1 forecast
+#' D <- Dbuildingheatload
+#' D$Ta$k1[1:1500] <- NA
+#' check(D)
+#'
+#' # Wrong column names
+#' names(D$Ta)
+#'
+#' @export
+check.data.list <- function(object){
+    # Check if how the data.list is setup and report potential issues
+    D <- object
+    if(!"t" %in% names(D)){ stop("'t' is missing in the data.list: It must be a vector of equidistant time points (can be an integer, but preferably POSIXct class with tz 'GMT' or 'UTC'.)") }
+
+    if(length(unique(diff(D$t))) != 1){ stop("'t' must be equidistant and have no NA values")}
+    cat("\nTime t is fine: Length ",length(D$t),"\n\n")
+
+    # Which is data.frame or matrix?
+    dfOrMat <- sapply(D, function(x){ (class(x) %in% c("matrix","data.frame"))[1] })
+    # Vectors check
+    vecchecks <- c("ok","NAs","length","class")
+    vecseq <- which(!dfOrMat & names(dfOrMat) != "t")
+    Observations <- data.frame(matrix("", nrow=length(vecseq), ncol=length(vecchecks), dimnames=list(names(vecseq),vecchecks)), stringsAsFactors=FALSE)
+    Observations$ok <- "V"
+    #
+    for(i in 1:length(vecseq)){
+        #
+        nm <- names(vecseq)[i]
+        # NAs
+        NAs <- round(100*sum(is.na(D[[nm]])) / length(D[[nm]]))
+        Observations$NAs[i] <- pst(NAs,"%")
+        # Check the length
+        if(length(D[[nm]]) != length(D$t)){
+            Observations$length[i] <- length(D[[nm]])
+        }
+        # Its class
+        Observations$class[i] <- class(D[[nm]])
+        # Not ok?
+        if(sum(Observations[i, 3] == "") < 1){
+            Observations$ok[i] <- ""
+        }
+    }
+    #
+    # For forecasts
+    dfseq <- which(dfOrMat)
+    dfchecks <- c("ok","maxNAs","meanNAs","nrow","colnames","sameclass","class")
+    Forecasts <- data.frame(matrix("", nrow=length(dfseq), ncol=length(dfchecks), dimnames=list(names(dfseq),dfchecks)), stringsAsFactors=FALSE)
+    Forecasts$ok <- "V"
+    #
+    for(i in 1:length(dfseq)){
+        #
+        nm <- names(dfseq)[i]
+        colnms <- nams(D[[nm]])
+        # max NAs
+        maxNAs <- round(max(sapply(colnms, function(colnm){ 100*sum(is.na(D[[nm]][ ,colnm])) / nrow(D[[nm]]) })))
+        Forecasts$maxNAs[i] <- pst(maxNAs,"%")
+        # Mean NAs
+        meanNAs <- round(mean(sapply(colnms, function(colnm){ 100*sum(is.na(D[[nm]][ ,colnm])) / nrow(D[[nm]]) })))
+        Forecasts$meanNAs[i] <- pst(meanNAs,"%")
+        # Check the number of rows
+        if(nrow(D[[nm]]) != length(D$t)){
+            Forecasts$nrow[i] <- nrow(D[[nm]])
+        }
+        # Check the colnames, are they unique and all k+integer?
+        if(!length(unique(grep("^k[[:digit:]]+$",colnms,value=TRUE))) == length(colnms)){
+            Forecasts$colnames[i] <- "X"
+        }
+        if(!length(unique(sapply(colnms, function(colnm){ class(D[[nm]][ ,colnm]) }))) == 1){
+            Forecasts$sameclass[i] <- "X"
+        }else{
+            Forecasts$class[i] <- class(D[[nm]][ ,1])
+        }
+        # Not ok?
+        if(sum(Forecasts[i, ] == "") < (length(dfchecks)-4)){
+            Forecasts$ok[i] <- ""
+        }
+    }
+    #
+    cat("Observation vectors:\n")
+    print(Observations)
+    cat("\nForecast data.frames or matrices:\n")
+    print(Forecasts)
+
+    invisible(list(Observations=Observations, Forecasts=Forecasts))
+}
+
+
+
+#' Compare two data.lists
+#'
+#' Returns TRUE if the two data.lists are fully identical, so all data, order of variables etc. must be fully identical
+#' 
+#' @title Determine if two data.lists are identical
+#'
+#' @param x first data.list  
+#' @param y second data.list
+#' @return logical
+#'
+#' @examples
+#'
+#' Dbuildingheatload == Dbuildingheatload
+#'
+#' D <- Dbuildingheatload
+#' D$Ta$k2[1] <- NA
+#' Dbuildingheatload == D
+#'
+#' D <- Dbuildingheatload
+#' names(D)[5] <- "I"
+#' names(D)[6] <- "Ta"
+#' Dbuildingheatload == D
+#' 
+#' @export
+"==.data.list" <- function(x, y) {
+    if(length(x) != length(y)){
+        return(FALSE)
+    }
+    if(any(names(x) != names(y))){
+        return(FALSE)
+    }
+    # Check each variable
+    tmp <- lapply(1:length(x), function(i){
+        xi <- x[[i]]
+        yi <- y[[i]]
+        if(length(class(xi)) != length(class(yi))){
+            return(FALSE)
+        }
+        if(any(class(xi) != class(yi))){
+            return(FALSE)
+        }
+        if(is.null(dim(xi))){
+            # It's a vector
+            if(length(xi) != length(yi)){
+                return(FALSE)
+            }
+        }else{
+            # It's a data.frame or matrix
+            if(any(dim(xi) != dim(yi))){
+                return(FALSE)
+            }
+        }
+        # Check the NA values are the same
+        if(any(is.na(xi) != is.na(yi))){
+            return(FALSE)
+        }
+        # Check the values
+        all(xi == yi, na.rm=TRUE)
+    })
+    if(any(!unlist(tmp))){
+        return(FALSE)
+    }
+    # All checks passed
+    return(TRUE)
+}
diff --git a/R/forecastmodel.R b/R/forecastmodel.R
new file mode 100644
index 0000000000000000000000000000000000000000..56347f5828c742b60c2195481de08d9f856e5f16
--- /dev/null
+++ b/R/forecastmodel.R
@@ -0,0 +1,352 @@
+#' @export
+forecastmodel <- R6::R6Class("forecastmodel", public = list(
+    ##----------------------------------------------------------------
+    ## Fields used for setting up the model
+    ## 
+    ## The expression (as character) used for generating the regprm
+    regprmexpr = NA,
+    ## Regression parameters for the function used for fitting (rls, ls, etc.)
+    regprm = list(), 
+    ## The off-line parameters
+    prmbounds = as.matrix(data.frame(lower=NA, init=NA, upper=NA)),
+    ## List of inputs (which are R6 objects) (note the "cloning of list of reference objects" issue below in deep_clone function)
+    inputs = list(),
+    ## Name of the output
+    output = "y",
+    ## The range of the output to be used for cropping the output
+    outputrange = NA,
+    ##----------------------------------------------------------------
+
+    
+    ##----------------------------------------------------------------
+    ## Fields to be used when the model is fitted
+    ##
+    ## The horizons to fit for
+    kseq = NA,
+    ## The (transformation stage) parameters used for the fit
+    prm = NA,
+    ## Stores the maximum lag for AR terms
+    maxlagAR = NA,
+    ## Stores the maxlagAR past values of y for the update when new obs becomes available
+    yAR = NA,
+    ## The fits, one for each k in kseq (simply a list with the latest fit)
+    Lfits = list(),
+    ## Transformed input data (data.list with all inputs; or local fitted models: ??data.frame with all data??)
+    datatr = NA,
+    ##----------------------------------------------------------------
+
+    
+    ##----------------------------------------------------------------
+    ## Constructor function
+    initialize = function(){},
+    ##----------------------------------------------------------------
+
+    
+    ##----------------------------------------------------------------    
+    ## Add inputs to the model
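+    ## Illustrative usage (mirroring the examples in AR.R and bspline.R):
+    ##   model$add_inputs(AR = "AR(c(0,1))", mutday = "bspline(tday)")
+    ## Each input is given as a string expression, which is evaluated later in the transform stage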
+    add_inputs = function(...){
+        dots <- list(...)
+        for (i in 1:length(dots)){
+            self$inputs[[ nams(dots)[i] ]] <- input_class$new(dots[[i]], model=self)
+        }
+    },
+    ##----------------------------------------------------------------
+
+    ##----------------------------------------------------------------
+    ## Add the expression (as character) which generates the regression parameters
+    add_regprm = function(regprmexpr){
+        self$regprmexpr <- regprmexpr
+        self$regprm <- eval(parse(text = self$regprmexpr))
+    },
+    ##----------------------------------------------------------------
+
+    
+    ##----------------------------------------------------------------
+    ## Add the transformation parameters and bounds for optimization
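+    ## Illustrative usage (a sketch, assuming bounds are given in the order c(lower, init, upper)
+    ## matching the prmbounds columns): model$add_prmbounds(lambda = c(0.9, 0.99, 0.9999))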
+    add_prmbounds = function(...) {
+        dots <- list(...)
+        for (i in 1:length(dots)) {
+            nm <- names(dots)[i]
+            if (nm %in% rownames(self$prmbounds)) {
+                self$prmbounds[nm, ] <- dots[[i]]
+            } else {
+                if(nrow(self$prmbounds) == 1 & is.na(self$prmbounds[1,2])){
+                    self$prmbounds[1, ] <- dots[[i]]
+                }else{
+                    self$prmbounds <- rbind(self$prmbounds, dots[[i]])
+                }
+                rownames(self$prmbounds)[nrow(self$prmbounds)] <- nm
+            }
+        }
+    },
+    ##----------------------------------------------------------------
+
+
+    ##----------------------------------------------------------------
+    ## Get the transformation parameters
+    get_prmbounds = function(nm){
+        if(nm == "init"){
+            if(is.null(dim(self$prmbounds))){
+                val <- self$prmbounds[nm]
+            }else{
+                val <- self$prmbounds[ ,nm]
+                if(is.null(nams(val))){
+                    nams(val) <- rownames(self$prmbounds)
+                }
+            }
+        }
+        if(nm == "lower"){
+            if("lower" %in% nams(self$prmbounds)){
+                val <- self$prmbounds[,"lower"]
+                if(is.null(nams(val))){
+                    nams(val) <- rownames(self$prmbounds)
+                }
+            }else{
+                val <- -Inf
+            }
+        }
+        if(nm == "upper"){
+            if("upper" %in% nams(self$prmbounds)){
+                val <- self$prmbounds[,"upper"]
+                if(is.null(nams(val))){
+                    nams(val) <- rownames(self$prmbounds)
+                }
+            }else{
+                val <- Inf
+            }
+        }
+        names(val) <- row.names(self$prmbounds)
+        return(val)
+    },
+    ##----------------------------------------------------------------
+
+
+    ##----------------------------------------------------------------
+    ## Insert the transformation parameters prm in the input expressions and regression expressions, and keep them (simply string manipulation)
+    insert_prm = function(prm){
+        # If just NA or NULL given, then don't do anything
+        if(is.null(prm) | (is.na(prm)[1] & length(prm) == 1)){
+            return(NULL)
+        }
+        ## MUST INCLUDE SOME checks here and print useful messages if something is not right
+        if(any(is.na(prm))){ stop(pst("None of the parameters (in prm) must be NA: prm=",prm)) }
+
+        ## Keep the prm
+        self$prm <- prm
+        ## Find if any opt parameters, first the ones with "__" hence for the inputs
+        pinputs <- prm[grep("__",nams(prm))]
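+        ## Illustrative example: with prm = c(mutday__df=5, lambda=0.99), "mutday__df" is matched to
+        ## the input named "mutday" (df=5 is inserted in its expression), while "lambda" is passed
+        ## on to the regression parameter expression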
+        ## If none found for inputs, then the rest must be for regression
+        if (length(pinputs) == 0 & length(prm) > 0) {
+            preg <- prm
+        } else {
+            preg <- prm[-grep("__",nams(prm))]
+        }
+        ## ################################
+        ## For the inputs, insert from prm if any found
+        if (length(pinputs)) {
+            pnms <- unlist(getse(strsplit(nams(pinputs),"__"), 1))
+            pprm <- unlist(getse(strsplit(nams(pinputs),"__"), 2))
+            ##
+            for(i in 1:length(self$inputs)){
+                for(ii in 1:length(pnms)){
+                    ## Find if the input i have prefix match with the opt. parameter ii
+                    if(pnms[ii]==nams(self$inputs)[i]){
+                        ## if the opt. parameter is in the expr, then replace
+                        self$inputs[[i]]$expr <- private$replace_value(name = pprm[ii],
+                                                                       value = pinputs[ii],
+                                                                       expr = self$inputs[[i]]$expr)
+                    }
+                }
+            }
+        }
+        ## ################################
+        ## For the fit parameters, insert from prm if any found
+        if (length(preg) & any(!is.na(self$regprmexpr))) {
+            nams(preg)
+            for(i in 1:length(preg)){
+                ## if the opt. parameter is in the expr, then replace
+                self$regprmexpr <- private$replace_value(name = nams(preg)[i],
+                                                         value = preg[i],
+                                                         expr = self$regprmexpr)
+            }
+        }
+        self$regprm <- eval(parse(text = self$regprmexpr))
+    },
+    ##----------------------------------------------------------------
+
+
+    ##----------------------------------------------------------------
+    ## Function for transforming the input data to the regression data
+    transform_data = function(data){
+        ## Evaluate for each input the expression to generate the model input data
+        L <- lapply(self$inputs, function(input){
+            ## Evaluate the expression (input$expr)
+            L <- input$evaluate(data)
+            ## Must return a list
+            if(inherits(L, "matrix")){ return(list(as.data.frame(L))) }
+            if(inherits(L, "data.frame")){ return(list(L)) }
+            if(!inherits(L, "list")){ stop(pst("The value returned from evaluating: ",input$expr,", was not a matrix, data.frame or a list of them."))}
+            if(inherits(L[[1]], "matrix")){ return(lapply(L, function(mat){ return(as.data.frame(mat)) })) }
+            return(L)
+        })
+        ## Put together in one data.list
+        L <- structure(do.call(c, L), class="data.list")
+        ##
+        return(L)
+    },
+    ##----------------------------------------------------------------
+
+
+    ##----------------------------------------------------------------
+    ## Resets the input states
+    reset_state = function(){
+        ## Reset the inputs state
+        lapply(self$inputs, function(input){
+            input$state_reset()
+        })
+        ## Reset stored data
+        self$datatr <- NA
+        self$yAR <- NA
+    },
+    ##----------------------------------------------------------------
+
+
+    ##----------------------------------------------------------------
+    ## Check if the model and data is setup correctly
+    check = function(data = NA){
+        ## some checks are done here, maybe more should be added (??also when transforming inputs, if something goes wrong its caught and message is printed)
+        ##
+        ## ################################################################
+        ## First check if the output is set correctly
+        if( is.na(self$output) ){
+            stop("Model output is NA, it must be set to the name of a variable in the data.list used.")
+        }
+        if( !(self$output %in% names(data)) ){
+            stop("Model output '",self$output,"' is not in the data provided: It must be set to the name of a variable in the data.list used.")
+        }
+        if( !(is.numeric(data[[self$output]])) ){
+            stop("The model output '",self$output,"' is not a numeric. It has to be a vector of numbers.")
+        }
+        if( length(data[[self$output]]) != length(data$t) ){
+            stop("The length of the model output '",self$output,"' is ",length(data[[self$output]]),", which is not equal to the length of the time vector (t), which is ",length(data$t))
+        }
+        ## ################################################################
+        ## Check that the kseq is set in the model
+        if( !is.numeric(self$kseq) ){
+            stop("'model$kseq' is not set. Must be an integer (or numeric) vector.")
+        }
+        ## ################################################################
+        ## Check all input variables are correctly set data
+        for(i in 1:length(self$inputs)){
+            ## Find all the variables in the expression
+            nms <- all.vars(parse(text=self$inputs[[i]]$expr[[1]]))
+            for(nm in nms){
+                if(inherits(data[[nm]], c("data.frame","matrix"))){
+                    ## It's a forecast input, hence must have the k columns in kseq
+                    if(!all(self$kseq %in% as.integer(gsub("k","",names(data[[nm]]))))){
+                        missingk <- which(!self$kseq %in% as.integer(gsub("k","",names(data[[nm]]))))
+                        stop("The input variable '",nm,"' doesn't have all needed horizons.\nIt has ",pst(names(data[[nm]]),collapse=","),"\nIt is missing ",pst("k",self$kseq[missingk],collapse=","))
+                    }
+                    ## Check if the number of observations match
+                    if( nrow(data[[nm]]) != length(data$t) ){
+                        stop(pst("The input variable '",nm,"' doesn't have the same number of observations as time vector 't' in the data. It has ",nrow(data[[nm]]),", but 't' has ",length(data$t)))
+                    }
+                }else if(is.numeric(data[[nm]])){
+                    ## Observation input, check the length
+                    if( length(data[[nm]]) != length(data$t) ){
+                        stop("The input variable '",nm,"' doesn't have the same number of observations as time vector 't' in the data. It has ",length(data[[nm]]),", but 't' has ",length(data$t))
+                    }
+                }else{
+                    stop("The variable '",nm,"' is missing in data, or it has the wrong class.\nIt must be class: data.frame, matrix or vector.\nIt is needed for the input expression '",self$inputs[[i]]$expr[[1]],"'")
+                }
+            }
+        }
+    },
+
+    ##----------------------------------------------------------------
+    clone_deep = function(){
+        ## First clone with deep=TRUE. Now also the inputs get cloned.
+        newmodel <- self$clone(deep=TRUE)
+        ## The inputs are cloned now, however the model fields in the inputs have not been updated, so do that
+        if(length(newmodel$inputs) > 0){
+            for(i in 1:length(newmodel$inputs)){
+                newmodel$inputs[[i]]$model <- newmodel
+            }
+        }
+        return(newmodel)
+    }
+    ##----------------------------------------------------------------
+    
+    ),
+    ##----------------------------------------------------------------
+
+
+    ##----------------------------------------------------------------
+    ## Private functions
+    private = list(
+    ##----------------------------------------------------------------
+
+
+    ##----------------------------------------------------------------
+    ## Replace the value in "name=value" in expr
+    replace_value = function(name, value, expr){
+        ## First make regex
+        pattern <- gsub("\\.", ".*", name)
+        ## Try to find it in the input
+        pos <- regexpr(pattern, expr)
+        ## Only replace if prm was found
+        if(pos>0){
+            pos <- c(pos+attr(pos,"match.length"))
+            ## Find the substr to replace with the prm value
+            (tmp <- substr(expr, pos, nchar(expr)))
+            pos2 <- regexpr(",|)", tmp)
+            ## Insert the prm value and return
+            expr <- pst(substr(expr,1,pos-1), "=", value, substr(expr,pos+pos2-1,nchar(expr)))
+            # Print? Not used now
+            #if(printout){ cat(names(value),"=",value,", ",sep="")}
+        }
+        return(expr)
+    },
+    ##----------------------------------------------------------------
+
+    ##----------------------------------------------------------------
+    ## For deep cloning, in order to get the inputs list of R6 objects copied
+    deep_clone = function(name, value) {
+        ## With x$clone(deep=TRUE) is called, the deep_clone gets invoked once for
+        ## each field, with the name and value.
+        if (name == "inputs") {
+            ## Don't clone the inputs deep, since they have the model as a field and then it gets in an infinite loop!
+            ## But have to update the model references, so therefore the function above "clone_deep" must be used
+            return(lapply(value, function(x){ x$clone(deep=FALSE) }))
+            ## ## `a` is an environment, so use this quick way of copying
+            ## list2env(as.list.environment(value, all.names = TRUE),
+            ##          parent = emptyenv())
+        }
+        ## For all other fields, just return the value
+        return(value)
+    }
+    ##----------------------------------------------------------------
+    )
+)
+
+
+
+#' Prints a forecast model
+#'
+#' A simple print out of the model output and inputs
+#' 
+#' @title Print forecast model
+#' @param x forecastmodel object to be printed.
+#' @param ... not used.
+#' @export
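+#' @examples
+#' # A minimal sketch (the model setup here is only for illustration):
+#' model <- forecastmodel$new()
+#' model$output <- "heatload"
+#' model$add_inputs(Ta = "lp(Ta, a1=0.9)", mu = "ones()")
+#' print(model)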
+print.forecastmodel <- function(x, ...){
+    model <- x
+    #    cat("\nObject of class forecastmodel (R6::class)\n\n")
+    cat("\nOutput:",model$output,"\n")
+    cat("Inputs: ")
+    cat(names(model$inputs)[1],"=",model$inputs[[1]]$expr,"\n")
+    if(length(model$inputs) > 1){
+        for(i in 2:length(model$inputs)){
+            cat("       ",names(model$inputs)[i],"=",model$inputs[[i]]$expr,"\n")
+        }
+    }
+    cat("\n")    
+}
diff --git a/R/forecastmodel.R-documentation.R b/R/forecastmodel.R-documentation.R
new file mode 100644
index 0000000000000000000000000000000000000000..abb3c1c1577d74c34434ca6afb27ac611208ea97
--- /dev/null
+++ b/R/forecastmodel.R-documentation.R
@@ -0,0 +1,180 @@
+# Do this in a separate file to see the generated help:
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?forecastmodel
+
+#' R6 class for a forecastmodel
+#' 
+#' This class holds the variables and functions needed for defining and setting up a forecast model - independent of the fitting scheme.
+#'
+#' See the vignettes ??(ref) on how to setup and use a model and the website for more.
+#'
+#' @name forecastmodel
+#' @details
+#' 
+#' Holds all the information needed independently of the fitting scheme (e.g. lm_fit or rls_fit), see the fields and functions below.
+#'
+#' The fields are separated into:
+#'   - Fields for setting up the model
+#'   - Fields used when fitting (e.g. which horizons to fit for is set in \code{kseq})
+#'
+#' See the fields description below.
+#' 
+#' Note, it's an R6 class, hence an object variable is a pointer (reference), which means two important points:
+#'  - In order to make a copy, the function clone_deep() must be used (usually \code{clone(deep=TRUE)} would be used, but for this class that would end in an infinite loop).
+#'  - It can be manipulated directly in functions (without return). The code is written such that no external functions manipulate the model object, except for online updating.
+#'
+#' For online updating (i.e. receiving new data and updating the fit), then the model definition and the data becomes entangled, since transformation functions like low-pass filtering with \code{\link{lp}()} requires past values.
+#' See the vignette ??(ref to online vignette) and note that \code{\link{rls_fit}()} resets the state, which is also done in all \code{xxx_fit} functions (e.g. \code{\link{lm_fit}}).
+#'
+#' 
+#' @section Public fields used for setting up the model:
+#'
+#'     - output = NA, character: Name of the output.
+#'
+#'     - inputs = list(), add them with add_inputs(): List of inputs (which are R6 objects) (note the "cloning of list of reference objects" issue below in deep_clone function)
+#'
+#'     - regprmexpr = NA: The expression (as character) used for generating the regprm, e.g. "\code{\link{rls_prm}()}" for RLS.
+#'
+#'     - regprm = list(): Regression parameters calculated by evaluating the \code{regprmexpr}.
+#'
+#'     - prmbounds = as.matrix(data.frame(lower=NA, init=NA, upper=NA)): The bounds for optimization of the parameters, e.g. with \code{\link{rls_optim}()}.
+#'
+#'     - outputrange = NA, numeric vector of length 2: Limits of the predictions cropped in the range, e.g. outputrange = c(0,Inf) removes all negative output predictions.
+#'
+#'
+#' @section Public fields used when the model is fitted:
+#'
+#'     - kseq = NA: The horizons to fit for.
+#'
+#'     - p = NA: The (transformation stage) parameters used for the fit.
+#'
+#'     - Lfits = list(): The regression fits, one for each k in kseq (simply a list with the latest fit).
+#'
+#'     - datatr = NA: Transformed input data (data.list with all inputs; or local fitted models: ??data.frame with all data??)
+#'
+#'
+#----------------------------------------------------------------
+#' @section Public methods:
+#' All public methods are described below, and in the examples a section for each is included:
+#----------------------------------------------------------------
+
+#----------------------------------------------------------------
+#' @section \code{$new()}:
+#' Create a new `forecastmodel` object.
+#' 
+#' Returns a forecastmodel object.
+#' @examples
+#' # New object
+#' model <- forecastmodel$new()
+#'
+#' # Print it
+#' model
+#'
+#----------------------------------------------------------------
+
+#----------------------------------------------------------------
+#' @section \code{$add_inputs(...)}:
+#'         Add inputs to the model.
+#'
+#'         - \code{...}: The inputs are given as arguments, see examples.
+#'
+#' @examples
+#'
+#' # Add model inputs
+#' model$add_inputs(Ta = "lp(Ta)")
+#' # See it
+#' model$inputs
+#' # Update to use no low-pass filter
+#' model$add_inputs(Ta = "Ta")
+#' model$inputs
+#' # Add another
+#' model$add_inputs(I = "lp(I)")
+#' model$inputs
+#'
+#' # Simply a list, so manipulate directly
+#' class(model$inputs$Ta)
+#' model$inputs$Ta$expr <- "lp(Ta, a1=0.9)"
+#----------------------------------------------------------------
+
+#----------------------------------------------------------------
+#' @section \code{$add_regprm(regprm_expr)}:
+#' Add expression (as character) which generates regression parameters.
+#'
+#' @examples
+#'
+#' # Add the parameters for the regression stage
+#' model$add_regprm("rls_prm(lambda=0.99)")
+#' # The evaluation is a list, which is set in
+#' model$regprm
+#' 
+#----------------------------------------------------------------
+
+#----------------------------------------------------------------
+#' @section \code{$add_prmbounds(...)}:
+#' Add the transformation parameters and bounds for optimization.
+#'
+#' @examples
+#'
+#' # Set the lambda to be optimized between 0.9 and 0.999, starting at 0.99
+#' model$add_prmbounds(lambda = c(0.9, 0.99, 0.999))
+#' # Note the "__" syntax to set parameters for inputs: "input__prm"
+#' model$add_prmbounds(Ta__a1 = c(0.8, 0.95, 0.99))
+#----------------------------------------------------------------
+
+#----------------------------------------------------------------
+#' @section \code{$get_prmbounds(...)}:
+#' Get the transformation parameter bounds, used by optimization functions e.g. \code{\link{rls_optim}()}.
+#'
+#' @examples
+#' 
+#' # Get the lower bounds
+#' model$get_prmbounds("lower")
+#----------------------------------------------------------------
+
+#----------------------------------------------------------------
+#' @section \code{$insert_prm(prm)}:
+#' Insert the transformation parameters prm in the input expressions and regression expressions, and keep them (simply string manipulation).
+#'
+#' @examples
+#' 
+#' # Insert the init parameters
+#' prm <- model$get_prmbounds("init")
+#' prm
+#' # Before
+#' model$inputs$Ta$expr
+#' # After
+#' model$insert_prm(prm)
+#' model$inputs$Ta$expr
+#----------------------------------------------------------------
+
+#----------------------------------------------------------------
+#' @section \code{$transform_data(data)}:
+#' Function for transforming the input data to the regression stage input data (see ??(ref to setup data and online updating vignette)).
+#'
+#----------------------------------------------------------------
+
+#----------------------------------------------------------------
+#' @section \code{$reset_state()}:
+#' Resets the input states and stored data for iterative fitting (datatr rows and yAR) (see ??(ref to online updating vignette)).
+#'
+#----------------------------------------------------------------
+
+#----------------------------------------------------------------
+#' @section \code{$check(data = NA)}:
+#' Check if the model is setup correctly.
+#'
+#' @examples
+#' 
+#' # Check if the model is setup and can be used with a given data.list
+#' model$check(Dbuildingheatload)
+#' # Add the model output
+#' model$output <- "heatload"
+#' model$check(Dbuildingheatload)
+#' # Add the horizons to fit for
+#' model$kseq <- 1:4
+#' # No errors, it's fine :)
+#' model$check(Dbuildingheatload)
+NULL
+
diff --git a/R/fs.R b/R/fs.R
new file mode 100644
index 0000000000000000000000000000000000000000..26fd928af7aa659660ed1d0cb2c5c425f7039bcf
--- /dev/null
+++ b/R/fs.R
@@ -0,0 +1,39 @@
+#' Generation of Fourier series.
+#'
+#' Function for generating Fourier series as a function of x, e.g. used as
+#' harmonic functions for modelling diurnal patterns or as basis functions.
+#'
+#' @param X must be a data.frame with columns k1, k2, ... . One period is from 0 to 1
+#' (so for example if X is hour of day, then divide X by 24 to obtain a daily period).
+#' @param nharmonics the number of harmonics, which creates twice as many inputs, i.e. one sine and one cosine for each harmonic.
+#' @return  Returns a list of dataframes (two for each i in \code{1:nharmonics}) with same number of columns as X.
+#' @examples
+#' # Make a data.frame with time of day in hours for different horizons
+#' tday <- make_tday(seq(asct("2019-01-01"), asct("2019-01-04"), by=3600), kseq=1:5)
+#' # See whats in it
+#' str(tday)
+#' head(tday)
+#'
+#' # Now use the function to generate Fourier series
+#' L <- fs(tday/24, nharmonics=2)
+#' # See what is in it
+#' str(L)
+#'
+#' # Make a plot to see the harmonics
+#' par(mfrow=c(2,1))
+#' # The first harmonic
+#' plot(L$sin1$k1, type="l")
+#' lines(L$cos1$k1, type="l")
+#' # The second harmonic
+#' plot(L$sin2$k1, type="l")
+#' lines(L$cos2$k1, type="l")
+#'
+#' 
+#' @export
+fs <- function(X, nharmonics) {
+    do.call("c", lapply(1:nharmonics, function(i) {
+        val <- list(sin(i * X * 2 * pi), cos(i * X * 2 * pi))
+        nams(val) <- pst(c("sin", "cos"), i)
+        return(val)
+    }))
+}
diff --git a/R/getse.R b/R/getse.R
new file mode 100644
index 0000000000000000000000000000000000000000..0a5b3f2de99d21fc04775dc43c99b110e0717a62
--- /dev/null
+++ b/R/getse.R
@@ -0,0 +1,91 @@
+# Do this in a separate file to see the generated help:
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?getse
+
+
+#' A helper function for getting subelements from a list.
+#'
+#' Often it is needed to get a subelement from a list, which can be done using lapply.
+#' But to make life easier, here is a small function for getting subelements in a nested list at a certain depth.
+#' 
+#' @title Getting subelements from a list.
+#' @param L The list to get sub elements from.
+#' @param inm Either an integer index or a name of the subelements to return.
+#' @param depth The depth of the subelements to match names in:
+#'     - 1: is directly in the list.
+#'     - 2: is in list of each element in the list.
+#'     - 3 and more: simply deeper in the sublists.
+#' @param useregex logical: should inm be used as regex pattern for returning elements matching, in the specified layer.
+#' @param fun function: if given, then it will be applied to all the matched subelements before returning them.
+#' @return A list of the matched elements.
+#' 
+#' @examples
+#' # Make a nested list
+#' L <- list(x1=list(x=list("val11","val112"),
+#'                   y=list("val12"),
+#'                   test=list("testlist2")),
+#'           x2=list(x=list("val21","val212"),
+#'                   y=list("val22"),
+#'                   test=list("testlist2")),
+#'           x3=list(x=list("val31","val312"),
+#'                   y=list("val32"),
+#'                   test=list("testlist3")))
+#'
+#' # Get the subelement "x1"
+#' str(getse(L, "x1", depth=1))
+#' # Same as
+#' str(L[["x1"]])
+#'
+#' # Get the element named x in second layer
+#' str(getse(L, "x", depth=2))
+#' # Depth is default to 2
+#' str(getse(L, "y"))
+#'
+#' # Nice when splitting string
+#' x <- strsplit(c("x.k1","y.k2"), "\\.")
+#' # Get all before the split "\\."
+#' getse(x, 1)
+#' # Get after
+#' getse(x, 2)
+#'
+#' # Will give an error when indexed (with integer) if the element is not there
+#' x <- strsplit(c("x.k1","y.k2","x2"), "\\.")
+#' getse(x, 1)
+#' getse(x, 2)
+#'
+#' # Use regex pattern for returning elements matching in the specified layer
+#' getse(L, "^te", depth=2, useregex=TRUE)
+#' 
+#' @export
+
+getse <- function(L, inm = NA, depth = 2, useregex = FALSE, fun = NA) {
+    if(depth < 1){ stop("depth has to be 1,2,3,...") }
+    # Get a list of all sub elements in L matching pattern at the given depth
+    # Depth==1 is directly the subelements of L, i.e. L[nms]
+
+    # Match directly in L?
+    if(depth == 1){
+        if(useregex){ inm <- grep(inm, names(L)) }
+        R <- L[[inm]]
+        if(class(fun) == "function"){ R <- fun(R) }
+    }
+    # Match in the subelements of L?
+    if(depth == 2){
+        R <- lapply(L, function(x){
+            if(useregex){ inm <- grep(inm, names(x)) }
+            val <- x[[inm]]
+            if(class(fun) == "function"){ val <- fun(val) }
+            return(val)
+        })
+    }
+
+    # Go one level deeper
+    if(depth >= 3){
+        R <- lapply(L, function(x){
+            getse(x, inm, depth-1, useregex, fun)
+        })
+    }
+    return(R)
+}
diff --git a/R/gof.R b/R/gof.R
new file mode 100644
index 0000000000000000000000000000000000000000..40862c88b83fb8cfddd38b4277888dcf939b7586
--- /dev/null
+++ b/R/gof.R
@@ -0,0 +1,11 @@
+#' @title Simple wrapper for graphics.off()
+#' @export
+#' 
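+#' @examples
+#' # Close all open graphics devices (and quit rgl if it is loaded)
+#' \dontrun{gof()}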
+gof <- function()
+{
+  graphics.off()
+  ## If rgl is loaded
+  if("package:rgl" %in% search()){
+    rgl.quit()
+  }
+}
diff --git a/R/in_range.R b/R/in_range.R
new file mode 100644
index 0000000000000000000000000000000000000000..0f168cf30f8515201584eccc0841ee6bf682fadf
--- /dev/null
+++ b/R/in_range.R
@@ -0,0 +1,53 @@
+# Do this in a separate file to see the generated help:
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?in_range
+
+#' Returns a logical vector with TRUE for the timestamps within the specified period.
+#'
+#' Returns a logical vector with TRUE for the timestamps within the period spanned by tstart and tend. 
+#'
+#' Note that the convention of timestamps being at the end of the time intervals causes the time point equal to \code{tstart} not to be included. See the last example.
+#'
+#' The times can be given as character or POSIX, per default in tz='GMT'.
+#' 
+#' @title Selects a period
+#' @param tstart The start of the period.
+#' @param time The timestamps as POSIX.
+#' @param tend The end of the period. If not given, the period extends to the end of \code{time}.
+#' @return A logical vector indicating the selected period with TRUE
+#' @name in_range
+#' @examples
+#'
+#' # Take a subset
+#' D <- subset(Dbuildingheatload, c("2010-12-15", "2011-01-01"))
+#'
+#' # Just a logical returning TRUE in a specified period
+#' in_range("2010-12-20", D$t, "2010-12-22")
+#'
+#' # Set which period to evaluate when optimizing parameters, like in rls_optim()
+#' # (the points with scoreperiod == FALSE are not included in the score evaluation)
+#' D$scoreperiod <- in_range("2010-12-20", D$t)
+#' D$scoreperiod
+#'
+#' # Further, excluding a small period by
+#' D$scoreperiod[in_range("2010-12-26", D$t, "2010-12-27")] <- FALSE
+#' D$scoreperiod
+#'
+#' # Note the convention of time stamps at the end of the time intervals
+#' # causes the point with t = 2010-12-26 00:00:00 not to be included, since it covers from "2010-12-25 23:00:00" to "2010-12-26 00:00:00"
+#' D$t[in_range("2010-12-26", D$t, "2010-12-27")]
+#'
+#'
+#' @export
+
+in_range <- function(tstart, time, tend=NA) {
+    if (class(tstart)[1] == "character") 
+        tstart <- asct(tstart)
+    if (is.na(tend))
+        tend <- time[length(time)]
+    if (class(tend)[1] == "character") 
+        tend <- asct(tend)
+    asct(tstart) < time & time <= asct(tend)
+}
diff --git a/R/input_class.R b/R/input_class.R
new file mode 100644
index 0000000000000000000000000000000000000000..3c54a8109248d8a3215c5128cdee66fe02816fb7
--- /dev/null
+++ b/R/input_class.R
@@ -0,0 +1,55 @@
+input_class <- R6::R6Class(
+  "input",
+  list(
+      ## Fields:
+      ## The expression as string for transforming the input
+      expr = NA,
+      ## The list  and index counter holding potential state values kept by the function evaluated in the expression
+      state_L = list(),
+      state_i = integer(1),
+      ## The model in which it is included (reference to the R6 forecastmodel object), it's needed here,
+      ##   since transformation functions (like AR, ones) need to access information about the model (like kseq)
+      model = NA,
+
+      ## methods
+      initialize = function(expr, model){
+          self$expr <- expr
+          self$model <- model
+      },
+
+      ## Generate (transform) the input by evaluating the expr
+      evaluate = function(data){
+          ## Init the state counter
+          self$state_i <- 0
+          ## Evaluate the expression in an environment with data
+          eval(parse(text = self$expr), data)
+      },
+
+      ## For resetting the state
+      state_reset = function(){
+          ## Init the state counter
+          self$state_i <- 0
+          ## Init the state list
+          self$state_L <- list()
+          ##
+          invisible(NULL)
+      },
+
+      ## Get the saved value in state
+      state_getval = function(initval){
+          self$state_i <- self$state_i + 1
+          if(length(self$state_L) < self$state_i){
+              ## First time called, initiate state variables
+              return(initval)
+          }else{
+              ## Take the state saved last time
+              return(self$state_L[[self$state_i]])
+          }
+      },
+
+      ## Save the state for next time
+      state_setval = function(val){
+          self$state_L[[self$state_i]] <- val
+      }
+  )
+)
diff --git a/R/inputs_class.R-documentation.R b/R/inputs_class.R-documentation.R
new file mode 100644
index 0000000000000000000000000000000000000000..d5770a9fba6b775714a1c77ad58e34874622f59b
--- /dev/null
+++ b/R/inputs_class.R-documentation.R
@@ -0,0 +1,110 @@
+# Do this in a separate file to see the generated help:
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?input_class
+
+#' R6 class for a input_class
+#' 
+#' Description of the class.
+#'
+#' @name input_class
+#' @details
+#' Details of the class.
+#'
+#'
+#' 
+#' @section Public fields:
+#'
+#'      - expr = NA: The expression as string for transforming the input.
+#' 
+#'      - state_L = list(): The list holding potential state values kept by the function evaluated in the expression.
+#'
+#'      - state_i = integer(1):  index counter for the state list.
+#'
+#'
+#----------------------------------------------------------------
+#' @section Public methods:
+#' All public methods are described below, and in the examples a section for each is included:
+#----------------------------------------------------------------
+
+#----------------------------------------------------------------
+#' @section \code{$new(expr)}:
+#' Create a new input with the expression \code{expr}.
+#' 
+#' @examples
+#' # new:
+#'
+#' # An input is created in a forecastmodel
+#' model <- forecastmodel$new()
+#' model$add_inputs(Ta = "lp(Ta, a1=0.9)")
+#' # The 'inputs' is now a list 
+#' model$inputs
+#' # With the input object
+#' class(model$inputs[[1]])
+#----------------------------------------------------------------
+
+#----------------------------------------------------------------
+#' @section \code{$evaluate(data)}:
+#' Generate (transform) the input by evaluating the expr with the \code{data} (data.list) attached.
+#'
+#' @examples
+#'
+#' # Now the transformation stage can be carried out to create the regression stage data
+#' # Take a data.list subset for the example
+#' D <- subset(Dbuildingheatload, 1:10, kseq=1:4)
+#' # Transform the data
+#' model$inputs[[1]]$evaluate(D)
+#' # What happens is simply that the expression is evaluated with the data
+#' # (Note that since this is not done through the model, some functions that need the model are not available)
+#' eval(parse(text=model$inputs[[1]]$expr), D)
+#'
+#----------------------------------------------------------------
+
+#----------------------------------------------------------------
+#' @section \code{$state_reset()}:
+#' Each function in the expressions (lp, fs, etc.) has the possibility to save a state, which can be read the next time it is called.
+#' 
+#' Reset the state by deleting \code{state_L} and setting \code{state_i} to 0.
+#'
+#' @examples
+#' # After running
+#' model$inputs[[1]]$evaluate(D)
+#' # the lp() has saved its state for next time
+#' model$inputs[[1]]$state_L
+#' # New data arrives
+#' Dnew <- subset(Dbuildingheatload, 11, kseq=1:4)
+#' # So in lp() the state is read and it continues
+#' model$inputs[[1]]$evaluate(Dnew)
+#'
+#' # If we want to reset the state, which is done in all _fit() functions (e.g. rls_fit), such that all transformations start from scratch
+#' # First evaluate with D again to set the state
+#' model$inputs[[1]]$evaluate(D)
+#' # Then reset the state
+#' model$inputs[[1]]$state_reset()
+#' # Now there is no state, so lp() starts by taking the first data point of the new data
+#' model$inputs[[1]]$evaluate(Dnew)
+#' Dnew$Ta
+#----------------------------------------------------------------
+
+#----------------------------------------------------------------
+#' @section \code{$state_getval(initval)}:
+#' Get the saved value in state. This function can be used in the beginning of transformation functions to get the current state value.
+#' First time called return the \code{initval}.
+#'
+#' Note that since all transformation functions are called in the same order,
+#' the state can be read and saved by keeping a counter internally; the counter is kept in the field $state_i.
+#'
+#' See example of use in \code{\link{lp}()}.
+#----------------------------------------------------------------
+
+#----------------------------------------------------------------
+#' @section \code{$state_setval(val)}:
+#' Set the state value, done in the end of a transformation function, see above.
+#'
+#' See example of use in \code{\link{lp}()}.
+#' 
+#----------------------------------------------------------------
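+#----------------------------------------------------------------
+# A minimal sketch (not part of the documentation above) of how a transformation
+# function can use the state, mimicking the pattern in lp(); the function name
+# 'persist_last' is only an illustrative assumption:
+#
+# persist_last <- function(X){
+#     # Read the value saved last time (the first call returns the initval)
+#     lastrow <- state_getval(initval = rep(NA, ncol(X)))
+#     # ... use lastrow together with X ...
+#     # Save the last row of X as the state for the next call
+#     state_setval(X[nrow(X), ])
+#     return(X)
+# }
+#----------------------------------------------------------------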
+NULL
+
diff --git a/R/lag.R b/R/lag.R
new file mode 100644
index 0000000000000000000000000000000000000000..82d2cbbecbb8fd10d3b561c0ab99cb6fdd46f92a
--- /dev/null
+++ b/R/lag.R
@@ -0,0 +1,210 @@
+## Do this in a separate file to see the generated help:
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?lag
+
+lag_vector <- function(x, lag){
+    if (lag > 0) {
+        ## Lag x, i.e. delay x lag steps
+        return(c(rep(NA, lag), x[1:(length(x) - lag)]))
+    }else if(lag < 0) {
+        ## Negative lag, i.e. lead x abs(lag) steps forward
+        return(c(x[(abs(lag) + 1):length(x)], rep(NA, abs(lag))))
+    }else{
+        ## lag = 0, return x
+        return(x)
+    }
+}
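+## E.g. (an illustration): lag_vector(1:5, 2) gives c(NA, NA, 1, 2, 3) and lag_vector(1:5, -2) gives c(3, 4, 5, NA, NA)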
+
+#' Lagging of a vector by simply shifting the values back or forth.
+#'
+#' This function lags (shifts) the values of the vector. If \code{lagseq} is a single integer value, then a
+#' vector is returned. If \code{lagseq} is an integer vector, then a data.frame is returned with the columns
+#' as the vectors lagged with the values in lagseq.
+#'
+#' Note that this changes the behaviour of the default \code{\link{lag}()} function.
+#' 
+#' @title Lagging of a vector
+#' @param x The vector to be lagged.
+#' @param lagseq The integer(s) to lag. 
+#' @return A vector or a data.frame.
+#' @name lag
+#' @seealso \code{\link{lag.data.frame}} which is run when \code{x} is a data.frame.
+#' @examples
+#' # The values are simply shifted
+#' # Ahead in time
+#' lag(1:10, 3)
+#' # Back in time
+#' lag(1:10, -3)
+#' # Works but returns a numeric
+#' lag(as.factor(1:10), 3)
+#' # Works and returns a character
+#' lag(as.character(1:10), 3)
+#' # Giving several lag values
+#' lag(1:10, c(1:3))
+#' lag(1:10, c(5,3,-1))
+#'
+#' # See also how to lag a forecast data.frame
+#' ?lag.data.frame
+#'
+#' @export
+
+lag.numeric <- function(x, lagseq) {
+    if(length(lagseq) == 1){
+        return(lag_vector(x, lagseq))
+    }else{
+        ## Return a data.frame
+        tmp <- lapply_cbind_df(lagseq, function(lag){
+            return(lag_vector(x, lag))
+        })
+        names(tmp) <- pst("k",lagseq)
+        return(tmp)
+    }
+}
+
+
+#' @export
+lag.factor <- function(x, lagseq) {
+    lag.numeric(x, lagseq)
+}
+
+
+#' @export
+lag.character <- function(x, lagseq) {
+    lag.numeric(x, lagseq)
+}
+
+#' @export
+lag.logical <- function(x, lagseq) {
+    lag.numeric(x, lagseq)
+}
+
+
+#' Lagging of a data.frame
+#'
+#' This function lags the columns with the integer values specified with the argument \code{lagseq}.
+#' 
+#' @title Lagging of a data.frame
+#' @param x The data.frame whose columns are to be lagged.
+#' @param lagseq Either an integer vector (of length 1 or matching the number of columns), or one of "+k", "-k", "+h" or "-h" to lag each column by (minus) the horizon in its column name.
+#' @return A data.frame which columns are lagged
+#' @name lag.data.frame
+#' @examples
+#' # dataframe of forecasts
+#' X <- data.frame(k1=1:10, k2=1:10, k3=1:10)
+#' X
+#'
+#' # Lag all columns
+#' lag(X, 1)
+#' \dontshow{if(!all(is.na(lag(X, 1)[1, ]))){ stop("Lag all columns didn't work") }}
+#'
+#' # Lag each column different steps
+#' lag(X, 1:3)
+#' # Lag each columns with its k value from the column name
+#' lag(X, "+k")
+#' \dontshow{if(!isTRUE(all.equal(lag(X, 1:3), lag(X, "+k")))){ stop("Lagging with '+k' didn't give the same as lagging with 1:3") }}
+#' # Also works for columns named hxx
+#' names(X) <- gsub("k", "h", names(X))
+#' lag(X, "-h")
+#'
+#' # If not same length as columns in X, then it doesn't know how to lag
+#' \dontrun{lag(X, 1:2)}
+#'
+#' \dontshow{
+#' if(class(lag(data.frame(k1=1:10), 2)) != "data.frame"){ stop("Trying to lag data.frame with 1 column, but return is not class data.frame") }
+#' if(!all(dim(lag(data.frame(k1=1:10), "+k")) == c(10,1))){ stop("Trying to lag data.frame with 1 column, but the dimensions of the return are wrong") }
+#' }
+#'
+#' @export
+lag.data.frame <- function(x, lagseq) {
+    X <- x
+    nms <- nams(X)
+    if (length(lagseq) == 1) {
+        if (lagseq %in% c("+k","+h")) {
+            lagseq <- rep(0, length(nms))
+            ## lagseq according to the k value of the columnnames
+            i <- grep("^[k|h][[:digit:]]+$", nms)
+            lagseq[i] <- as.integer(sapply(strsplit(nms[i], "[k|h]"), function(x){ x[length(x)] }))
+        } else if (lagseq %in% c("-k","-h")) {
+            lagseq <- rep(0, length(nms))
+            ## lagseq according to the negative k value of the columnnames
+            i <- grep("^[k|h][[:digit:]]+$", nms)
+            lagseq[i] <- -as.integer(sapply(strsplit(nms[i], "[k|h]"), function(x){ x[length(x)] }))
+        }
+    }
+    if (length(lagseq) > 1) {
+        if(length(lagseq) != ncol(X)){
+            stop(pst("Must have same columns as length of lagseq: data.frame has ",ncol(X)," columns and lagseq is of length ",length(lagseq)))
+        }else{
+            ## lagseq has length equal to the number of columns in X
+            X <- as.data.frame(sapply(1:length(lagseq), function(i) {
+                lag_vector(X[, i], lagseq[i])
+            }))
+            nams(X) <- nms
+         }
+    } else {
+        ## X is a data.frame and lagseq is a single value, so lag all columns by it
+        lag <- lagseq
+        ## If only one row in X was given, then X is not a data.frame anymore (the code above has changed it)
+        if(is.vector(X)){
+          X <- as.data.frame(lag_vector(X, lag))
+          nams(X) <- nms
+        } else {
+            if (lag > 0) {
+                X[(lag + 1):nrow(X), ] <- X[1:(nrow(X) - lag), ]
+                X[1:lag, ] <- NA
+            } else if (lag < 0) {
+                lag <- -lag
+                X[1:(nrow(X) - lag), ] <- X[(lag + 1):nrow(X), ]
+                X[(nrow(X) - lag + 1):nrow(X), ] <- NA
+            }
+        }
+     }
+    return(X)
+}
+
+#' @export
+lag.matrix <- function(x, lagseq){
+    lag.data.frame(x, lagseq)
+}
+
+## ## Test
+## x <- data.frame(k1=1:5,k2=6:10)
+## ##
+## lag(x, lagseq=1)
+## source("nams.R")
+## lag(as.matrix(x), lagseq=c(1,2))
+## ##
+## lag(x, lagseq="+k")
+## lag(x, "+k")
+## lag(x, "-k")
+
+## lag.data.table <- function(x, nms, lagseq, per_reference = FALSE) {
+##     DT <- x
+##     if (!per_reference) {
+##         ## Don't do it per reference
+##         X <- DT[, ..nms]
+##         for (i in 1:length(lagseq)) {
+##             if (lagseq[i] > 0) {
+##                 X[, `:=`(c(nams(X)[i]), shift(.SD, lagseq[i], NA, "lag")), .SDcols = c(nams(X)[i])]
+##             } else if (lagseq[i] < 0) {
+##                 X[, `:=`(c(nams(X)[i]), shift(.SD, -lagseq[i], NA, "lead")), .SDcols = c(nams(X)[i])]
+##             }
+##         }
+##         return(X)
+##     } else {
+##         ## Here also names of the columns to be shifted should be given Do it per
+##         ## reference
+##         for (i in 1:length(lagseq)) {
+##             if (lagseq[i] > 0) {
+##                 DT[, `:=`(c(nms[i]), shift(.SD, lagseq[i], NA, "lag")), .SDcols = c(nms[i])]
+##             } else if (lagseq[i] < 0) {
+##                 DT[, `:=`(c(nms[i]), shift(.SD, -lagseq[i], NA, "lead")), .SDcols = c(nms[i])]
+##             }
+##         }
+##         invisible(NULL)
+##     }
+## }
+
diff --git a/R/lapply.R b/R/lapply.R
new file mode 100644
index 0000000000000000000000000000000000000000..ebead0f2acf4e99eadbda9ef2b264c22ad10ea65
--- /dev/null
+++ b/R/lapply.R
@@ -0,0 +1,42 @@
+## Do this in a separate file to see the generated help:
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?
+
+#' Helper which does lapply and then cbind
+#' @param X object to apply on
+#' @param FUN function to apply
+#' @export
+lapply_cbind <- function(X, FUN){
+  val <- lapply(X, FUN)
+  return(do.call("cbind", val))
+}
+
+#' Helper which does lapply and then rbind
+#' @param X object to apply on
+#' @param FUN function to apply
+#' @export
+lapply_rbind <- function(X, FUN){
+  val <- lapply(X, FUN)
+  return(do.call("rbind", val))
+}
+
+#' Helper which does lapply, cbind and then as.data.frame
+#' @param X object to apply on
+#' @param FUN function to apply
+#' @export
+lapply_cbind_df <- function(X, FUN){
+  val <- lapply(X, FUN)
+  return(as.data.frame(do.call("cbind", val)))
+}
+
+#' Helper which does lapply, rbind and then as.data.frame
+#' @param X object to apply on
+#' @param FUN function to apply
+#' @export
+lapply_rbind_df <- function(X, FUN){
+  val <- lapply(X, FUN)
+  return(as.data.frame(do.call("rbind", val)))
+}
+
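+## A quick usage sketch of the helpers above (with illustrative values):
+## lapply_cbind_df(1:3, function(i){ data.frame(x=i) })  # a 1x3 data.frame
+## lapply_rbind_df(1:3, function(i){ data.frame(x=i) })  # a 3x1 data.frame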
diff --git a/R/lm_fit.R b/R/lm_fit.R
new file mode 100644
index 0000000000000000000000000000000000000000..17a54bf72b73dfef48d4f132076640675ae7ae47
--- /dev/null
+++ b/R/lm_fit.R
@@ -0,0 +1,191 @@
+# Do this in a separate file to see the generated help:
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?lm_fit
+
+#' Fit a linear regression model given an onlineforecast model, separately for each prediction horizon
+#'
+#' @title Fit an onlineforecast model with \code{\link{lm}}
+#' @param prm as numeric with the parameters to be used when fitting.
+#' @param model object of class forecastmodel with the model to be fitted.
+#' @param data as data.list with the data to fit the model on.
+#' @param scorefun Optional. If scorefun is given, e.g. \code{\link{rmse}}, then the value of this is also returned.
+#' @param returnanalysis as logical determining if the analysis should be returned. See below.
+#' @param printout Defaults to TRUE. Prints the parameters for model.
+#' @return Depends on:
+#' 
+#'     - If \code{returnanalysis} is TRUE a list containing:
+#' 
+#'         * \code{Yhat}: data.frame with forecasts for \code{model$kseq} horizons.
+#'
+#'         * \code{model}: The forecastmodel object cloned deep, so can be modified without changing the original object.
+#' 
+#'         * \code{data}: data.list with the data used, see examples on how to obtain the transformed data.
+#'
+#'         * \code{Lfitval}: a character "Find the fits in model$Lfits"; the fits themselves are kept in \code{model$Lfits}, a list with the lm fit for each horizon.
+#'
+#'         * \code{scoreval}: data.frame with the scorefun result on each horizon (only scoreperiod is included).
+#'
+#'     - If \code{returnanalysis} is FALSE (and \code{scorefun} is given): The sum of the score function on all horizons (specified with model$kseq).
+#'
+#' @examples
+#'
+#' # Take data (See vignette ??(ref) for better model and more details)
+#' D <- subset(Dbuildingheatload, c("2010-12-15", "2011-01-01"))
+#' D$y <- D$heatload
+#' # Define a model 
+#' model <- forecastmodel$new()
+#' model$output <- "y"
+#' model$add_inputs(Ta = "Ta",
+#'                  mu = "ones()")
+#' model$add_regprm("rls_prm(lambda=0.99)")
+#'
+#' # Before fitting the model, define which points to include in the evaluation of the score function
+#' D$scoreperiod <- in_range("2010-12-20", D$t)
+#' # And the sequence of horizons to fit for
+#' model$kseq <- 1:6
+#'
+#' # Now we can fit the model with RLS and get the model validation analysis data
+#' fit <- lm_fit(prm=NA, model=model, data=D)
+#' # What did we get back?
+#' names(fit)
+#' class(fit)
+#' # The one-step forecast
+#' plot(D$y, type="l")
+#' lines(fit$Yhat$k1, col=2)
+#' # Get the residuals
+#' plot(residuals(fit)$h1)
+#' # Score for each horizon
+#' score_fit(fit)
+#'
+#' # The lm_fit doesn't put anything in
+#' fit$Lfitval
+#' # Find the lm fits here
+#' model$Lfits
+#' # See result for k=1 horizon
+#' summary(model$Lfits$k1)
+#' # Some diurnal pattern is present
+#' acf(residuals(fit)$h1, na.action=na.pass, lag.max=96)
+#'
+#'
+#' @export
+lm_fit <- function(prm=NA, model, data, scorefun = NA, returnanalysis = TRUE, printout = TRUE){
+    # Check that the model is setup correctly, it will stop and print a message if not
+    model$check(data)
+    
+    # Function for initializing an lm fit:
+    # - it will change the "model" input (since it is an R6 class and thus passed by reference)
+    # - If scorefun is given, e.g. rmse(), then the value of this is returned
+
+    if(printout){
+        # Should here actually only print the ones that were found and changed?
+        cat("----------------\n")
+        if(is.na(prm[1])){
+            cat("prm=NA, so current parameters are used.\n")
+        }else{
+            print(prm)
+        }
+    }
+    # First insert the prm into the model input expressions
+    model$insert_prm(prm)
+
+    # ################################
+    # Since lm_fit is run from scratch, init the stored input data (only needed when running iteratively)
+    model$datatr <- NA
+    model$yAR <- NA
+    
+    # ################################ 
+    # Init the inputs states (and some more is reset)
+    model$reset_state()
+    # Generate the 2nd stage inputs (i.e. the transformed data)
+    datatr <- model$transform_data(data)
+
+    #
+    model$Lfits <- lapply(model$kseq, function(k){
+      # Form the regressor matrix, and lag
+      X <- as.data.frame(subset(datatr, kseq = k, lagforecasts = TRUE))
+      inputnms <- names(X)
+      # Add the model output to the data.frame for lm()
+      X[ ,model$output] <- data[[model$output]]
+      # Generate the formula
+      frml <- pst(model$output, " ~ ", pst(inputnms, collapse=" + "), " - 1")
+      # Fit the model
+      fit <- lm(frml, X)
+      # Return the fit and the data
+      return(fit)
+    })
+    names(model$Lfits) <- pst("k", model$kseq)
+
+    # Calculate the predictions
+    Yhat <- lm_predict(model, datatr)
+
+    # Maybe crop the output
+    if(!is.na(model$outputrange[1])){ Yhat[Yhat < model$outputrange[1]] <- model$outputrange[1] }
+    if(!is.na(model$outputrange[2])){ Yhat[model$outputrange[2] < Yhat] <- model$outputrange[2] }
+
+    #----------------------------------------------------------------
+    # Calculate the result to return
+    # If the objective function (scorefun) is given
+    if(class(scorefun) == "function"){
+        # Do some checks
+        if( !("scoreperiod" %in% names(data)) ){ stop("data$scoreperiod is not set: Must have it set to an index (int or logical) defining which points to be evaluated in the scorefun().") }
+        if( all(is.na(data$scoreperiod)) ){ stop("data$scoreperiod is not set correctly: It must be set to an index (int or logical) defining which points to be evaluated in the scorefun().") }
+        # Calculate the objective function for each horizon
+        Residuals <- residuals(Yhat, data[[model$output]])
+        scoreval <- sapply(1:ncol(Yhat), function(i){
+            scorefun(Residuals[data$scoreperiod,i])
+        })
+        nams(scoreval) <- nams(Yhat)
+    }else{
+        scoreval <- NA
+    }
+
+    # 
+    if(returnanalysis){
+        # Return the model validation data
+        invisible(structure(list(Yhat = Yhat, model = model$clone_deep(), data = data, Lfitval = "Find the lm fits in model$Lfits", scoreval = scoreval), class = c("forecastmodel_fit","lm_fit")))
+    }else{
+        # Only the summed score returned
+        val <- sum(scoreval, na.rm = TRUE)
+        if(is.na(val)){ stop("Cannot calculate the scorefunction for any horizon") }
+        if(printout){ print(c(scoreval,sum=val))}
+        return(val)
+    }
+
+    ## OLD
+    ## # Is an objective function given?
+    ## if(class(scorefun) == "function" & !returnanalysis){
+    ##     # Do some checks
+    ##     if( !("scoreperiod" %in% names(data)) ){ stop("data$scoreperiod are set: Must have it set to an index (int or logical) defining which points to be evaluated in the scorefun().") }
+    ##     if( all(is.na(data$scoreperiod)) ){ stop("data$scoreperiod is not set correctly: It must be set to an index (int or logical) defining which points to be evaluated in the scorefun().") }
+    ##     scoreperiod <- data$scoreperiod    
+    ##     # Return the scorefun values
+    ##     scoreval <- sapply(1:ncol(Yhat), function(i){
+    ##         scorefun(Resid[scoreperiod,i])
+    ##     })
+    ##     nams(scoreval) <- nams(Yhat)
+    ##     val <- sum(scoreval, na.rm = TRUE)
+    ##     if(printout){print(c(scoreval,sum=val))}
+    ##     return(val)
+    ## } else if(returnanalysis){
+    ##     # The estimated coefficients
+    ##     Lfitval <- lapply(model$Lfits, function(model){ 
+    ##       coef <- model$coefficients
+    ##       names(coef) <- gsub("(.+?)(\\.k.*)", "\\1", names(coef))
+    ##       return(coef)
+    ##     })
+    ##     # Include score function
+    ##     scoreval <- NA
+    ##     if(class(scorefun) == "function"){
+    ##         # Calculate the objective function for each horizon
+    ##         scoreval <- sapply(1:ncol(Yhat), function(i){
+    ##             scorefun(Resid[,i])
+    ##         })
+    ##         nams(scoreval) <- nams(Yhat)
+    ##     }
+    ##     # Return the model validation data
+    ##     return(list(Yhat = Yhat, t = data$t, Resid = Resid, datatr = datatr, Lfitval = Lfitval, scoreval = scoreval, scoreperiod = data$scoreperiod))
+    ## }
+    ## invisible("ok")
+}
diff --git a/R/lm_optim.R b/R/lm_optim.R
new file mode 100644
index 0000000000000000000000000000000000000000..e27c8182765216d8eff9c72cdf029905d1f5ceb9
--- /dev/null
+++ b/R/lm_optim.R
@@ -0,0 +1,101 @@
+# Do this in a separate file to see the generated help:
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?lm_optim
+
+
+#' Optimize parameters (transformation stage) of LM model
+#'
+#' This is a wrapper for \code{\link{optim}} to enable easy use of bounds and caching in the optimization.
+#' 
+#' @title Optimize parameters for onlineforecast model fitted with LM
+#' @param model The onlineforecast model, including inputs, output, kseq, p
+#' @param data The data.list including the variables used in the model.
+#' @param scorefun The score function used for calculating the score to be optimized.
+#' @param init Optional initial values of the parameters to be optimized. If NA (default), the init values from \code{model$prmbounds} are used.
+#' @param cachedir A character specifying the path (and prefix) of the cache file name. If set to \code{""}, then no cache will be loaded or written.
+#' @param printout A logical determining if the score function is printed out in each iteration of the optimization.
+#' @param method The method argument for \code{\link{optim}}.
+#' @param ... Additional parameters to \code{\link{optim}}
+#' @return Result object of optim().
+#' Parameters resulting from the optimization can be found from \code{result$par}
+#' @seealso \code{link{optim}} for how to control the optimization and \code{\link{rls_optim}} which works very similarly.
+#' @examples
+#'
+#' # Take data (See vignette ??(ref) for better model and more details)
+#' D <- subset(Dbuildingheatload, c("2010-12-15", "2011-01-01"))
+#' D$y <- D$heatload
+#' # Define a model 
+#' model <- forecastmodel$new()
+#' model$add_inputs(Ta = "lp(Ta, a1=0.9)",
+#'                  mu = "ones()")
+#' # Before fitting the model, define which points to include in the evaluation of the score function
+#' D$scoreperiod <- in_range("2010-12-20", D$t)
+#' # And the sequence of horizons to fit for
+#' model$kseq <- 1:6
+#' # Now we can fit the model and get the score, as it is
+#' lm_fit(model=model, data=D, scorefun=rmse, returnanalysis=FALSE)
+#' # Or we can change the low-pass filter coefficient
+#' lm_fit(c(Ta__a1=0.99), model, D, rmse, returnanalysis=FALSE)
+#'
+#' # This could be passed to optim() (or any optimizer). See \code{forecastmodel$insert_prm()} for more details.
+#' optim(c(Ta__a1=0.98), lm_fit, model=model, data=D, scorefun=rmse, returnanalysis=FALSE, lower=c(Ta__a1=0.4), upper=c(Ta__a1=0.999), method="L-BFGS-B")
+#'
+#' # lm_optim is simply a helper, it makes using bounds easier and enables caching of the results
+#' # First add bounds for lambda (lower, init, upper)
+#' model$add_prmbounds(Ta__a1 = c(0.4, 0.98, 0.999))
+#'
+#' # Now the same optimization as above can be done by
+#' val <- lm_optim(model, D)
+#' val
+#'
+#' # Caching can be done by providing a path (try rerunning and see the file in "cache" folder)
+#' val <- lm_optim(model, D, cachedir="cache")
+#' val
+#'
+#' # If anything affecting the results are changed, then the cache is not loaded
+#' model$add_prmbounds(Ta__a1 = c(0.7, 0.98, 0.999))
+#' val <- lm_optim(model, D, cachedir="cache")
+#'
+#' # To delete the cache
+#' file.remove(dir("cache", full.names=TRUE))
+#' unlink("cache", recursive=TRUE)
+#'
+#'
+#' @export
+lm_optim <- function(model, data, scorefun = rmse, init=NA, cachedir="", printout=TRUE, method="L-BFGS-B", ...){
+    ## Take the parameter bounds set in the model (the init values only if init was not given as argument)
+    if(is.na(init[1])){ init <- model$get_prmbounds("init") }
+    lower <- model$get_prmbounds("lower")
+    upper <- model$get_prmbounds("upper")
+    # If bounds are NA, then set
+    if(any(is.na(lower))){ lower[is.na(lower)] <- -Inf}
+    if(any(is.na(upper))){ upper[is.na(upper)] <- Inf}
+
+    ## Caching the results based on some of the function arguments
+    if(cachedir != ""){
+        ## Have to insert the parameters in the expressions
+        model$insert_prm(init)
+        ## Give all the elements to calculate the unique cache name
+        cnm <- cache_name(lm_fit, lm_optim, model$outputrange, model$regprm, model$transform_data(data), data[[model$output]], scorefun, init, lower, upper, cachedir = cachedir)
+        ## Maybe load the cached result
+        if(file.exists(cnm)){ return(readRDS(cnm)) }
+    }
+
+    ## Run the optimization
+    res <- optim(par = init,
+                 fn = lm_fit,
+                 model = model,
+                 data = data,
+                 scorefun = scorefun,
+                 printout = printout,
+                 returnanalysis = FALSE,
+                 lower = lower,
+                 upper = upper,
+                 method = method,
+                 ...)
+    ## Save the result in the cachedir
+    if(cachedir != ""){ cache_save(res, cnm) }
+    ## Return the result
+    return(res)
+}
diff --git a/R/lm_predict.R b/R/lm_predict.R
new file mode 100644
index 0000000000000000000000000000000000000000..bff7ba22ede9896019a139536de6b674ce57decc
--- /dev/null
+++ b/R/lm_predict.R
@@ -0,0 +1,59 @@
+#' Use a fitted forecast model to predict its output variable with transformed data.
+#'
+#' See the ??ref(recursive updating vignette).
+#'
+#' @title Prediction with an lm forecast model.
+#' @param model Onlineforecast model object which has been fitted.
+#' @param datatr Transformed data.
+#' @return The Yhat forecast matrix with a forecast for each model$kseq and for each time point in \code{datatr$t}.
+#' @examples
+#'
+#'
+#' # Take data
+#' D <- subset(Dbuildingheatload, c("2010-12-15", "2011-01-01"))
+#' D$y <- D$heatload
+#' # Define a model 
+#' model <- forecastmodel$new()
+#' model$add_inputs(Ta = "lp(Ta, a1=0.7)", mu = "ones()")
+#'
+#' # Before fitting the model, define which points to include in the evaluation of the score function
+#' D$scoreperiod <- in_range("2010-12-20", D$t)
+#' # And the sequence of horizons to fit for
+#' model$kseq <- 1:6
+#'
+#' # Transform using the model
+#' datatr <- model$transform_data(D)
+#'
+#' # See the transformed data
+#' str(datatr)
+#'
+#' # The model has not been fitted
+#' model$Lfits
+#'
+#' # To fit
+#' lm_fit(model=model, data=D)
+#'
+#' # Now the fits for each horizon are there (the latest update)
+#' # For example 
+#' summary(model$Lfits$k1)
+#'
+#' # Use the fit for prediction
+#' D$Yhat <- lm_predict(model, datatr)
+#'
+#' # Plot it
+#' plot_ts(D, c("y|Yhat"), kseq=1)
+#'
+#'
+#' @export
+lm_predict <- function(model, datatr) {
+  # Calculate the predictions
+  Yhat <- lapply_cbind_df(1:length(model$kseq), function(i){
+    k <- model$kseq[i]
+    fit <- model$Lfits[[i]]
+    # Form the regressor matrix, don't lag
+    X <- as.data.frame(subset(datatr, kseq = k))
+    predict(fit, X)
+  })
+  nams(Yhat) <- pst("k", model$kseq)
+  return(Yhat)
+}
diff --git a/R/long_format.R b/R/long_format.R
new file mode 100644
index 0000000000000000000000000000000000000000..755047b62a7f63a33340429bc4c3443dc36bfc7a
--- /dev/null
+++ b/R/long_format.R
@@ -0,0 +1,37 @@
+## Do this in a separate file to see the generated help:
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?long_format
+
+#' Creates a long format of the predictions
+#'
+#' This function creates a long-format prediction data.frame, which can be useful for analysis and plotting.
+#'
+#' 
+#' @title Long format of prediction data.frame
+#' @param fit The result from either lm_fit or rls_fit.
+#' @param Time The timestamps to use if they are missing from the fit object.
+#' @return A data.frame with the time each prediction was made (PredTime), the horizon (k), the predicted value (Pred) and the timestamp it applies to (Time).
+#' @examples
+#'
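+#' # A minimal sketch with a small made-up fit object (normally fit comes from e.g. lm_fit):
+#' fit <- list(t = asct("2019-01-01") + (0:5)*3600,
+#'             Yhat = data.frame(k1 = 1:6, k2 = 11:16))
+#' long_format(fit)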
+#' 
+#' @export
+
+long_format <- function(fit, Time = NULL){
+    if(!("t" %in% names(fit))) {
+        if(is.null(Time)) stop("Missing Time")
+        fit$t <- Time
+    }
+    if(!("Yhat" %in% names(fit))) stop("Missing forecasts")
+  
+    predDF <- do.call(rbind, lapply(1:length(fit$t), function(i)
+    {
+        DF <- data.frame(PredTime = fit$t[i],
+                   Time = fit$t[(i+1):(dim(fit$Yhat)[2]+i)],
+                   k = 1:(dim(fit$Yhat)[2]),
+                   Pred = as.numeric(fit$Yhat[i,]))
+        return(DF)
+    }))
+    return(predDF)
+}
diff --git a/R/lp.R b/R/lp.R
new file mode 100644
index 0000000000000000000000000000000000000000..b1178615cfb9cf4f1eb7fce00d35d8c377cea494
--- /dev/null
+++ b/R/lp.R
@@ -0,0 +1,94 @@
+## Do this in a separate file to see the generated help:
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?lp
+
+#' First-order low-pass filtering of a time series vector.
+#'
+#' This function applies a first order unity gain low-pass filter to the columns of \code{X}.
+#' The low-pass filter is applied to each column separately. The stationary gain of the filter is one.
+#'
+#' If a list of dataframes (or matrices) is given, then the low-pass filtering is recursively applied on each.
+#' 
+#' @title First-order low-pass filtering
+#' @param X Dataframe or matrix (or list of them) of forecasts in columns to be low-pass filtered.
+#' @param a1 The low-pass filter coefficient.
+#' @param usestate logical: Use the state kept in the model$input? If \code{lp()} is used outside model$transform_data(), then it must be set to FALSE, otherwise the input$state (which is not there) will be read, leading to an error.
+#' @return The low-pass filtered dataframe (as a matrix)
+#' @examples
+#' # Make a dataframe for the examples
+#' X <- data.frame(k1=rep(c(0,1),each=5))
+#' X$k2 <- X$k1
+#' Xf <- lp(X, 0.5, usestate=FALSE)
+#' Xf
+#'
+#' # See the input and the low-pass filtered result
+#' plot(X$k1)
+#' lines(Xf[ ,"k1"])
+#' # Slower response with higher a1 value
+#' lines(lp(X, 0.8, usestate=FALSE)[ ,"k1"])
+#'
+#' # If a list of dataframes is given, then lp() is recursively applied on each
+#' lp(list(X,X), 0.5, usestate=FALSE)
+#'
+#' 
+#' @export
+
+lp <- function(X, a1, usestate = TRUE) {
+    ## 
+    if (class(X) == "list") {
+        ## If only one coefficient, then repeat it
+        if (length(a1) == 1) {
+            a1 <- rep(a1, length(X))
+        }
+        ## Call again for each element
+        val <- lapply(1:length(X), function(i) {
+            lp(X[[i]], a1[i], usestate)
+        })
+        nams(val) <- nams(X)
+        return(val)
+    }
+    ## Get the state value saved last time Get the value if it is not the first time,
+    ## it can be a variable of any class
+    yInit <- rep(NA,ncol(X))
+    if(usestate){
+        yInit <- state_getval(initval = yInit)
+    }
+    ## Carry out the function, insert the init value and remove afterwards
+    val <- apply(rbind(yInit, X), 2, lp_vector_cpp, a1 = a1)[-1, , drop = FALSE]
+    ## Keep the state value
+    if(usestate){
+        state_setval(val[nrow(X), ])
+    }
+    ## Return the value
+    return(val)
+}
+
+
+lp_vector <- function(x, a1) {
+    ## Make a 1st order low pass filter as (5.3) p.46 in the HAN report.
+    y <- numeric(length(x))
+    ## First value in x is the init value
+    y[1] <- x[1]
+    ## 
+    for (i in 2:length(x)) {
+        if (is.na(y[i - 1])) {
+            y[i] <- x[i]
+        } else {
+            y[i] <- a1 * y[i - 1] + (1 - a1) * x[i]
+        }
+    }
+    ## Return (afterwards the init value y[1], must be handled)
+    return(y)
+}
+
+
+## ## Test ##x <- c(rep(0,10),rep(1,10)) x <- rnorm(200) x[5] <- NA lp_vector(x, 0.8)
+## lp_vector_cpp(x, 0.8)
+
+## plot(x) lines(lp_vector_cpp(x, 0.8))
+
+## require(microbenchmark) microbenchmark( lp_vector(x, 0.8), lp_vector_cpp(x, 0.8) )
+
diff --git a/R/make_input.R b/R/make_input.R
new file mode 100644
index 0000000000000000000000000000000000000000..b19d8b96e62304b05b78b38ffcb3ee7cb40af0ba
--- /dev/null
+++ b/R/make_input.R
@@ -0,0 +1,34 @@
+# Do this in a separate tmp.R file to check the documentation
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?make_input
+
+#' Make a forecast matrix (as data.frame) from observations.
+#'
+#' This function creates a data.frame with columns for each horizon such that it can be 
+#' added to a data.list and used in a forecast model.
+#' 
+#' @param observations vector of observations.
+#' @param kseq vector of integers, representing the desired "k-steps ahead".
+#' @return Returns a forecast matrix (as a data.frame) with simply the observation vector copied to each column.
+#' @examples
+#'
+#' # Data for example
+#' D <- subset(Dbuildingheatload, c("2010-12-15","2010-12-20"))
+#' 
+#' # Generate the input
+#' make_input(D$heatload, 1:4)
+#'
+#' # Set it in D, such that it can be used in input expressions (e.g. by model$add_inputs(AR = "AR0"))
+#' D$AR0 <- make_input(D$heatload, 1:36)
+#' 
+#' @export
+make_input <- function(observations, kseq){
+    val <- sapply(kseq, function(k){
+        observations
+    })
+    ## set row and column names
+    nams(val) <- paste0('k', kseq)
+    return( as.data.frame(val) )
+}
diff --git a/R/make_tday.R b/R/make_tday.R
new file mode 100644
index 0000000000000000000000000000000000000000..0c3584d093fb11c8485fc138232cfccdc00cd0c5
--- /dev/null
+++ b/R/make_tday.R
@@ -0,0 +1,41 @@
+## Do this in a separate tmp.R file to check the documentation
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?make_tday
+
+#' Make an hour-of-day data.frame with k-step ahead columns.
+#'
+#' This function creates a data.frame with k-steps-ahead values of hour of day,
+#' such that it can be added to a data.list and used inputs to a forecast model.
+#' 
+#' @param time vector of times of class "POSIXct" "POSIXt".
+#' @param kseq vector of integers, representing the desired "k-steps ahead".
+#' @param tstep step time of k in seconds.
+#' @param units to return in, e.g. "hours" or "mins"
+#' @return Returns a data.frame with rownames = times, colnames = k1, k2, k5, ...
+#' The content of the data.frame is the hour of day, following the onlineforecast setup.
+#' @keywords hourofday lags data.frame
+#' @examples
+#' # Create a time sequence
+#' tseq <- seq(asct("2019-01-01"), asct("2019-02-01 12:00"), by=1800)
+#' 
+#' # Make the time of day sequence
+#' make_tday(tseq, 1:10)
+#' 
+#' # With 0.5 hour steps and in minutes
+#' make_tday(tseq, 1:10, tstep=1800, units="mins")
+#'
+#' 
+#' @export
+
+make_tday <- function(time, kseq, tstep=3600, units="hours"){
+    ## The time of day (in the specified units)
+    tday <- sapply(kseq, function(k){
+        tk <- time + k * tstep
+        as.numeric( tk - trunc(tk, units="days"), units=units)
+    })
+    ## set row and column names
+    nams(tday) <- paste0('k', kseq)
+    return( as.data.frame(tday) )
+}
diff --git a/R/nams.R b/R/nams.R
new file mode 100644
index 0000000000000000000000000000000000000000..512e583e160f2ca8ae39f5ba4e5e67fda2f599f7
--- /dev/null
+++ b/R/nams.R
@@ -0,0 +1,44 @@
+#' Return the column names of a dataframe or a matrix.
+#'
+#' Simply to have a single function for returning the column names, instead of
+#' \code{colnames()} for a \code{matrix} and \code{names()} for a \code{data.frame}).
+#' 
+#' @title Return the column names
+#' @param x The data.frame or matrix to get or set the column names of.
+#' @examples
+#' 
+#' X <- matrix(1, nrow=2, ncol=3)
+#' colnames(X) <- c("c1","c2","c3")
+#' D <- as.data.frame(X)
+#' 
+#' # Annoyingly this gives NULL for a matrix
+#' names(X)
+#' # Could use this everywhere
+#' colnames(D)
+#' # but this is shorter
+#' nams(X)
+#' nams(D)
+#'
+#' # Also for assignment
+#' nams(D) <- c("x1","x2","x3")
+#' nams(D)
+#'
+#' @export
+nams <- function(x) {
+    if(is.matrix(x)){
+        colnames(x)
+    } else {
+        names(x)
+    }
+}
+
+
+#' @export
+`nams<-` <- function(x, value) {
+    if(is.matrix(x)){
+        colnames(x) <- value
+    } else {
+        names(x) <- value
+    }
+  x
+}
diff --git a/R/ones.R b/R/ones.R
new file mode 100644
index 0000000000000000000000000000000000000000..2f84ba33771d5a46867d255496e08e257a04ff1c
--- /dev/null
+++ b/R/ones.R
@@ -0,0 +1,37 @@
+# Do this in a separate file to see the generated help:
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?ones
+
+#' Returns a data.frame of ones which can be used in forecast model inputs
+#'
+#' The function returns a data.frame of ones, which can e.g. be used as an intercept input in a model.
+#'
+#' See ??(ref to model vignette)
+#'
+#' @title Create ones for model input intercept
+#' @return A data.frame of ones
+#' @name ones
+#' @examples
+#'
+#' # A model
+#' model <- forecastmodel$new()
+#' # Use the function in the input definition
+#' model$add_inputs(mu = "ones()")
+#' # Set the forecast horizons
+#' model$kseq <- 1:4
+#' # During the transformation stage the ones will be generated for the horizons
+#' model$transform_data(subset(Dbuildingheatload, 1:7))
+#'
+#' @export
+
+ones <- function(){
+    # To find kseq, get the model (remember it is passed by reference, so don't change it without cloning)
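+    # (this assumes ones() is evaluated inside the model's transformation stage, e.g. via model$transform_data())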
+    model <- get("self", parent.env(parent.frame(4)))
+    # Get the data to find the all the names with k in data
+    data <- get("data", parent.env(parent.frame()))
+    n <- length(data$t)
+    # Generate the matrix of ones and return it as a data.frame
+    as.data.frame(matrix(1, nrow=n, ncol=length(model$kseq), dimnames=list(NULL, pst("k",model$kseq))))
+}
diff --git a/R/onlineforecast-package.R b/R/onlineforecast-package.R
new file mode 100644
index 0000000000000000000000000000000000000000..b869b8ca9866f113b45810d14bc1e5b81f6648ea
--- /dev/null
+++ b/R/onlineforecast-package.R
@@ -0,0 +1,9 @@
+#' onlineforecast
+#'
+#' This package provides functions to support forecast models which run in an online setting, like demand, solar and wind power forecasts updated regularly - often hourly up to 48 hours ahead. 
+#'
+#' @docType package
+#' @name onlineforecast
+#' @useDynLib onlineforecast
+#' @importFrom Rcpp sourceCpp
+NULL
diff --git a/R/operator_multiply.R b/R/operator_multiply.R
new file mode 100644
index 0000000000000000000000000000000000000000..d82c4207fb2d2096b72acc8f87dbd7c337d740ce
--- /dev/null
+++ b/R/operator_multiply.R
@@ -0,0 +1,83 @@
+## Do this in a separate file to see the generated help:
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?"%**%"
+
+#' Multiplication of each element in a list (x) with y
+#'
+#' Each element of x is multiplied with y using the usual elementwise '*' operator.
+#'
+#' Typical use is when a function, e.g. \code{\link{bspline}()}, returns a list of matrices (e.g. one for each base spline) and they should individually be multiplied with y (a vector, matrix, etc.).
+#' 
+#' Since this is intended to be used for forecast models in the transformation stage,
+#' there are some peculiarities:
+#'
+#' If the number of columns or the names of the columns are not equal for one element in x
+#' and y, then only the columns with same names are used, hence the resulting matrices can be
+#' of lower dimensions.
+#'
+#' See the ??(solar forecast vignette) for an example of use
+#' 
+#' @title Multiplication of list with y, elementwise
+#' @param x a list of matrices, data.frames, etc.
+#' @param y a vector, data.frame or matrix
+#' @return A list of same length of x
+#' @examples
+#'
+#' x <- list(matrix(1:9,3), matrix(9:1,3))
+#' x
+#' 
+#' y <- matrix(2,3,3)
+#' y
+#'
+#' x %**% y
+#'
+#' y <- 1:3
+#'
+#' x %**% y
+#'
+#' # Naming peculiarity
+#' nams(x[[1]]) <- c("k1","k2","k3")
+#' nams(x[[2]]) <- c("k2","k3","k4")
+#' y <- matrix(2,3,3)
+#' nams(y) <- c("k1","k3","k7")
+#'
+#' # Now only the matching horizons will be used
+#' x %**% y
+#' 
+#' @export
+
+"%**%" <- function(x, y) {
+    if( is.null(dim(y)) ){
+        ## y is not matrix like
+        lapply(x, function(xx) {
+            xx * y
+        })
+    }else{
+        ## y is matrix like
+        lapply(x, function(xx) {
+            ## Check if different horizon k columns
+            colmatch <- TRUE
+            if (ncol(xx) != ncol(y)) {
+                colmatch <- FALSE
+            }else if(any(nams(xx) != nams(y))){
+                colmatch <- FALSE
+            }
+            if(!colmatch){
+                ## Not same columns, take only the k in both
+                nms <- nams(xx)[nams(xx) %in% nams(y)]
+                xx <- xx[, nms]
+                y <- y[, nms]
+            }
+            ## Now multiply
+            val <- xx * y
+            ## Must be data.frame
+            if( is.null(dim(val)) ){
+                val <- data.frame(val)
+                nams(val) <- nms
+            }
+            return(val)
+        })
+    }
+}
diff --git a/R/par_ts.R b/R/par_ts.R
new file mode 100644
index 0000000000000000000000000000000000000000..187db70b4abe4322a717e6556326071d7a448222
--- /dev/null
+++ b/R/par_ts.R
@@ -0,0 +1,113 @@
+# # Do this in a separate file to see the generated help:
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?par_ts
+
+#' Set parameters for \code{\link{plot_ts}()} globally
+#'
+#' Often in a report some plot parameters must be set for all plots, which is done with \code{\link{par}()}.
+#'
+#' The parameters which are general for \code{\link{plot_ts}()} can be set and saved in \code{\link{options}()},
+#' and they will then be applied as default in all calls to plot_ts(). See the examples how to do this.
+#'
+#' If any of these parameters are given to \code{\link{plot_ts}()}, then it will be used over the default.
+#'
+#' @title Set parameters for \code{\link{plot_ts}()}
+#' @param fromoptions logical: Read the parameters set in \code{\link{options}("par_ts")$par_ts}
+#' @param p A list of the parameters (e.g. as returned by this function), used instead of the defaults or the globally set values.
+#' @param ... any of the following parameters can be set:
+#' @param xnm "t": The name of the time variable (used for the x-axis)
+#' @param legendspace 10: Horizontal space for the legend in character spaces
+#' @param legendcex 1: Scaling of the legend
+#' @param legendrangeshow TRUE: Include the range for each variable in the legend
+#' @param ylimextend c(lower,upper): Extend the ylim for each plot with a proportion, separately for the lower and upper limit
+#' @param yaxisextend c(lower,upper): Extend the yaxis for each plot with a proportion, separately for the lower and upper limit
+#' @param mainsline (numeric) with the \code{line} for the main in the plots.
+#' @param cex (numeric) The cex to use for the \code{plot_ts} plots.
+#' @param plotfun The function used for plotting, as default \code{lines}.
+#' @param xaxisformat (character) The format of the xaxis, see \code{\link{strptime}()}.
+#' @param colorramp colorRampPalette: The colorramp used for setting multiple colors in each plot
+#'
+#' @examples
+#'
+#' # Data for plots
+#' D <- subset(Dbuildingheatload, 1:192)
+#'
+#' # See the parameters which can be set
+#' p <- par_ts()
+#' names(p)
+#' p$xnm
+#'
+#' # Using the default values
+#' plot_ts(D, c("heatload","Ta"), kseq=1:24)
+#'
+#' # Set the parameters directly
+#' plot_ts(D, c("heatload","Ta"), kseq=1:24, legendcex=0.8, legendspace=8)
+#'
+#' # Set parameters to be given in a list
+#' p <- par_ts()
+#' p$legendcex <- 0.8
+#' p$legendspace <- 8
+#'
+#' # Use those parameters
+#' plot_ts(D, c("heatload","Ta"), kseq=1:24, p=p)
+#'
+#' # Set globally (if a parameter is not specified, the default value will be used)
+#' options(par_ts=p)
+#'
+#' # Now the global parameters will be used
+#' plot_ts(D, c("heatload","Ta"), kseq=1:24)
+#'
+#' # A parameter provided directly will still be used, e.g. change the plotting function
+#' plot_ts(D, c("heatload","Ta"), kseq=1:24, plotfun=points)
+#'
+#' # Control more precisely the plotting function
+#' plot_ts(D, c("heatload","Ta"), kseq=1:24, plotfun=function(x, ...){ points(x, type="b", ...)})
+#'
+#' # Another colorramp function
+#' p$colorramp <- rainbow
+#' options(par_ts=p)
+#' plot_ts(D, c("heatload","Ta"), kseq=1:24)
+#' 
+#' @export
+par_ts <- function(fromoptions=FALSE, p=NA, ...){
+    # Take the values in options= if they are there
+    if(is.na(p)[1]){
+        if(fromoptions & !is.null(options("par_ts")$par_ts)){
+            p <- options("par_ts")$par_ts
+        }else{
+            # Return a list with the default values
+            p <- list()
+            # Name of the variable for the x axis
+            p$xnm <- "t"
+            # Legend:
+            p$legendspace <- 10       # Space for the legend
+            p$legendcex <- 1          # Cex for the legend
+            p$legendrangeshow <- TRUE # Add the range to the legendtext
+            #
+            p$ylimextend <- c(0,0.1)
+            #
+            p$yaxisextend <- c(0,-0.25)
+            # Default is NA, it will be set depending on the t in plot_ts_series
+            p$xaxisformat <- NA
+            #
+            p$cex <- 1
+            #
+            p$mainsline <- -1.2
+            # The default plot function, overwrite it with another function to change it
+            p$plotfun <- lines
+            # Color function, can be replaced with in-built or others, e.g. rainbow()
+            p$colorramp <- colorRampPalette(c("black","cyan","purple","blue","red","green"))
+        }
+    }
+    # Replace all the parameters given in ...
+    args <- list(...)
+    nms <- nams(p)[nams(p) %in% nams(args)]
+    if (length(nms) > 0) {
+        for(nm in nms){
+            # Overwrite the default with the value given in ...
+            p[[nm]] <- args[[nm]]
+        }
+    }
+    return(p)
+}
diff --git a/R/persistence.R b/R/persistence.R
new file mode 100644
index 0000000000000000000000000000000000000000..accdee8bc4ccfa8ad7bd552b523c68464bd73c9e
--- /dev/null
+++ b/R/persistence.R
@@ -0,0 +1,46 @@
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?persistence
+
+#' Generate persistence and periodic persistence forecasts
+#'
+#' Generate a forecast matrix using persistence. With simple persistence the current value of y, i.e. the value at time t, is used as the forecast for all horizons.
+#'
+#' A seasonal persistence with a specific period can be generated by setting the argument \code{perlen} to the length of the period in steps. The value used for the forecast is then the latest available value which matches the seasonality of time t+k, see the examples.
+#'
+#' 
+#' @title Generate persistence forecasts
+#' @param y (numeric) The model output to be forecasted.
+#' @param kseq (integer) The horizons to be forecasted.
+#' @param perlen (integer) The period length for seasonal persistence.
+#' @return Forecast matrix as a \code{data.frame} (named \code{Yhat} in similar functions)
+#' @examples
+#'
+#' # Simple persistence just copies the current value for the forecasts
+#' persistence(1:10, kseq=1:4)
+#'
+#' # Seasonal persistence takes the value perlen steps back
+#' persistence(1:10, kseq=1:4, perlen=4)
+#'
+#' # If the horizons are longer than perlen, then it's the latest perlen*i steps back, where i is an integer
+#' persistence(1:10, kseq=1:12, perlen=4)
+#'
+#' 
+#' @export
+
+persistence <- function(y, kseq, perlen=NA){
+    if(is.na(perlen)){
+        # Not periodic, so just regular persistence
+        Yhat <- as.data.frame(sapply(kseq, function(k){
+            y
+        }))
+    }else{
+        # A periodic persistence
+        Yhat <- as.data.frame(sapply(kseq, function(k){
+            lag(y, (perlen-k)%%perlen)
+        }))
+    }
+    names(Yhat) <- pst("k",kseq)
+    return(Yhat)
+}
diff --git a/R/plot_ts.R b/R/plot_ts.R
new file mode 100644
index 0000000000000000000000000000000000000000..86899aae71172cf42b83178f16e3ac0c04a45b88
--- /dev/null
+++ b/R/plot_ts.R
@@ -0,0 +1,539 @@
+# Do this in a separate file to see the generated help:
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?plot_ts
+#?plot_ts.data.list
+#?plot_ts.data.frame
+
+#' Plot time series of observations and forecasts (lagged to be aligned in time).
+#'
+#' Generates time series plots depending on the variables matched by each regular expression given in the \code{patterns} argument.
+#' 
+#' The forecasts matrices in the \code{data.list} given in \code{object} will be lagged to be aligned in time (i.e. k-step forecasts will be lagged by k).
+#'
+#' Use the plotly package if argument \code{usely} is TRUE, see \code{\link{plotly_ts}()}.
+#'
+#' @title Time series plotting
+#' @param object A \code{data.list} or \code{data.frame} with observations and forecasts, note the class-specific arguments \code{kseq} and \code{namesdata} described below.
+#' @param patterns A character vector with regular expressions, see examples for use.
+#' @param xlim The time range as a character of length 2 and form "YYYY-MM-DD" or POSIX. Date to start and end the plot.
+#' @param ylims The \code{ylim} for each plot given in a list.
+#' @param xlab A character with the label for the x-axis.
+#' @param ylabs A character vector with labels for the y-axes.
+#' @param mains A character vector with the main for each plot.
+#' @param mainouter A character with the main at the top of the plot (can also be added afterwards with \code{title(main, outer=TRUE)}).
+#' @param legendtexts A list with the legend texts for each plot (replaces the names of the variables).
+#' @param xat POSIXt specifying where the ticks on x-axis should be put.
+#' @param usely If TRUE then plotly will be used.
+#' @param plotit If FALSE then the plot will not be generated, only data returned.
+#' @param p The plot_ts parameters in a list, as generated with the function \code{\link{par_ts}()}.
+#' @param ... Parameters passed to \code{\link{par_ts}}, see the list of parameters in \code{?\link{par_ts}}.
+#' @seealso
+#' \code{\link{par_ts}} for setting plot control parameters.
+#'
+#' \code{\link{regex}} for regular expressions to select which variables to plot.
+#' 
+#' @return A list with a data.frame with the data for each plot, if usely=TRUE, then a list of the figures (drawn with print(subplot(L, shareX=TRUE, nrows=length(L), titleY = TRUE))).
+#'
+#' @examples
+#'
+#' # Time series plots for \code{data.list}, same as for \code{data.frame} except use of \code{kseq}
+#' D <- Dbuildingheatload
+#' plot_ts(D, c("heatload","Ta"), kseq=c(1,24))
+#' # Make two plots (and set the space for the legend)
+#' plot_ts(D, c("heatload","Ta"), kseq=c(1,24), legendspace=11)
+#' # Only the Ta observations 
+#' plot_ts(D, c("heatload","Ta.obs$"), kseq=c(1,24), legendspace=11)
+#'
+#' # Give labels
+#' plot_ts(D, c("heatload","Ta"), kseq=c(1,24), xlab="Time", ylabs=c("Heat (kW)","Temperature (C)"))
+#' # Mains (see mainsline in par_ts())
+#' plot_ts(D, c("heatload","Ta"), kseq=c(1,24), mains=c("Heatload","Temperature"), mainsline=c(-1,-2))
+#'
+#' # Format of the xaxis (see par_ts())
+#' plot_ts(D, c("heatload","Ta"), kseq=c(1,24), xaxisformat="%Y-%m-%d %H:%m")
+#'
+#' # Return the data, for other plots etc.
+#' L <- plot_ts(D, c("heatload","Ta"), kseq=c(1,24))
+#' names(L[[1]])
+#' names(L[[2]])
+#'
+#'
+#' # Use plotly
+#' L <- plot_ts(D, c("heatload","Ta"), kseq=c(1,24), usely=TRUE, xlab="Time", ylabs=c("Heat (kW)","Temperature (C)"))
+#'
+#' # From plotly the figures are returned and can be further manipulated
+#' # e.g. put the legend in the top by
+#' L[[length(L)]] <- L[[length(L)]] %>% layout(legend = list(x = 100, y = 0.98))
+#' print(subplot(L, shareX=TRUE, nrows=length(L), titleY = TRUE))
+#'
+#'
+#' @rdname plot_ts
+#' @export
+plot_ts <- function(object, patterns=".*", xlim = NA, ylims = NA, xlab = "", ylabs = NA,
+                    mains = "", mainouter="", legendtexts = NA, xat = NA, usely = FALSE, plotit = TRUE, p = NA, ...){
+    UseMethod("plot_ts")
+}
+
+
+
+#' @param kseq For \code{class(object)=="data.list"} an integer vector, default = NA. Control which forecast horizons to include in the plots. If NA all the horizons will be included.
+#' @rdname plot_ts
+#' @export
+plot_ts.data.list <- function(object, patterns=".*", xlim = NA, ylims = NA, xlab = "", ylabs = NA,
+                              mains = "", mainouter="", legendtexts = NA, xat = NA, usely=FALSE, plotit = TRUE, p=NA, kseq = NA, ...) {
+    # Take par_ts setup parameters from options if there
+    p <- par_ts(fromoptions=TRUE, p=p, ...)
+    #
+    DL <- object
+    # Do a bit of checking
+    if(is.null(DL$t)){ stop("No 't' in the data.list.")}
+    # Should a subset be taken according to xlim?
+    if(!is.na(xlim[1]) | length(xlim) > 1){
+            if(is.na(xlim[1])) { xlim[1] <- DL$t[1] }
+            if(length(xlim) == 1) { xlim[2] <- as.character(DL$t[length(DL$t)]) }
+            DL <- subset(DL, in_range(xlim[1], DL$t, xlim[2]))
+        }
+    # More checking
+    if(length(DL$t) == 0){ stop(pst("No data in the time range. ",xlim[1]," to ",xlim[2]))}
+    # set kseq if not specified
+    if( is.na(kseq[1]) ){
+        tmp <- unique(unlist(lapply(DL, function(x){names(x)})))
+        tmp <- tmp[grep("^[kh][[:digit:]]+$", tmp)]
+        if( length(tmp) > 0 ){ kseq <- sort(as.integer(gsub("[kh]","",tmp))) }
+    }
+    # Generate a data.frame with the series to be plotted
+    X <- lapply_cbind_df(patterns, function(pattern) {
+        # Find the variables to plot
+        nms <- grep(pattern, names(DL), value = TRUE)
+        #
+        if(length(nms) == 0){
+            warning("No names where found matching the pattern '",pattern,"'")
+            tmp <- as.data.frame(matrix(NA,nrow=length(DL$t),ncol=1))
+            names(tmp)[1] <- pattern
+            return(tmp)
+        }else{
+            # Go through the names in nms
+            do.call("cbind", lapply(nms, function(nm){
+                if(is.null(dim(DL[[nm]]))) {
+                    # It is a vector, just return it
+                    X <- data.frame(DL[[nm]])
+                    names(X) <- nm
+                    return(X)
+                } else {
+                    # It's a matrix
+                    # Find the columns with 'k' and digits
+                    # Note the convention:
+                    #   - starting with 'k' it's a forecast for t+k, and must be lagged to sync
+                    #   - if it starts with 'h' then it's aligned with the observations for the k'th horizon, so it is not lagged
+                    helper <- function(prefix){
+                        i <- which(nams(DL[[nm]]) %in% pst(prefix,kseq))
+                        if(length(i) > 0) {
+                            X <- DL[[nm]][ ,i]
+                            # Started with k, then it's forecasts and must be lagged to sync
+                            if( prefix == "k" ){
+                                ks <- as.integer(gsub("k","",nams(DL[[nm]])[i]))
+                                X <- lag(X, lag=ks)
+                            }
+                            # Fix if it is a vector
+                            if(is.null(dim(X))) {
+                                X <- as.data.frame(X)
+                                names(X) <- nams(DL[[nm]])[i]
+                            }
+                            nams(X) <- pst(nm, "_", nams(X))
+                            return(X)
+                        }else{
+                            return(NULL)
+                        }
+                    }
+                    X <- helper("k")
+                    if(is.null(X[1])){
+                        X <- helper("h")
+                    }
+                    if(is.null(X[1])){
+                        # Not started with "k" or "h": Just take all columns
+                        X <- as.data.frame(DL[[nm]])
+                        names(X) <- pst(nm,"_",names(X))
+                    }
+                    return(X)
+                }
+            }))
+        }
+    })
+    # 
+    if(any(duplicated(nams(X)))){
+        X <- X[ ,unique(nams(X))]
+    }
+    # Add "t" for the x-axis
+    X$t <- DL$t
+    # Since we added "_k" or "_h" to the matrix variables, we have to
+    # strip it off, and pass on as the names to search for with patterns
+    namesdata <- unlist(getse(strsplit(nams(X), "_k|_h"), 1))
+    # Use the plot_ts function which takes the data.frame
+    plot_ts.data.frame(X, patterns, ylims = ylims, xlab = xlab, ylabs = ylabs, mains = mains, mainouter = mainouter,
+                       legendtexts = legendtexts, xat = xat, usely=usely, plotit = plotit, p=p, namesdata=namesdata, ...)
+}
+
+
+# Plot all with prefix
+#' @param namesdata For \code{class(object)=="data.frame"} a character vector. Names of columns in object to be searched in, instead of \code{names(object)}.
+#' @rdname plot_ts
+#' @export
+plot_ts.data.frame <- function(object, patterns=".*", xlim = NA, ylims = NA, xlab = "", ylabs = NA,
+                               mains = NA, mainouter="", legendtexts = NA, xat = NA, usely=FALSE, plotit = TRUE, p = NA, namesdata=NA, ...) {
+    # Take par_ts setup parameters from options if there
+    p <- par_ts(fromoptions=TRUE, p=p, ...)
+    #
+    data <- object
+    # Do a bit of checking
+    if(nrow(data) == 0){ stop("No rows in the data.frame.")}
+    if(is.null(data[ ,p$xnm])){ warning("No 't' or xnm found. If time is not in 't', then specify it in xnm (either as argument or in options(\"par_ts\")).")}
+    #
+    if(!is.null(data[ ,p$xnm])){
+        if(is.na(xlim[1])) { xlim[1] <- data[1,p$xnm] }
+        if(length(xlim)==1) { xlim[2] <- data[nrow(data),p$xnm] }
+        data <- data[in_range(xlim[1], data[ ,p$xnm], xlim[2]), ]
+    }
+    # More checking
+    if(nrow(data) == 0){ stop(pst("No data in the time range. ",xlim[1]," to ",xlim[2]))}
+    # Extend all individual plots vars, if not set
+    if(is.na(mains[1])){ mains <- rep(NA,length(patterns)) }
+    mainsline <- p$mainsline
+    if(is.na(mainsline[1])){ mainsline <- rep(NA,length(patterns)) }
+    if(is.na(ylims[1])){ ylims  <- as.list(rep(NA,length(patterns))) }
+    if(is.na(ylabs[1])){ ylabs  <- rep(NA,length(patterns)) }
+    if(is.na(legendtexts[1])){ legendtexts <- as.list(rep(NA,length(patterns))) }
+    #
+    if(usely){
+        # with plotly
+        if(requireNamespace("plotly", quietly = TRUE)){
+            library("plotly")
+        }else{
+            stop("The plotly package must be installed.")
+        }
+        #
+        Liseq <- list()
+        for(ii in 1:length(patterns)) {
+            Liseq[[ii]] <- plot_ts_iseq(data, patterns[ii], p$xnm, namesdata)
+        }
+        nlines <- length(unlist(Liseq))
+        # Longest name
+        maxchar <- max(nchar(names(data)[unique(unlist(Liseq))]))
+        #colormap <- p$colorramp(nlines)
+        #
+        L <- list()
+        for(ii in 1:length(patterns)) {
+            iseq <- Liseq[[ii]]
+            #
+            fig <- plot_ly(x=data[ ,p$xnm])
+            for(i in 1:length(iseq)){
+                fig <- fig %>% add_lines(y = data[ ,iseq[i]],
+                                         name = names(data)[iseq[i]],
+                                         # color = colormap[iii]
+                                         )#, legendgroup = paste0('group',ii))
+            }
+            if(ii < length(patterns)){
+                # Add empty to make legend gap
+                fig <- fig %>% add_lines(y = rep("NA",nrow(data)), name=strrep("-",maxchar), line=list(color="white"))
+            }
+            # Add ylabs?
+            if(!is.na(ylabs)[1]){
+                fig <- fig %>% layout(yaxis=list(title=ylabs[ii]))
+            }
+            fig <- fig %>% layout(xaxis=list(title=xlab))
+            # Keep it
+            L[[ii]] <- fig
+        }
+        # Center legend
+        L[[ii]] <- L[[ii]] %>% layout(legend = list(x = 100, y = 0.5))
+        # Draw it
+        if(plotit){
+            print(subplot(L, shareX=TRUE, nrows=length(L), titleY = TRUE))
+        }
+        # Return if needed
+        invisible(L)
+    }else{
+        #
+        oldpar <- setpar("ts", mfrow=c(length(patterns),1), cex=p$cex)
+        par(xpd=TRUE, mar = c(0, 4, 0.5, p$legendspace))
+        on.exit(par(oldpar))
+        #
+        L <- lapply(1:length(patterns), function(i){
+            df <- plot_ts_series(data, patterns[i], iplot=i, ylim=ylims[[i]], xlab=xlab, legendtext = legendtexts[[i]],
+                           main=mains[i], mainline=mainsline[i], xat = xat, plotit=plotit, p=p, namesdata=namesdata,
+                           xaxis=(i==length(patterns)), ...)
+            title(mainouter, outer=TRUE)
+            if (!is.na(ylabs[1])){
+                title(ylab = ylabs[i], yaxt = "s")
+            }
+            return(df)
+        })
+        invisible(L)
+    }
+}
+
+#' @rdname plot_ts
+#' @export
+plot_ts.matrix <- plot_ts.data.frame
+
+
+plot_ts_iseq <- function(data, pattern, xnm, namesdata){
+    iseq <- integer(0)
+    # Use these names when finding columns to plot
+    if(is.na(namesdata)[1]){
+        nms <- nams(data)
+    }else{
+        nms <- namesdata
+    }
+    # Find indexes of patterns in data
+    # Do the for loop, to secure the order of the patterns between "|"s
+    for (pf in strsplit(pattern, "\\|")[[1]]) {
+        iseq <- c(iseq, grep(pf, nms))
+    }
+    # Only take unique (keeps the order)
+    iseq <- unique(iseq) 
+    #
+    # Remove p$xnm if in nms
+    iseq <- iseq[!nms[iseq] == xnm]
+    #
+    return(iseq)
+}
+
+
+# Plot all columns found with regex pattern
+plot_ts_series <- function(data, pattern, iplot = 1,
+                           ylim = NA, xlab = "", main = "", mainline = -1.2, legendtext = NA, xat = NA, plotit = TRUE, p = NA, namesdata = NA, xaxis = TRUE, ...) {
+    #
+    # Take par_ts setup parameters from options or defaults
+    p <- par_ts(fromoptions=TRUE, p=p, ...)
+    #
+    iseq <- plot_ts_iseq(data, pattern, p$xnm, namesdata)
+    # Check if p$xnm is in the data
+    if (any(names(data) == p$xnm)) {
+        x <- data[, p$xnm]
+    } else {
+        x <- 1:nrow(data)
+    }
+    #
+    if(plotit){
+        #
+        xlim <- c(min(x)+diff(range(x))*0.02, max(x))
+        if(any(is.na(xlim))){ stop("Could not calculate range of x, probably there is NA in t!") }
+        #
+        if (all(is.na(iseq)) | length(iseq)==0){
+            # No series to plot
+            legendtext <- pst(pattern," not found")
+            colormap <- 1
+            ylim <- c(0,1)
+            yat <- NA
+        }else{
+            # Limits on y-axis
+            if (is.na(ylim[1])) {
+                # For some weird reason range doesn't work with a data.frame of logicals, it works only on a vector of logicals
+                if(all(sapply(iseq, function(i){ is.logical(data[, i]) }))){
+                    # All are logicals
+                    ylim <- c(0,1)
+                }else{
+                    ylim <- range(data[, iseq], na.rm = TRUE)
+                    if(any(is.na(ylim))){
+                        legendtext <- pst(pattern," all NA")
+                        colormap <- 1
+                        ylim <- c(0,1)
+                        yat <- NA
+                    }
+                }
+            }
+            #
+            colormap <- p$colorramp(length(iseq))
+            #
+            # EXTEND THE YLIM: to make room for multiple plots
+            ylim <- ylim + c(-1,1) * diff(ylim) * p$ylimextend
+            # for axis
+            ylimmod <- ylim + c(-1,1) * diff(ylim) * p$yaxisextend
+            #
+            # Make the legend text
+            if (p$legendrangeshow & is.na(legendtext[1])){
+                rngtext <- do.call("rbind", lapply(1:length(iseq), function(i) {
+                    tmp <- sapply(range(data[, iseq[i]], na.rm = TRUE), function(x){
+                        if( x <= 10 ){ return(signif(x, digits = 2)) }else{ return(round(x)) } })
+                    gsub("\\s+"," ",paste(tmp, collapse = " to "), perl=TRUE)
+                }))
+                legendtext <- paste0(nams(data)[iseq], ": ", rngtext)
+            }else{
+                legendtext <- paste0(nams(data)[iseq])
+            }
+        }
+        #
+        plot(x, x, type = "n", xlim = xlim, ylim = ylim, xaxs="r", yaxt = "n", bty = "n",
+             xlab = "", ylab = "")
+        # For grid
+        xb <- c(xlim[1]-0.04*diff(xlim), xlim[2]+0.01*diff(xlim))
+        # yb <- c(ylim[1]-0.04*diff(ylim), ylim[2]+0.01*diff(ylim))
+        if(all(sapply(data[ ,iseq], class) == "logical")){
+            # Its all logical
+            yb <- c(0,1)
+        }else{
+            yb <- range(data[ ,iseq], na.rm=TRUE) #c(ylim[1]-0.04*diff(ylim), max(data[ ,iseq],na.rm=TRUE))#, ylim[2]+0.01*diff(ylim))
+        }
+        # BACKGROUND
+        # polygon(c(xb[1],xb,rev(xb)), c(yb,rev(yb),yb[1]), col="grey95", lty=0)
+        # GRID
+        if(is.na(xat[1])){
+            xat <- pretty(x)
+            irm <- which(xat < min(x) | xat > max(x))
+            if(length(irm)){ xat <- xat[-irm] }
+        }
+        if(!"yat" %in% ls()){
+            # Where to put y grid and labels
+            yat <- pretty(ylimmod, 3)
+            yat <- yat[(ylim[1]-0.04*diff(ylim)) < yat]
+            yat <- yat[yat <= ylim[2]]
+            sapply(yat, function(y){ lines(xb,c(y,y), col="lightgrey", lty="dotted") })
+            axis(2, yat, lwd=0, lwd.ticks=1)
+            lines(rep(xb[1],2), range(yat))
+        }
+        # horizontal grid
+        ygrid <- c(ylim[1]-0.04*diff(ylim), max(yb,yat))
+        sapply(xat, function(x){ lines(c(x,x),ygrid,col="lightgrey",lty="dotted") })
+        #
+        # PLOT LINES
+        if (!all(is.na(iseq))){
+            for (i in 1:length(iseq)) {
+                p$plotfun(x, data[, iseq[i]], col = colormap[i])
+            }
+        }
+        title(main = main, line=mainline)
+        # 
+        # Make the xaxis
+        if( xaxis ){
+            if (any(nams(data) == p$xnm)) {
+                if (class(data[ ,p$xnm])[1] != "POSIXct") {
+                    axis(1, data[ ,p$xnm], xaxt = "s")
+                } else {
+                    # makes too few ticks: axis.POSIXct(1, data[ ,p$xnm], format = xaxisformat, xaxt = "s")
+                    if(is.na(xat[1])){ xat <- pretty(data[ ,p$xnm]) }
+                    # Format, per default NA, so make it here
+                    xaxisformat <- p$xaxisformat
+                    if(is.na(xaxisformat)){
+                        if( all(as.numeric(xat,unit="secs") %% (24*3600) == 0) ){
+                            xaxisformat <- "%Y-%m-%d"
+                        }else{
+                            xaxisformat <- "%Y-%m-%d %H:%M"
+                        }
+                    }
+                    axis.POSIXct(1, data[ ,p$xnm], at = xat, format = xaxisformat, xaxt = "s", lwd=1, lwd.ticks=1)
+                }
+            } else {
+                axis(1, 1:nrow(data), xaxt = "s", lwd=0, lwd.ticks=1)
+            }
+            mtext(xlab, 1, line=par()$mgp[1]-0.5)
+        }
+        #
+        legend(x=xlim[2]+2*0.01*diff(xlim), y=ylim[2], legend=legendtext, lty = 1, col = colormap, cex = p$legendcex, bg="white", box.lwd=0.5)
+    }
+    #
+    invisible(cbind(t=x, data[, iseq]))
+}
+
+
+
+#' Plot forecasts, residuals, cumulated residuals and RLS coefficients
+#'
+#' A useful plot for residual analysis and model validation of an RLS fitted forecast model.
+#'
+#' All parameters, except those described below, are simply passed to \code{\link{plot_ts}()}.
+#' 
+#' @title Plots for an rls_fit.
+#' @param fit An \code{rls_fit}.
+#' @param patterns See \code{\link{plot_ts}}. The default pattern plots the model output and forecasts, the residuals, the cumulated residuals, and the RLS coefficients for each input.
+#' @return The plotted data in a \code{data.list}.
+#'
+#' @seealso \code{\link{plot_ts}}.
+#' @examples
+#'
+#' # Fit a model (see the vignette 'setup-and-use-models')
+#' D <- Dbuildingheatload
+#' D$scoreperiod <- in_range("2010-12-22", D$t)
+#' model <- forecastmodel$new()
+#' model$output = "heatload"
+#' model$add_inputs(Ta = "Ta",
+#'                  mu = "ones()")
+#' model$add_regprm("rls_prm(lambda=0.9)")
+#' model$kseq <- c(3,18)
+#' fit1 <- rls_fit(NA, model, D, returnanalysis = TRUE)
+#'
+#' # Plot it
+#' plot_ts(fit1)
+#'
+#' # Plot it with plotly
+#' plot_ts(fit1, usely=TRUE)
+#'
+#' # Return the data
+#' Dplot <- plot_ts(fit1)
+#'
+#' # The RLS coefficients are now in a nice format
+#' head(Dplot$mu)
+#'
+#' @export
+plot_ts.rls_fit <- function(fit, patterns = c("^y$|^Yhat$","^Residuals$","CumAbsResiduals$",pst("^",names(fit$Lfitval[[1]]),"$")),
+                          xlim = NA, ylims = NA, xlab = "", ylabs = NA, mains = "", mainouter="", legendtexts = NA,
+                            xat = NA, usely=FALSE, plotit=TRUE, p=NA, kseq = NA, ...){
+    # Calculate the residuals
+    Residuals <- residuals(fit)
+    # Prepares a data.list for the plot
+    if(is.na(xlim[1])){
+        if(!("scoreperiod" %in% names(fit$data))){
+            isubset <- c(1,length(fit$data$t))
+        }else{
+            # Default is to plot the scoreperiod if there
+            isubset <- which(fit$data$scoreperiod)
+        }
+        isubset <- min(isubset,na.rm=TRUE):max(isubset,na.rm=TRUE)#1:length(fit$data$t)
+    }else{
+        isubset <- in_range(xlim[1], fit$data$t, xlim[2])
+    }
+    if(is.na(kseq[1])){
+        kseq <- fit$model$kseq
+    }
+    #
+    CumAbsResiduals <- onlineforecast:::lapply_cbind_df(kseq, function(k){
+        tmp <- abs(Residuals[isubset,pst("h",k)])
+        tmp[is.na(tmp)] <- 0
+        cumsum(tmp)
+    })
+    names(CumAbsResiduals) <- pst("h",kseq)
+    #
+    # Convert the parameter estimates
+    nmsinput <- names(fit$Lfitval[[1]])
+    tmp <- lapply(1:length(nmsinput), function(i){
+        # Name of the input
+        nm <- names(fit$Lfitval[[1]])[i]
+        # Take the fitted coefficient values for each horizon for the input
+        ik <- which(names(fit$Lfitval) %in% pst("k",kseq))
+        X <- onlineforecast:::lapply_cbind_df(ik, function(ii){
+            fit$Lfitval[[ii]][isubset,nm]
+        })
+        names(X) <- gsub("k","h",names(fit$Lfitval)[ik])
+        return(X)
+    })
+    names(tmp) <- nmsinput
+    #
+    data <- list(y=fit$data[[fit$model$output]][isubset], Yhat=fit$Yhat[isubset,pst("k",kseq)], Residuals=Residuals[isubset,pst("h",kseq)], CumAbsResiduals=CumAbsResiduals)
+    data <- c(data,tmp)
+    data$t <- fit$data$t[isubset]
+    class(data) <- "data.list"
+    if(plotit){
+        #
+        if(is.na(ylabs[1])){
+            ylabs <- c("Model output","Residuals","Cum. residuals",pst("Coef: ",nmsinput))
+        }
+        # The input names
+        #patterns[patterns == "!!RLSinputs!!"] <- pst("^",nmsinput,"$"))
+        # Make a plot of the RLS coefficients for each horizon
+        plot_ts(data, patterns, xlim = xlim, ylims = ylims, xlab = xlab, ylabs = ylabs,
+                mains = mains, mainouter=mainouter, legendtexts = legendtexts, xat = xat, usely=usely, p=p, kseq = kseq, ...)
+    }
+    # Return the data
+    invisible(data)
+}
diff --git a/R/plotly_ts.R b/R/plotly_ts.R
new file mode 100644
index 0000000000000000000000000000000000000000..70629801393122a8df60d43d78f52ccc8f410349
--- /dev/null
+++ b/R/plotly_ts.R
@@ -0,0 +1,48 @@
+## Do this in a separate file to see the generated help:
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?plotly_ts
+#?plotly_ts.data.frame
+
+#' Plot time series of observations and predictions, with correct lagging for onlineforecast type of data structures.
+#'
+#' Simply the same as \code{\link{plot_ts}()} with \code{usely=TRUE}, such that plotly is used.
+#'
+#' The \code{plotly} package must be loaded.
+#'
+#' Note that the plot parameters set with \code{\link{par_ts}()} have no effect on the \code{plotly} plots.
+#'
+#' @seealso
+#' \code{\link{plot_ts}}
+#' @examples
+#'
+#' D <- Dbuildingheatload
+#' plotly_ts(D, c("heatload","Ta"), kseq=c(1,24))
+#' plotly_ts(D, c("heatload","Ta"), kseq=c(1,24))
+#' plotly_ts(D, c("heatload","Ta$|Ta.obs$"), kseq=c(1,24))
+#'
+#' @export
+
+plotly_ts <- function(object, patterns=".*", xlim = NA, ylims = NA, xlab = "", ylabs = NA,
+                    mains = "", mainouter="", legendtexts = NA, xat = NA, usely = FALSE, p = NA, ...){
+    UseMethod("plotly_ts")
+}
+
+#' @export
+plotly_ts.data.list <- function(object, patterns=".*", xlim = NA, ylims = NA, xlab = "", ylabs = NA,
+                              mains = "", mainouter="", legendtexts = NA, xat = NA, usely=TRUE, p=NA, kseq = NA, ...) {
+    plot_ts.data.list(object=object, patterns=patterns, xlim = xlim, ylims = ylims, xlab = xlab, ylabs = ylabs,
+                      mains = mains, mainouter=mainouter, legendtexts = legendtexts, xat = xat, usely = usely, p = p, kseq=kseq, ...)
+}
+
+#' @export
+plotly_ts.data.frame <- function(object, patterns=".*", xlim = NA, ylims = NA, xlab = "", ylabs = NA,
+                              mains = "", mainouter="", legendtexts = NA, xat = NA, usely=TRUE, p=NA, namesdata=NA, ...) {
+    plot_ts.data.frame(object=object, patterns=patterns, xlim = xlim, ylims = ylims, xlab = xlab, ylabs = ylabs,
+                      mains = mains, mainouter=mainouter, legendtexts = legendtexts, xat = xat, usely = usely, p = p, namesdata=namesdata, ...)
+}
+
+## plotly_ts.rls_fit <- function(fit, xlim=NA, kseq=NA, plotit=TRUE){
+##     plotly_ts.rls_fit(fit, xlim=xlim, kseq=kseq, plotit=plotit, usely=TRUE)
+## }
diff --git a/R/pst.R b/R/pst.R
new file mode 100644
index 0000000000000000000000000000000000000000..00ed6b99e246cb482bb7d37883e41b807ef85fae
--- /dev/null
+++ b/R/pst.R
@@ -0,0 +1,6 @@
+#' @title Simple wrapper for paste0()
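+#' @param ... Arguments passed on to \code{paste0()}.
+#' @examples
+#' # Equivalent to paste0
+#' pst("k", 1:4)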
+#' @export
+
+pst <- function(...) {
+    paste0(...)
+}
diff --git a/R/resample.R b/R/resample.R
new file mode 100644
index 0000000000000000000000000000000000000000..63a78f13a84128f7f7d7098fbb1cab191625a6c9
--- /dev/null
+++ b/R/resample.R
@@ -0,0 +1,110 @@
+# Do this in a separate file to see the generated help:
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?resample
+#?resample.data.frame
+
+#' Make a downsampling to a lower sampling frequency
+#'
+#' Given an object with a column indicating the time points of the observations the
+#' function returns a similar object, where the function is applied for each new (and longer)
+#' interval.
+#'
+#' Typically it is used if for example 15 minute values should be made into 1 hour values.
+#'
+#' NOTE that it is always assumed that the time point is at the end of the time interval,
+#' e.g. if hourly values are returned, then "2019-01-01 01:00" indicates the first hour in 2019.
+#'
+#' All time points on the border between two intervals are assigned to the
+#' first interval of the two.
+#' 
+#' @title Resampling to equidistant time series
+#' @param object Can be data.frame
+#' @param ts (numeric) New sample period in seconds
+#' @param tstart A POSIXxx (or character or numeric), which indicates the first time point in the series returned
+#' @param tend A POSIXxx (or character or numeric), which indicates the last time point in the series returned
+#' @param timename (character) The name of the time column in object
+#' @param fun (function) The function to apply. Default is mean, such that average values are obtained
+#' @param quantizetime (logical) Should the new time points be set to the end of the time intervals, or should they also be the result of the fun function
+#' @param ... Passed on to the fun function
+#' @return A downsampled data.frame
+#' @examples
+#'
+#' # Generate some test data with 10 minutes sampling frequency for one day
+#' X <- data.frame(t=seq(asct("2019-01-01 00:10"),asct("2019-01-02"), by=10*60))
+#' 
+#' # A single sine over the day
+#' X$val <- sin(as.numeric(X$t)/3600*2*pi/(24))
+#'
+#' # Resample to hourly average values
+#' Xre <- resample(X, 3600)
+#' plot(X$t, X$val)
+#' lines(Xre$t, Xre$val, type="b", col=2)
+#'
+#' # Resample to hourly max values
+#' Xre <- resample(X, 3600, fun=max)
+#' lines(Xre$t, Xre$val, type="b", col=3)
+#'
+#' # Another starting time point
+#' Xre <- resample(X, 3600, tstart="2019-01-01 00:30")
+#' lines(Xre$t, Xre$val, type="b", col=4)
+#'
+#' 
+#' @export
+resample <- function(object, ts, tstart=NA, tend=NA, timename="t", fun=mean, quantizetime=TRUE, ...){
+    UseMethod("resample")
+}
+
+#' @export
+resample.data.frame <- function(object, ts, tstart=NA, tend=NA, timename="t", fun=mean, quantizetime=TRUE, ...)
+{
+    X <- object
+    # ----------------------------------------------------------------
+    # Do nothing if ts is NA
+    if(is.na(ts)){ return(X) }
+    
+    # ----------------------------------------------------------------
+    # If no start time is given, then set it to the first time value, floored with ts
+    if(is.na(tstart)){ tstart <- X[1,timename] - as.numeric(X[1,timename],"secs") %% ts }
+    # If no end time is given then set it
+    if(is.na(tend)){ tend <- X[nrow(X),timename]}
+
+    # ----------------------------------------------------------------
+    # Convert to POSIXct
+    tstart <- asct(tstart)
+    tend <- asct(tend)
+    
+    # ----------------------------------------------------------------
+    # Cut out the time period
+    X <- X[tstart<X[,timename] & X[,timename]<=tend,]
+    # Remove values with a NA value in time
+    X <- X[!is.na(X[,timename]), ]
+    
+    # ----------------------------------------------------------------
+    # Split into periods of length ts, and take the fun function of each period
+    X[ ,timename] <- (as.numeric(X[ ,timename], units="secs") - as.numeric(tstart, units="secs"))
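+    # -(x %/% -ts) is ceiling division by ts, so points exactly on an interval border fall in the earlier interval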
+    iSplit <- -(X[ ,timename] %/% -ts)
+    # Do the resampling
+    Xres <- aggregate(X, list(iSplit), fun, ...)
+    # Remove the "Group" column
+    Xres <- Xres[,-1]
+    # Convert time to POSIXct
+    Xres[ ,timename] <- tstart + Xres[ ,timename]
+
+    # Include intervals with NA in the result
+    Xres <- cbind(Xres,iSplit=unique(iSplit))
+    iSplit <- 1:-((as.numeric(tend, units="secs")-as.numeric(tstart, units="secs")) %/% -ts)
+    withNA <- data.frame(iSplit=iSplit)
+    Xres <- merge(Xres,withNA,all=TRUE)
+    # Remove the iSplit column
+    Xres <- Xres[,-match("iSplit",names(Xres))]
+    if(quantizetime)
+    {
+        # Set the time points to the end of each interval
+        time <- seq(tstart,by=ts,length.out=nrow(Xres)) + ts
+        Xres[,timename] <- time
+    }
+    
+    return(Xres)
+}
diff --git a/R/residuals.R b/R/residuals.R
new file mode 100644
index 0000000000000000000000000000000000000000..45b5bb963f40077ed9f1054aa66a4695b6071a18
--- /dev/null
+++ b/R/residuals.R
@@ -0,0 +1,58 @@
+#' Calculate the residuals given a forecast matrix and the observations.
+#'
+#' Simply give the forecast matrix and the observations to get the residuals for each horizon in the forecast matrix.
+#'
+#' The residuals returned are synced with the observations (i.e. k0) and the columns are names "hxx" (not kxx) to indicate this and will not be lagged in \code{\link{plot_ts}()}.
+#' 
+#' @title Calculate the residuals given a forecast matrix and the observations.
+#' @param Yhat The forecast matrix (with kxx as column names).
+#' @param y The observations vector
+#' @return A data.frame with the residuals for each horizon
+#'
+#' @examples
+#' # Just a vector to be forecasted
+#' n <- 100
+#' D <- data.list()
+#' D$t <- 1:n
+#' D$y <- c(filter(rnorm(n), 0.95, "recursive"))
+#' plot(D$y, type="l")
+#' 
+#' # Generate a forecast matrix with a simple persistence model
+#' D$Yhat <- persistence(D$y, kseq=1:4)
+#' 
+#' # The residuals for each horizon
+#' D$Resid <- residuals(D$Yhat, D$y)
+#' D$Resid
+#' # Note the names of the columns
+#' names(D$Resid)
+#' # which means that they are aligned with the observations and will not be lagged in the plot
+#' plot_ts(D, c("y|Yhat","Resid"))
+#'
+#' # Check that it matches (the forecasts are lagged in plot_ts such that the forecast for t+k is at t+k (and not t))
+#' plot_ts(D, c("y|Yhat","Resid"), xlim=c(1,10), kseq=1, plotfun=function(x,...){lines(x,...,type="b")})
+#'
+#' # Just for fun, see the auto-correlation function of the persistence 
+#' acf(D$Resid$h1, na.action=na.pass)
+#' acf(D$Resid$h4, na.action=na.pass)
+#'
+#' @rdname residuals
+#' @export
+residuals.data.frame <- function(Yhat, y){
+    # Add some checking at some point
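+    # "+k" lags each column "kxx" by xx steps, so the forecasts are aligned in time with the observations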
+    Residuals <- y - lag(Yhat, "+k")
+    # Named with hxx (it's not a forecast, but an observation available at t)
+    names(Residuals) <- gsub("k","h",names(Residuals))
+    #
+    return(Residuals)
+}
+
+#' @rdname residuals
+#' @export
+residuals.matrix <- residuals.data.frame
+
+#' @rdname residuals
+#' @param fit The value returned from fitting a forecastmodel (currently \code{\link{lm_fit}} or \code{\link{rls_fit}}).
+#' @export
+residuals.forecastmodel_fit <- function(fit){
+    residuals(fit$Yhat, fit$data[[fit$model$output]])
+}
diff --git a/R/rls_fit.R b/R/rls_fit.R
new file mode 100644
index 0000000000000000000000000000000000000000..7dfb91ebf53f203f967651ca1dc46a7f118dd262
--- /dev/null
+++ b/R/rls_fit.R
@@ -0,0 +1,212 @@
+# Do this in a separate file to see the generated help:
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?rls_fit
+
+#' This function fits the onlineforecast model to the data and returns either: model validation data or just the score value.
+#'
+#' 
+#' This function has three main purposes (all three are demonstrated in the examples):
+#' 
+#' - Returning model validation data, such as residuals and recursive estimated parameters.
+#' 
+#' - For optimizing the parameters using an R optimizer function. The parameters to optimize for is given in \code{prm}
+#'
+#' - Fitting a model to data and saving the final state in the model object (such that from that point the model can be updated recursively as new data is received).
+#' 
+#' Note, if \code{scorefun} is given, then \code{data$scoreperiod} must be set (int or logical) to define which points are evaluated by the scorefun.
+#' 
+#' @title Fit an onlineforecast model with Recursive Least Squares (RLS).
+#' @param prm vector with the parameters for fitting. Deliberately as the first element to be able to use \code{\link{optim}} or other optimizer. If NA then the model will be fitted with the current values in the input expressions, see examples.
+#' @param model as an object of class forecastmodel: The model to be fitted.
+#' @param data as a data.list with the data to fit the model on.
+#' @param scorefun as a function (optional), default is \code{\link{rmse}}. If the score function is given it will be applied to the residuals of each horizon (only data$scoreperiod is included).
+#' @param returnanalysis as a logical. If FALSE then the sum of the scoreval on all horizons are returned, if TRUE a list with values for analysis.
+#' @param runcpp logical: If true the c++ implementation of RLS is run, if false the R implementation is run (slower).
+#' @param printout logical: If TRUE the offline parameters and the score function value are printed.
+#' @return Depends on:
+#' 
+#'     - If \code{returnanalysis} is TRUE a list containing:
+#' 
+#'         * \code{Yhat}: data.frame with forecasts for \code{model$kseq} horizons.
+#'
+#'         * \code{model}: The forecastmodel object cloned deep, so can be modified without changing the original object.
+#' 
+#'         * \code{data}: data.list with the data used, see examples on how to obtain the transformed data.
+#'
+#'         * \code{Lfitval}: list with RLS coefficients in a data.frame for each horizon, use \code{\link{plot_ts.rls_fit}} to plot them and to obtain them as a data.frame for each coefficient.
+#'
+#'         * \code{scoreval}: data.frame with the scorefun result on each horizon (only scoreperiod is included).
+#'
+#'     - If \code{returnanalysis} is FALSE (and \code{scorefun} is given): The sum of the score function on all horizons (specified with model$kseq).
+#'
+#' @seealso
+#' For optimizing parameters \code{\link{rls_optim}()}, for summary \code{summary.rls_fit}, for plotting \code{\link{plot_ts.rls_fit}()}, and the other functions starting with 'rls_'.
+#'
+#' @examples
+#'
+#'
+#' # Take data (See vignette ??(ref) for better model and more details)
+#' D <- subset(Dbuildingheatload, c("2010-12-15", "2011-01-01"))
+#' D$y <- D$heatload
+#' # Define a model 
+#' model <- forecastmodel$new()
+#' model$output <- "y"
+#' model$add_inputs(Ta = "Ta",
+#' 		    mu = "ones()")
+#' model$add_regprm("rls_prm(lambda=0.99)")
+#'
+#' # Before fitting the model, define which points to include in the evaluation of the score function
+#' D$scoreperiod <- in_range("2010-12-20", D$t)
+#' # And the sequence of horizons to fit for
+#' model$kseq <- 1:6
+#'
+#' # Now we can fit the model with RLS and get the model validation analysis data
+#' fit <- rls_fit(model = model, data = D)
+#' # What did we get back?
+#' names(fit)
+#' # The one-step forecast
+#' plot(D$y, type="l")
+#' lines(fit$Yhat$k1, col=2)
+#' # The one-step RLS coefficients over time (Lfitval is a list of the fits for each horizon)
+#' plot(fit$Lfitval$k1$Ta, type="l")
+#'
+#' # A summary
+#' summary(fit)
+#' # Plot the fit
+#' plot_ts(fit, kseq=1)
+#'
+#' # Fitting with lower lambda makes the RLS coefficients change faster
+#' fit2 <- rls_fit(prm = c(lambda=0.9), model, D)
+#' plot_ts(fit2, kseq=1)
+#'
+#'
+#' # It can return a score
+#' rls_fit(c(lambda=0.9), model, D, scorefun=rmse, returnanalysis=FALSE)
+#'
+#' # Such that it can be passed to an optimizer (see ?rls_optim for a nice wrapper of optim)
+#' val <- optim(c(lambda=0.99), rls_fit, model = model, data = D, scorefun = rmse, returnanalysis=FALSE)
+#' val$par
+#' # Which can then simply be applied
+#' rls_fit(val$par, model, D, scorefun=rmse, returnanalysis=FALSE)
+#' # see ?rls_optim for how optim is wrapped for a little easier use
+#'
+#' # See rmse as a function of horizon
+#' fit <- rls_fit(val$par, model, D, scorefun = rmse)
+#' plot(fit$scoreval, xlab="Horizon k", ylab="RMSE")
+#' # See ?score_fit for a little more consistent way of calculating this
+#'
+#'
+#' # Try adding a low-pass filter to Ta
+#' model$add_inputs(Ta = "lp(Ta, a1=0.92)")
+#' # To obtain the transformed data, i.e. the data which is used as input to the RLS
+#' model$reset_state()
+#' # Generate the transformed data
+#' datatr <- model$transform_data(D)
+#' # What did we get?
+#' str(datatr)
+#' # See the effect of low-pass filtering
+#' plot(D$Ta$k1, type="l")
+#' lines(datatr$Ta$k1, col=2)
+#' # Try changing the 'a1' coefficient and rerun
+#' # ?rls_optim for how to optimize also this coefficient
+#'
+#' 
+#' @export
+
+rls_fit <- function(prm=NA, model, data, scorefun = NA, returnanalysis = TRUE,
+                    runcpp = TRUE, printout = TRUE){
+    # Check that the model is setup correctly, it will stop and print a message if not
+    model$check(data)
+    
+    # Function for initializing an rls fit:
+    # - it will change the "model" input (since it is an R6 class and thus passed by reference)
+    # - If scorefun is given, e.g. rmse() then the value of this is returned
+    #
+
+    if(printout){
+        # Should here actually only print the ones that were found and changed?
+        cat("----------------\n")
+        if(is.na(prm[1])){
+            cat("prm=NA, so current parameters are used.\n")
+        }else{
+            print(prm)
+        }
+    }
+
+    # First insert the prm into the model input expressions
+    model$insert_prm(prm)
+
+    # Since rls_fit is run from scratch, reset the stored input data (only needed when running iteratively)
+    model$datatr <- NA
+    model$yAR <- NA
+
+    # Reset the model state (e.g. inputs state, stored iterative data, ...)
+    model$reset_state()
+    # Generate the 2nd stage inputs (i.e. the transformed data)
+    datatr <- model$transform_data(data)
+
+    # Initialize the fit for each horizon
+    # The number of coefficients to fit equals the number of transformed inputs
+    np <- length(datatr)
+
+    #
+    model$Lfits <- lapply(model$kseq, function(k){
+        fit <- list(k = k,
+                    # Init values for the parameter vector
+                    theta = matrix(rep(0,np), ncol = 1))
+        if(runcpp){
+            # cpp rls version use covariance P
+            fit$P <- diag(10000,np)
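+            # (the large initial variance acts as a diffuse prior on the initial coefficients)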
+        }else{
+            # rls version use inverse covariance R
+            fit$R <- diag(1/10000,np)
+        }
+        #
+        return(fit)
+    })
+    names(model$Lfits) <- pst("k", model$kseq)
+
+    # Calculate the parameter estimates for each time point
+    Lresult <- rls_update(model, datatr, data[[model$output]], runcpp)
+    Yhat <- lapply_cbind_df(Lresult, function(x){
+        x$yhat
+    })
+    nams(Yhat) <- pst("k",model$kseq)
+
+    # Maybe crop the output
+    if(!is.na(model$outputrange[1])){ Yhat[Yhat < model$outputrange[1]] <- model$outputrange[1] }
+    if(!is.na(model$outputrange[2])){ Yhat[Yhat > model$outputrange[2]] <- model$outputrange[2] }
+
+    #----------------------------------------------------------------
+    # Calculate the result to return
+    # If the objective function (scorefun) is given
+    if(class(scorefun) == "function"){
+        # Do some checks
+        if( !("scoreperiod" %in% names(data)) ){ stop("data$scoreperiod is not set: Must have it set to an index (int or logical) defining which points to be evaluated in the scorefun().") }
+        if( all(is.na(data$scoreperiod)) ){ stop("data$scoreperiod is not set correctly: It must be set to an index (int or logical) defining which points to be evaluated in the scorefun().") }
+        # Calculate the objective function for each horizon
+        Residuals <- residuals(Yhat, data[[model$output]])
+        scoreval <- sapply(1:ncol(Yhat), function(i){
+            scorefun(Residuals[data$scoreperiod,i])
+        })
+        nams(scoreval) <- nams(Yhat)
+    }else{
+        scoreval <- NA
+    }
+
+    # 
+    if(returnanalysis){
+        # The estimated coefficients
+        Lfitval <- getse(Lresult, "Theta", fun=as.data.frame)
+        # Return the model validation data
+        invisible(structure(list(Yhat = Yhat, model = model$clone_deep(), data = data, Lfitval = Lfitval, scoreval = scoreval), class = c("forecastmodel_fit","rls_fit")))
+    }else{
+        # Only the summed score returned
+        val <- sum(scoreval, na.rm = TRUE)
+        if(is.na(val)){ stop("Cannot calculate the scorefunction for any horizon") }
+        if(printout){ print(c(scoreval,sum=val))}
+        return(val)
+    }
+}
diff --git a/R/rls_optim.R b/R/rls_optim.R
new file mode 100644
index 0000000000000000000000000000000000000000..23995fb8d0cc366f828b35935f793773ce1a6167
--- /dev/null
+++ b/R/rls_optim.R
@@ -0,0 +1,108 @@
+# Do this in a separate file to see the generated help:
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?rls_optim
+
+
+#' Optimize parameters (transformation stage) of RLS model
+#'
+#' This is a wrapper for \code{\link{optim}} to enable easy use of bounds and caching in the optimization.
+#' 
+#' @title Optimize parameters for onlineforecast model fitted with RLS
+#' @param model The onlineforecast model, including inputs, output, kseq and prmbounds.
+#' @param data The data.list including the variables used in the model.
+#' @param scorefun The score function used for calculating the score to be optimized.
+#' @param cachedir A character specifying the path (and prefix) of the cache file name. If set to \code{""}, then no cache will be loaded or written.
+#' @param printout A logical determining if the score function is printed out in each iteration of the optimization.
+#' @param method The method argument for \code{\link{optim}}.
+#' @param ... Additional parameters to \code{\link{optim}}
+#' @return Result object of optim().
+#' Parameters resulting from the optimization can be found from \code{result$par}
+#' @seealso \code{\link{optim}} for how to control the optimization.
+#' @examples
+#'
+#' # Take data (See vignette ??(ref) for better model and more details)
+#' D <- subset(Dbuildingheatload, c("2010-12-15", "2011-01-01"))
+#' D$y <- D$heatload
+#' # Define a model 
+#' model <- forecastmodel$new()
+#' model$add_inputs(Ta = "Ta", mu = "ones()")
+#' model$add_regprm("rls_prm(lambda=0.99)")
+#'
+#' # Before fitting the model, define which points to include in the evaluation of the score function
+#' D$scoreperiod <- in_range("2010-12-20", D$t)
+#' # And the sequence of horizons to fit for
+#' model$kseq <- 1:6
+#' # Now we can fit the model and get the score, as it is
+#' rls_fit(model=model, data=D, scorefun=rmse, returnanalysis=FALSE)
+#' # Or we can change the lambda
+#' rls_fit(c(lambda=0.9), model, D, rmse, returnanalysis=FALSE)
+#'
+#' # This could be passed to optim() (or any optimizer). See \code{forecastmodel$insert_prm()} for more details.
+#' optim(c(lambda=0.98), rls_fit, model=model, data=D, scorefun=rmse, returnanalysis=FALSE, lower=c(lambda=0.9), upper=c(lambda=0.999), method="L-BFGS-B")
+#'
+#' # rls_optim is simply a helper, it makes using bounds easier and enables caching of the results
+#' # First add bounds for lambda (lower, init, upper)
+#' model$add_prmbounds(lambda = c(0.9, 0.98, 0.999))
+#'
+#' # Now the same optimization as above can be done by
+#' val <- rls_optim(model, D)
+#' val
+#' 
+#' # Caching can be done by providing a path (try rerunning and see the file in "cache" folder)
+#' val <- rls_optim(model, D, cachedir="cache")
+#' val
+#'
+#' # If anything affecting the results are changed, then the cache is not loaded
+#' model$add_prmbounds(lambda = c(0.89, 0.98, 0.999))
+#' val <- rls_optim(model, D, cachedir="cache")
+#' 
+#' # To delete the cache
+#' file.remove(dir("cache", full.names=TRUE))
+#' file.remove("cache")
+#' 
+#' @export
+rls_optim <- function(model, data, scorefun = rmse, cachedir="", printout=TRUE, method="L-BFGS-B", ...){
+    # Take the parameters bounds from the parameter bounds set in the model
+    init <- model$get_prmbounds("init")
+    lower <- model$get_prmbounds("lower")
+    upper <- model$get_prmbounds("upper")
+    # If bounds are NA, then set them to -Inf/Inf
+    if(any(is.na(lower))){ lower[is.na(lower)] <- -Inf}
+    if(any(is.na(upper))){ upper[is.na(upper)] <- Inf}
+
+    # Caching the results based on some of the function arguments
+    if(cachedir != ""){
+        # Have to insert the parameters in the expressions to get the right state of the model for unique checksum
+        model$insert_prm(init)
+        # Give all the elements needed to calculate the unique cache name
+        # This is maybe smarter, don't have to calculate the transformation of the data: cnm <- cache_name(model$regprm, getse(model$inputs, nms="expr"), model$output, model$prmbounds, model$kseq, data, objfun, init, lower, upper, cachedir = cachedir)
+        # Have to reset the state first to remove dependency of previous calls
+        model$reset_state()
+        cnm <- cache_name(rls_fit, rls_optim, model$outputrange, model$regprm, model$transform_data(data), data[[model$output]], scorefun, init, lower, upper, cachedir = cachedir)
+        # Maybe load the cached result
+        if(file.exists(cnm)){ return(readRDS(cnm)) }
+    }
+
+    # Run the optimization
+    res <- optim(par = init,
+                 fn = rls_fit,
+                 # Parameters to pass to rls_fit
+                 model = model,
+                 data = data,
+                 scorefun = scorefun,
+                 printout = printout,
+                 returnanalysis = FALSE,
+                 # Parameters to pass to optim
+                 lower = lower,
+                 upper = upper,
+                 method =  method,
+                 ...)
+    
+    # Save the result in the cachedir
+    if(cachedir != ""){ cache_save(res, cnm) }
+    # Return the result
+    return(res)
+}
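+
+# A small usage note (a sketch, not run): any extra argument is passed on to
+# optim() through '...', e.g. its 'control' list to limit iterations in a quick test:
+#   val <- rls_optim(model, D, control=list(maxit=2))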
+
diff --git a/R/rls_predict.R b/R/rls_predict.R
new file mode 100644
index 0000000000000000000000000000000000000000..8c6213aa24c1ea92e068a93380c3d72a8a51ac08
--- /dev/null
+++ b/R/rls_predict.R
@@ -0,0 +1,98 @@
+#' Use a fitted forecast model to predict its output variable with transformed data.
+#'
+#' See the ??ref(recursive updating vignette).
+#'
+#' @title Prediction with an rls model.
+#' @param model Onlineforecast model object which has been fitted.
+#' @param datatr Transformed data.
+#' @return The Yhat forecast matrix with a forecast for each model$kseq and for each time point in \code{datatr$t}.
+#' @examples
+#'
+#' # Take data (See vignette ??(ref) for better model and more details)
+#' D <- subset(Dbuildingheatload, c("2010-12-15", "2011-01-01"))
+#' D$y <- D$heatload
+#' # Define a model 
+#' model <- forecastmodel$new()
+#' model$add_inputs(Ta = "Ta", mu = "ones()")
+#' model$add_regprm("rls_prm(lambda=0.99)")
+#'
+#' # Before fitting the model, define which points to include in the evaluation of the score function
+#' D$scoreperiod <- in_range("2010-12-20", D$t)
+#' # And the sequence of horizons to fit for
+#' model$kseq <- 1:6
+#'
+#' # Transform using the model
+#' datatr <- model$transform_data(D)
+#'
+#' # See the transformed data
+#' str(datatr)
+#'
+#' # The model has not been fitted
+#' model$Lfits
+#'
+#' # To fit
+#' rls_fit(model=model, data=D)
+#'
+#' # Now the fits for each horizon are there (the latest update)
+#' # For example the current parameter estimates
+#' model$Lfits$k1$theta
+#'
+#' # Use the current values for prediction
+#' D$Yhat <- rls_predict(model, datatr)
+#'
+#' # Plot it
+#' plot_ts(D, c("y|Yhat"), kseq=1)
+#'
+#' # Recursive updating and prediction
+#' Dnew <- subset(Dbuildingheatload, c("2011-01-01", "2011-01-02"))
+#'
+#' for(i in 1:length(Dnew$t)){
+#'     # New data arrives
+#'     Dt <- subset(Dnew, i)
+#'     # Remember that the transformation must only be done once for each new point if a transformation with a state, e.g. lp(), is used
+#'     datatr <- model$transform_data(Dt)
+#'     # Update, remember that this must only be done once for each new point (it updates the parameter estimates, i.e. model$Lfits)
+#'     rls_update(model, datatr, Dt$heatload)
+#'     # Now predict to generate the new forecast
+#'     print(rls_predict(model, datatr))
+#' }
+#'
+#' @export
+rls_predict <- function(model, datatr = NA) {
+    # - model: the model object
+    # - datatr: is a datalist which holds the transformed inputs
+
+    # Predict with the model for each k
+    Yhat <- sapply(model$kseq, function(k) {
+        # Take the fit for k
+        theta <- model$Lfits[[pst("k",k)]]$theta
+        # Form the regressor matrix
+        X <- sapply(datatr, function(x) {
+            x[, pst("k", k)]
+        })
+        # Catch if only one row, then X is vector, convert to matrix
+        if (is.null(dim(X))) {
+            X <- matrix(X, ncol = length(X), dimnames = list(NULL, nams(X)))
+        }
+        # The predictions
+        yhat <- as.numeric(rep(NA, nrow(X)))
+        #
+        iOk <- which(apply(is.na(X), 1, sum) == 0)
+        for (i in iOk) {
+            x <- matrix(X[i, ])
+            # Predict
+            yhat[i] <- t(x) %*% theta
+        }
+        return(yhat)
+    })
+    if (is.null(dim(Yhat))) {
+        Yhat <- matrix(Yhat, ncol = length(Yhat), dimnames = list(NULL, nams(Yhat)))
+    }
+    Yhat <- as.data.frame(Yhat)
+    nams(Yhat) <- pst("k", model$kseq)
+    # Maybe crop the output
+    if(!is.na(model$outputrange[1])){ Yhat[Yhat < model$outputrange[1]] <- model$outputrange[1] }
+    if(!is.na(model$outputrange[2])){ Yhat[Yhat > model$outputrange[2]] <- model$outputrange[2] }
+    #
+    return(Yhat)
+}
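+
+# Note on the loop above: for the rows of X without NAs the per-row products are
+# equivalent to a single matrix product (a sketch with the names used inside the sapply):
+#   yhat[iOk] <- as.numeric(X[iOk, , drop=FALSE] %*% theta)
+# The per-row loop simply skips the rows with missing regressor values.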
diff --git a/R/rls_prm.R b/R/rls_prm.R
new file mode 100644
index 0000000000000000000000000000000000000000..22a95ca2cc3c50c092ab4fbb505f30f79bd15ad9
--- /dev/null
+++ b/R/rls_prm.R
@@ -0,0 +1,47 @@
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?rls_prm
+
+#' Function for generating the parameters for RLS regression
+#'
+#' The RLS needs only a forgetting factor parameter.
+#' 
+#' @title Function for generating the parameters for RLS regression
+#' @param lambda The forgetting factor
+#' @return A list of the parameters
+#' @examples
+#'
+#' # Take data (See vignette ??(ref) for better model and more details)
+#' D <- subset(Dbuildingheatload, c("2010-12-15", "2011-01-01"))
+#' D$y <- D$heatload
+#' D$scoreperiod <- in_range("2010-12-20", D$t)
+#' # Define a model 
+#' model <- forecastmodel$new()
+#' model$add_inputs(Ta = "Ta", mu = "ones()")
+#' model$kseq <- 1:6
+#'
+#' # Here the expression which sets the parameters is defined
+#' model$add_regprm("rls_prm(lambda=0.99)")
+#' model$regprmexpr
+#'
+#' # These will fit with lambda=0.99
+#' rls_fit(prm=NA, model, D)
+#' rls_fit(prm=c(lambda=0.99), model, D)
+#'
+#' # The expression is evaluated when the model is fitted
+#' rls_fit(prm=c(lambda=0.85), model, D)
+#'
+#' # What happens is simply that the expression was manipulated
+#' model$regprmexpr
+#' model$regprm
+#'
+#' # Same change could be done by
+#' model$regprm <- list(lambda=0.3)
+#' model$regprm
+#' val <- rls_fit(prm=NA, model, D)
+#' 
+#' @export
+rls_prm <- function(lambda) {
+    list(lambda = lambda)
+}
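+
+# Side note: the forgetting factor determines the effective memory of the RLS.
+# Roughly 1/(1-lambda) observations carry weight in the estimates, e.g.:
+#   1/(1-0.99)   # roughly 100 observations
+#   1/(1-0.9)    # roughly 10 observations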
diff --git a/R/rls_reduce.R b/R/rls_reduce.R
new file mode 100644
index 0000000000000000000000000000000000000000..1ea032c952c27ef45c6f1505de01f0f0642d35cc
--- /dev/null
+++ b/R/rls_reduce.R
@@ -0,0 +1,74 @@
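+# Backward reduction of an onlineforecast model fitted with RLS (internal helper).
+# Starting from the given model: the inputs are removed one at a time and, if
+# 'preduce' is given, each listed integer parameter is decreased by one; every
+# candidate is re-optimized with rls_optim() and the change giving the lowest
+# score is kept. This is repeated until no candidate improves the score, and the
+# reduced model is then returned.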
+rls_reduce <- function(model, data, preduce=list(NA), scorefun = rmse){
+    ## prm test
+    ##preduce <- list(I__degree = c(min=1, init=7), mu_tday__nharmonics = c(min=1, init=7))
+    prmin <- unlist(getse(preduce, 1))
+    pr <- unlist(getse(preduce, 2))
+    ##!! deep=TRUE didn't work, gave: "Error: C stack usage  9524532 is too close to the limit"
+    m <- model$clone_deep()
+    ## Insert the starting p reduction values
+    if(!is.na(preduce[1])){
+        m$insert_prm(pr)
+    }
+    ##
+    library(parallel)
+    ##
+    valref <- rls_optim(m, data, printout=FALSE)$value
+    ##
+    while(TRUE){
+        ##
+        cat("------------------------------------\nReference score value",valref,"\n")
+        ## --------
+        ## Remove inputs one by one
+        cat("\nRemoving inputs one by one\n")
+        valsrm <- mclapply(1:length(model$inputs), function(i){
+            mr <- m$clone_deep()
+            mr$inputs[[i]] <- NULL
+            rls_optim(mr, data, printout=FALSE)$value
+        })
+        valsrm <- unlist(valsrm)
+        names(valsrm) <- names(m$inputs)
+        cat("Scores\n")
+        print(valsrm)
+        ## --------
+        ## Reduce parameter values if specified
+        valspr <- Inf # placeholder in case no parameter reduction is specified
+        if(!is.na(pr[1])){
+            cat("\nReducing prm with -1 one by one\n")
+            valspr <- mclapply(1:length(pr), function(i){
+                mr <- m$clone_deep()
+                p <- pr
+                ## Only count down if above minimum
+                if( p[i] > prmin[i] ){
+                    p[i] <- p[i] - 1
+                }
+                mr$insert_prm(p)
+                val <- rls_optim(mr, data, printout=FALSE)$value
+                ##
+                return(val)
+            })
+            valspr <- unlist(valspr)
+            names(valspr) <- names(pr)
+            cat("Scores\n")
+            print(valspr)
+        }
+        ## Is one the reduced smaller than the current ref?
+        if( min(c(valsrm,valspr)) < valref ){
+            if(which.min(c(min(valsrm),min(valspr))) == 1){
+                ## One of the models with one of the inputs removed is best
+                imin <- which.min(valsrm)
+                cat("Removing input",names(m$inputs)[imin],"\n")
+                m$inputs[[imin]] <- NULL
+            }else{
+                ## One of the models with reduced parameter values is best
+                imin <- which.min(valspr)
+                pr[imin] <- pr[imin] - 1
+                m$insert_prm(pr)
+                cat("Reduced parameter",names(pr)[imin],"to:",pr[imin],"\n")
+            }
+            valref <- min(c(valsrm,valspr))
+        }else{
+            ## No improvement obtained from reduction, so return the current model
+            cat("------------------------------------\n\nDone\n")
+            return(m)
+        }
+    }
+}
diff --git a/R/rls_summary.R b/R/rls_summary.R
new file mode 100644
index 0000000000000000000000000000000000000000..5bdb99ff21d73f01f014d1fe9200db8ea66ef447
--- /dev/null
+++ b/R/rls_summary.R
@@ -0,0 +1,131 @@
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?rls_summary
+
+#' The summary of an onlineforecast model fitted with RLS, with simple statistics providing an overview.
+#'
+#' The following is printed:
+#' 
+#' * The model.
+#'
+#' * Number of observations included in the scoreperiod.
+#'
+#' * RLS coefficients summary statistics for the estimated coefficient time series (since the observations are correlated, the usual statistics cannot be applied directly):
+#'
+#'     - mean: the sample mean of the series.
+#'
+#'     - sd: sample standard deviation of the series.
+#'
+#'     - min: minimum of the series.
+#'
+#'     - max: maximum of the series.
+#'
+#' * Scorefunction applied for each horizon, per default the RMSE.
+#'
+#' @title Print summary of an onlineforecast model fitted with RLS
+#' @param object of class \code{rls_fit}, so a fit calculated by \code{\link{rls_fit}}.
+#' @param scoreperiod logical (or index). If this scoreperiod is given, then it will be used over the one in the fit.
+#' @param scorefun The score function to be applied on each horizon.
+#' @param usecomplete Only use the set of observations which is complete for all horizons.
+#' @param printit Print the result.
+#' @return A list of:
+#' 
+#'     - scorefun.
+#' 
+#'     - scoreval (value of the scorefun for each horizon).
+#' 
+#'     - scoreperiod is the scoreperiod used.
+#'
+#' @examples
+#'
+#' # Take data (See vignette ??(ref) for better model and more details)
+#' D <- subset(Dbuildingheatload, c("2010-12-15", "2011-01-01"))
+#' D$y <- D$heatload
+#' D$scoreperiod <- in_range("2010-12-20", D$t)
+#' # Define a model
+#' model <- forecastmodel$new()
+#' model$add_inputs(Ta = "Ta",
+#'                  mu = "ones()")
+#' model$add_regprm("rls_prm(lambda=0.99)")
+#' model$kseq <- 1:6
+#' # Fit it
+#' fit <- rls_fit(prm=c(lambda=0.99), model, D)
+#'
+#' # Print the summary
+#' summary(fit)
+#' # We see:
+#' #   - The model (output, inputs, lambda)
+#' #   - The Ta coefficient is around -0.12 on average (for all horizons) with a standard dev. of 0.03, so not varying extremely (between -0.18 and -0.027)
+#' #   - The intercept mu is around 5.5 and varying very little.
+#' #   - The RMSE is around 0.9 for all horizons.
+#'
+#' # The residuals and coefficient series can be seen by
+#' plot_ts.rls_fit(fit)
+#'
+#' 
+#' @export
+rls_summary <- function(object, scoreperiod = NA, scorefun = rmse, usecomplete = TRUE, printit = TRUE){
+    fit <- object
+    #
+    scipen <- options(scipen=10)$scipen
+    # 
+    tmp <- score_fit(fit, scoreperiod, usecomplete, scorefun)
+    scoreval <- tmp$scoreval
+    scoreperiodused <- tmp$scoreperiod
+    retval <- list(scorefun = scorefun, scoreval = scoreval, scoreperiod = scoreperiodused)
+    # Return the result before print?
+    if(!printit){
+        return(retval)
+    }
+    # Insert the optimized parameters
+    m <- fit$model$clone_deep()
+    m$prm[names(m$prm)] <- signif(m$prm, digits=3)
+    m$insert_prm(m$prm)
+    print(m)
+    #
+    cat("Regression parameters:\n")
+    for(i in 1:length(m$regprm)){
+        cat("    ",names(m$regprm)[i],"=",unlist(m$regprm[i]),"\n")
+    }
+    #
+    cat("\nScoreperiod:",sum(scoreperiodused),"observations are included.\n")
+    #
+    cat("\nRLS coeffients summary stats (cannot be used for significance tests):\n")
+    coef <- t(sapply(1:length(fit$Lfitval[[1]]), function(i){
+        val <- sapply(fit$Lfitval, function(Theta){
+            Theta[scoreperiodused,i]
+        })
+        #
+        m <- mean(val,na.rm=TRUE)
+        s <- sd(val,na.rm=TRUE)
+        #abscv <- abs(s/m)
+        # # An AR1 coefficient can tell a bit about the behaviour of the coefficient
+        # x <- c(val)
+        # xl1 <- lag(x,1)
+        #
+        c(mean=m, sd=s, min=min(val,na.rm=TRUE), max=max(val,na.rm=TRUE)) #coefvar=abscv, skewness=skewness(val, na.rm=TRUE))#, ar1=unname(lm(x ~ xl1)$coefficients[2]))
+    }))
+    rownames(coef) <- names(fit$Lfitval[[1]])
+    print(signif(coef, digits=2))
+    options(scipen=scipen)
+    #
+    # Print the score
+    if("scorefun" %in% names(as.list(match.call()))){
+        scorename <- as.list(match.call())$scorefun
+    }else{
+        scorename = "rmse"
+    }
+    if( any(scoreval < 10) ){
+        tmp <- signif(scoreval, digits=2)
+    }else{
+        tmp <- round(scoreval, digits=1)
+    }
+    cat(pst("\n",toupper(scorename),":\n"))
+    print(tmp)
+    cat("\n")
+    invisible(list(scorefun = scorefun, scoreval = scoreval, scoreperiod = scoreperiodused))
+}
+
+#' @export
+summary.rls_fit <- rls_summary
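+
+# The summary can also be used programmatically (a sketch, assuming 'fit' from the
+# example above); with printit=FALSE nothing is printed and the list is returned:
+#   res <- rls_summary(fit, printit=FALSE)
+#   res$scoreval          # the score for each horizon
+#   sum(res$scoreperiod)  # the number of observations included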
diff --git a/R/rls_update.R b/R/rls_update.R
new file mode 100644
index 0000000000000000000000000000000000000000..269f5d0317430a6674003a17b4cb93fb484bd5ec
--- /dev/null
+++ b/R/rls_update.R
@@ -0,0 +1,166 @@
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?rls_update
+
+#' Calculates the RLS update of the model coefficients with the provided data.
+#'
+#' See vignette ??ref(recursive updating) on how to use the function. 
+#' 
+#' @title Updates the model fits
+#' @param model A model object
+#' @param datatr a data.list with transformed data (from model$transform_data(D))
+#' @param y A vector of the model output for the corresponding time steps in \code{datatr}
+#' @param runcpp Optional, default = TRUE. If TRUE, a c++ implementation of the update is run, otherwise a slower R implementation is used.
+#' @return
+#'
+#' Invisibly returns a named list with an element for each horizon (\code{model$kseq}) holding the result of the update (the predictions and the coefficient estimates).
+#'
+#' Note that it also updates the fit state for each horizon kept in the forecast model object (\code{model$Lfits}).
+#'
+#' @seealso
+#' See \code{\link{rls_predict}}.
+#' 
+#' @examples
+#'
+#' # See rls_predict examples
+#'
+#' @export
+
+rls_update <- function(model, datatr = NA, y = NA, runcpp=TRUE) {
+    # Take the inputs data and bind with the kept inputs data in the fit
+    #
+    # The data must be kept for later updating, done below
+    # The last part of the input data is needed for next update
+
+    # Find the number of parameters for the regression
+    np <- length(datatr)
+
+    # Keep only the last kmax rows for next time
+    kmax <- max(as.integer(gsub("k", "", nams(datatr[[1]]))))
+
+    # Check if data was kept
+    kept_input_data <- !is.na(model$datatr[1])
+    #
+    if (kept_input_data) {
+        # Find the start index for iterating later (the index to start updating from)
+        # How many points are kept plus one
+        istart <- nrow(model$datatr[[1]]) + 1
+        # Bind together new and kept data
+        for (i in 1:length(datatr)) {
+            # Bind them
+            datatr[[i]] <- rbind(model$datatr[[i]], datatr[[i]])
+            # Keep only the last kmax rows for next time
+            # Done below: model$datatr[[i]] <- datatr[[i]][(n+1):(kmax+n), ]
+        }
+        # Also for y to sync with X
+        y <- c(rep(NA,istart-1), y)
+    } else {
+        # Set later when nothing is kept (it must be set to k+1)
+        istart <- NA
+    }
+
+    # The number of points
+    n <- length(y)
+
+    # Parameters for rls
+    lambda <- model$regprm$lambda
+
+    if(runcpp){
+        L <- lapply(model$Lfits, function(fit) {
+
+            # Take the needed values from the fit
+            k <- fit$k
+            theta <- fit$theta
+            # The non cpp keeps R, see below
+            if(is.null(fit$P)){
+                P <- solve(fit$R)
+            }else{ P <- fit$P }
+
+            # Form the regressor matrix, don't lag it
+            X <- as.matrix(as.data.frame(subset(datatr, kseq=k)))
+
+            # When nothing was kept
+            if(!kept_input_data){ istart <- k + 1 }
+            val <- rls_update_cpp(y, X, theta, P, lambda, k, n, np, istart, kmax)
+            # Give names to the matrices (maybe faster if done in cpp function, see in the end)
+            colnames(val$fit$P) <- names(datatr)
+            colnames(val$result$Theta) <- names(datatr)
+            # Give the result
+            return(val)
+        })
+    }else{
+        # Fit the model for each k
+        L <- lapply(model$Lfits, function(fit) {
+            # Take the needed values from the fit
+            k <- fit$k
+            theta <- fit$theta
+            # The cpp keeps P
+            if (is.null(fit$R)) {
+                R <- solve(fit$P)
+            } else {
+                R <- fit$R
+            }
+
+            # Form the regressor matrix, don't lag it
+            X <- as.data.frame(subset(datatr, kseq=k))
+
+            # Prepare for keeping for the parameter estimates
+            Theta <- matrix(as.numeric(NA), nrow = n, ncol = np)
+
+            # Make vector for predictions k steps ahead
+            yhat <- rep(NA,length(y))
+            # If input data was kept (i.e. it is not a fresh update), NAs was added above in X and y, so insert the kept yhat
+            if(kept_input_data){ yhat[1:length(fit$yhat)] <- fit$yhat}
+
+            # When nothing was kept
+            if(!kept_input_data){ istart <- k + 1 }
+
+            # Iterate through
+            for (i in istart:n) {
+                # Take the forecast k steps back to match it with y[i]
+                x <- t(as.matrix(X[i-k, ]))
+
+                if(!any(is.na(x)) & !is.na(y[i])){
+                    # Update
+                    R <- lambda * R + x %*% t(x)
+                    theta <- theta + solve(R, x) %*% (y[i] - t(x) %*% theta)
+                    Theta[i, ] <- t(theta)
+                }
+
+                # Make a prediction
+                yhat[i] <- as.matrix(X[i, ]) %*% theta
+            }
+            #
+            # Give names to the matrices
+            colnames(R) <- names(datatr)
+            colnames(Theta) <- names(datatr)
+            # Return the fit and result
+            return(list(fit=list(k=k, theta=theta, R=R, yhat=yhat[(n-kmax+1):n]),
+                        result=list(yhat=yhat, Theta=Theta)))
+        })
+    }
+    #
+    # Keep the last part of the transformed data for later
+    model$datatr <- subset(datatr, (n-kmax+1):n)
+    # Store the values of y if the model has AR term
+    if(!is.na(model$maxlagAR)){
+        # Was data kept?
+        if(kept_input_data){
+            # Yes, so put together with the kept
+            tmpy <- c(model$yAR, y[istart:n])
+        }else{
+            # No, then just take from y
+            tmpy <- y
+        }
+        # In case too few new values, then fill with NAs
+        if((model$maxlagAR+1) > length(tmpy)){ tmpy <- c(rep(NA,(model$maxlagAR+1)-length(tmpy)), tmpy) }
+        # Keep the needed
+        model$yAR <- tmpy[(length(tmpy)-model$maxlagAR):length(tmpy)]
+    }
+    #
+    # Keep the fit
+    model$Lfits <- getse(L, "fit")
+    # Return Theta in a list for each k
+    invisible(getse(L, "result"))
+}
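+
+# Stand-alone sketch of the RLS recursion implemented above (synthetic data and an
+# assumed small diagonal initialisation of the information matrix R; only meant to
+# illustrate the update equations, it is not part of the package API):
+#   set.seed(1)
+#   n <- 200
+#   X <- cbind(mu=1, Ta=rnorm(n))               # intercept and one input
+#   y <- X %*% c(2, -0.5) + rnorm(n, sd=0.1)    # true coefficients c(2, -0.5)
+#   lambda <- 0.99
+#   R <- diag(1e-4, ncol(X))
+#   theta <- rep(0, ncol(X))
+#   for(i in 1:n){
+#       x <- X[i, ]
+#       R <- lambda * R + x %*% t(x)                          # update information matrix
+#       theta <- theta + solve(R, x) * (y[i] - sum(x*theta))  # update coefficients
+#   }
+#   theta   # ends close to c(2, -0.5)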
diff --git a/R/rmse.R b/R/rmse.R
new file mode 100644
index 0000000000000000000000000000000000000000..afe1dd4123e32ac9a6871365698d4b70cd85896e
--- /dev/null
+++ b/R/rmse.R
@@ -0,0 +1,39 @@
+# Do this in a separate file to see the generated help:
+# library(devtools)
+# document()
+# load_all(as.package("../../onlineforecast"))
+# ?rmse
+
+#' Returns the RMSE.
+#'
+#' Used for forecast evaluation and optimization of parameters in model fitting.
+#'
+#' Note that \code{NA}s are ignored (i.e. \code{mean} is called with \code{na.rm=TRUE}).
+#'
+#' @title Computes the RMSE score.
+#' @param x a numerical vector of residuals.
+#' @return The RMSE score.
+#' @seealso \code{\link{score_for_k}()} for calculation of a score for the k'th horizon, and \code{\link{score_fit}()} which takes a forecastmodel fit and returns score taking scoreperiod etc. into account.
+#' @name rmse
+#' @examples
+#'
+#'
+#'  # Just a vector to be forecasted
+#'  y <- c(filter(rnorm(100), 0.95, "recursive"))
+#'  # Generate a forecast matrix with a simple persistence model
+#'  Yhat <- persistence(y, kseq=1:4)
+#'  # The residuals for each horizon
+#'  Resid <- residuals(Yhat, y)
+#'
+#' # Calculate the score for the k1 horizon
+#' rmse(Resid$h1)
+#'
+#' # For all horizons
+#' apply(Resid, 2, rmse)
+#'
+#' 
+#' @export
+
+rmse <- function(x) {
+    sqrt(mean(x^2, na.rm = TRUE))
+}
diff --git a/R/score_fit.R b/R/score_fit.R
new file mode 100644
index 0000000000000000000000000000000000000000..e4e9e8dca927a601cd07a5c996783ec309c49a0e
--- /dev/null
+++ b/R/score_fit.R
@@ -0,0 +1,58 @@
+
+#' Calculate the score for each horizon for a forecast model fit.
+#'
+#' For evaluation of the score on each horizon, as specified in the fit. Use it for a consistent evaluation.
+#' 
+#' @title Calculates scores for a forecast model fit.
+#' @param fit A model fit 
+#' @param scoreperiod An index (logical or integer) defining which points to include in the score calculation. If NA, the \code{scoreperiod} from \code{fit$data} is used.
+#' @param usecomplete Only use points where the forecasts are complete for all horizons (see \code{\link{score_for_k}}).
+#' @param scorefun The score function applied, per default \code{\link{rmse}}.
+#' @seealso \code{\link{rmse}} and \code{\link{score_for_k}} which are used in this function.
+#' @return A list with:
+#'   - \code{scoreval} is the score value
+#'   - \code{scoreperiod} is the score period used (can be different from the one given in the arguments \code{fit} or \code{scoreperiod})
+#'   - \code{scorename} is the name of the score function applied
+#' @export
+score_fit <- function(fit, scoreperiod = NA, usecomplete = TRUE, scorefun = rmse){
+    # Calculate the score for each horizon
+    if("scorefun" %in% names(as.list(match.call()))){
+        scorename <- as.list(match.call())$scorefun
+    }else{
+        scorename = "rmse"
+    }
+
+    # Check score period
+    txt <- ": It must be set to an index (int or logical) defining which points to be evaluated in the scorefun()."
+    if(is.na(scoreperiod[1])){
+        if("scoreperiod" %in% nams(fit$data)){
+            scoreperiod <- fit$data$scoreperiod
+        }else{
+            stop("scoreperiod is not set. Set it in the data used in the fit function or as argument in the present call:",txt)
+        }
+    }
+
+    # Calculate the Residuals if they were not in fit
+    if( !"Residuals" %in% names(fit) ){
+        # Calculate the residuals
+        Residuals <- residuals(fit$Yhat, fit$data[fit$model$output])
+    }else{
+        Residuals <- fit$Residuals
+    }
+
+    # Calculate the score
+    tmp <- score_for_k(Residuals, scoreperiod, usecomplete)
+    scoreval <- tmp$scoreval
+    scoreperiod <- tmp$scoreperiod
+    
+    # Give a warning if the score kept in the fit differs from the one calculated here
+    if(!is.na(fit$scoreval[1])){
+        if(length(fit$scoreval) != length(scoreval)){
+            warning("fit contains 'scoreval', which is different in length than the scoreval calculated here (probably for different horizons (kseq))")
+        }else if(!all(fit$scoreval == scoreval)){
+            warning("fit contains 'scoreval' which is different from the ", scorename, " score calculated now (can also be because of 'usecomplete = TRUE', such that only points which have forecasts for all horizons are included).")
+        }
+    }
+    #
+    list(scoreval=scoreval, scoreperiod=scoreperiod, scorename=scorename)
+}
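+
+# Sketch (not run): the score can also be evaluated on a custom period by passing
+# an index, here from 2011-01-01 onwards (assumes a fit returned by rls_fit() with
+# returnanalysis=TRUE, so that fit$data holds the data used):
+#   sc <- score_fit(fit, scoreperiod = fit$data$t >= asct("2011-01-01"))
+#   sc$scoreval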
diff --git a/R/score_for_k.R b/R/score_for_k.R
new file mode 100644
index 0000000000000000000000000000000000000000..7becbcab0177f0f7d96351011d7a2f1d6853b623
--- /dev/null
+++ b/R/score_for_k.R
@@ -0,0 +1,58 @@
+# Do this in a separate file to see the generated help:
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?score_for_k
+
+#' Calculates the score for each horizon for a matrix with residuals for each horizon.
+#'
+#' Applies the \code{scorefun} on all horizons (each column) of the residuals matrix. See the description of each parameter for more details.
+#' 
+#' @title Calculate the score for each horizon.
+#' @param Residuals A matrix with residuals (columns named \code{hxx}) for which to calculate the score for each horizon.
+#' @param scoreperiod A logical vector controlling which points are included in the score calculation. If NA then all values are included.
+#' @param usecomplete If TRUE then only the values available for all horizons are included (i.e. if at one time point there is a missing value, then the values for this time point are removed for all horizons in the calculation).
+#' @param scorefun The score function.
+#' @return A list with a numeric vector holding the score value for each horizon, and the applied \code{scoreperiod} (note: it can be different from the given scoreperiod, since per default only complete observations are used).
+#' @examples
+#'
+#' # Just a vector to be forecasted
+#' y <- c(filter(rnorm(100), 0.95, "recursive"))
+#' # Generate a forecast matrix with a simple persistence model
+#' Yhat <- persistence(y, kseq=1:4)
+#' # The residuals for each horizon
+#' Resid <- residuals(Yhat, y)
+#'
+#' # Calculate the score for each horizon
+#' score_for_k(Resid)$scoreval
+#'
+#' # The first values were excluded, since there are NAs
+#' head(Resid)
+#' score_for_k(Resid)$scoreperiod
+#'
+#' 
+#' @export
+score_for_k <- function(Residuals, scoreperiod = NA, usecomplete = TRUE, scorefun = rmse){
+    # If no scoreperiod is given, then use all
+    if(is.na(scoreperiod[1])){
+        scoreperiod <- rep(TRUE,nrow(Residuals))
+    }else{
+        # Do checking of scoreperiod
+        txt <- " It must be set to an index (int or logical) defining which points to be evaluated in the scorefun()."
+        if( length(scoreperiod) != nrow(Residuals) ){
+            stop("scoreperiod is not the same length as nrow(Residuals):",txt)
+        }else{
+            if( all(is.na(scoreperiod)) ){ stop("scoreperiod is all NA:",txt) }
+        }
+    }
+    # Take only the rows which have a value for each horizon?
+    if(usecomplete){
+        scoreperiod <- scoreperiod & complete.cases(Residuals)
+    }
+    # Calculate the objective function for each horizon
+    scoreval <- sapply(1:ncol(Residuals), function(i){
+        scorefun(Residuals[scoreperiod,i])
+    })
+    nams(scoreval) <- gsub("h","k",nams(Residuals))
+    # 
+    return(list(scoreval=scoreval,scoreperiod=scoreperiod))
+}
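+
+# Any score function taking a vector of residuals can be used, e.g. the MAE
+# (a sketch; the anonymous function is just for illustration):
+#   score_for_k(Resid, scorefun = function(x){ mean(abs(x), na.rm=TRUE) })$scoreval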
diff --git a/R/setpar.R b/R/setpar.R
new file mode 100644
index 0000000000000000000000000000000000000000..60f092f9546abef1d7713c72ddf1016530e7cfd6
--- /dev/null
+++ b/R/setpar.R
@@ -0,0 +1,57 @@
+# Do this in a separate file to see the generated help:
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?aslt
+
+#' Setting \code{\link{par}()} plotting parameters to a set of default values
+#'
+#' A simple function, which sets the \code{\link{par}()} plotting parameters to a default set of values.
+#'
+#' Actually, only really used for setting useful \code{par} values for multiple time series plots with same x-axis. Give \code{tmpl="ts"} and \code{mfrow=c(x,1)}, where x is the number of plots.
+#' 
+#' @title Setting \code{\link{par}()} plotting parameters
+#' @param tmpl The name of the parameter template; the default is "ts".
+#' @param mfrow The mfrow for \code{par}.
+#' @param ... More parameters for \code{par}.
+#' @return Returns the original set of parameters, such that they can be reset after plotting.
+#' @examples
+#'
+#' # Make some data
+#' D <- data.frame(t=seq(asct("2020-01-01"),asct("2020-01-10"),len=100), x=rnorm(100), y=runif(100))
+#'
+#' # Generate two stacked plots with same x-axis
+#' setpar("ts", mfrow=c(2,1))
+#' plot(D$t, D$x, type="l")
+#' plot(D$t, D$y, type="l")
+#' # Note xaxt="s" must be set
+#' axis.POSIXct(1, D$t, xaxt="s", format="%Y-%m-%d")
+#' 
+#' # In a function, where this is used and a plot is generated, do like this to reset on exit
+#' oldpar <- setpar(mfrow=c(2,1))
+#' on.exit(par(oldpar))        
+#' 
+#' @export
+setpar <- function(tmpl = "ts", mfrow = c(1,1), ...) {
+    # Get par list
+    p <- par(no.readonly = TRUE)
+    # Templates
+    if (tmpl == "ts") {
+        par(mfrow = mfrow, oma = c(3, 0, 2, 0), mar = c(0, 4, 1, 0), xaxt = "n", 
+            mgp = c(2.2, 0.4, 0), tcl = -0.4, ...)
+    }else if (tmpl == "pdf") {
+        par(mar = c(4, 4, 1, 1), mgp = c(2.2, 0.7, 0), tcl = -0.4, ...)
+    }else{
+        stop("Must give tmpl like 'ts' or 'pdf'")
+    }
+    
+    # Apply the parameters given in '...' (if any extra arguments were passed)
+    i <- which(!nams(match.call()) %in% nams(match.call(expand.dots = TRUE)))
+    if (length(i) > 0) {
+        par(...)
+        # prm <- as.list(match.call()[i]) p <- list() for(i in 1:length(prm)) { p$new <-
+        # eval(prm[[i]]) nams(p)[i] <- nams(prm)[i] } par(p)
+    }
+    # Return the original par, so it can be restored after plotting
+    invisible(p)
+}
diff --git a/R/stairs.R b/R/stairs.R
new file mode 100644
index 0000000000000000000000000000000000000000..47f10d03d5c72565316955c7c0560626ceaddb3d
--- /dev/null
+++ b/R/stairs.R
@@ -0,0 +1,57 @@
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?stairs
+
+
+#' Plotting steps with time point at end of interval
+#'
+#' It's easy to plot stairs with \code{plot(x,y,type="s")}, however that makes the steps go forward from \code{x}. For time series this works if the time points are at the beginning of the intervals.
+#'
+#' Often with time series the time points are at the end of the intervals, so the steps should go backward; this is achieved with this function.
+#' 
+#' @title Plotting stairs with time point at end of interval
+#' @param x x values for plot
+#' @param y y values for plot
+#' @param type if 'b' then include points
+#' @param preline if TRUE, then a line backwards from the first point is added
+#' @param pch The plotting character, passed to \code{points()} when \code{type="b"}
+#' @param ... Passed to \code{lines()} and \code{points()} when they are called in the function
+#' @examples
+#'
+#' # Usual stairs plot has steps forward from x
+#' x <- rnorm(10)
+#' plot(1:10, x, type="s")
+#'
+#' # Stairs with step backward from x
+#' plot(1:10, x, type="n")
+#' stairs(1:10, x)
+#'
+#' # Use for time series plotting
+#' plot_ts(Dbuildingheatload, "heatload", c("2010-12-15","2010-12-16"), plotfun=stairs)
+#'
+#' # Set it globally for all plot_ts
+#' p <- par_ts()
+#' p$plotfun <- stairs
+#' options(par_ts=p)
+#' plot_ts(Dbuildingheatload, "heatload", c("2010-12-15","2010-12-16"))
+#'
+#' # Modify it to only lines
+#' plot_ts(Dbuildingheatload, "heatload", c("2010-12-15","2010-12-16"), plotfun=function(x,y,...){stairs(x,y, type="l")})
+#'
+#' @export
+stairs <- function(x, y, type="b", preline=FALSE, pch=19, ...)
+{
+    xp <- rep(x,each=2)
+    yp <- rep(y,each=2)
+
+    xp <- c(xp[1]-(xp[3]-xp[1]), xp)
+    yp <- c(yp,yp[length(yp)])
+    if(!preline){
+        yp[1] <- NA
+    }
+    #
+    lines(xp, yp, ...)
+    if(type == "b"){
+        points(x, y, pch=pch, ...)
+    }
+}
diff --git a/R/state_getval.R b/R/state_getval.R
new file mode 100644
index 0000000000000000000000000000000000000000..71bdb21995077670761465df6509ca9c511840e0
--- /dev/null
+++ b/R/state_getval.R
@@ -0,0 +1,57 @@
+# Do this in a separate file to see the generated help:
+#library(devtools)
+#document()
+#load_all(as.package("../../onlineforecast"))
+#?state_getval
+
+#' Get the state value kept in last call to the transformation function.
+#'
+#' Transformation functions (e.g. \code{\link{lp}}, \code{\link{fs}}, \code{\link{bspline}}) can need to keep a state value between calls, e.g. when new data arrives and must be transformed. This function is used for getting the state value set in the last call to the function.
+#'
+#' Uses the \code{input_class$state_getval()}.
+#' 
+#' @title Get the state value kept in last call.
+#' @param initval If no state was kept, then this init value is returned.
+#' @return The state value, but if not found, then the initval.
+#' @seealso \code{\link{state_setval}()} for setting the state value and \code{\link{input_class}}.
+#' @examples
+#'
+#' # See how it can be used in lp, which needs to save the state of the filter
+#' # Note that nothing else than getting and setting the state is needed in transformations.
+#' # In model$transform_data() multiple transformation functions can be called, but they are always called in the same order, so the set and get state functions keep a counter internally to make sure that the correct values are set and returned when called again.
+#' lp
+#' 
+#' 
+state_getval <- function(initval) {
+    # Find the environment (frame) with the model
+    # Must be done with a for loop; if done with lapply the order of parents changes
+    # When this function is called inside a transformation function, the input object
+    # is found in one of the parent frames (see the loop below)
+    
+    # browser()
+    # # Debugging to find the content of the environments
+    # for(i in sys.parents()){
+    #     nms <- ls(parent.frame(i+1))
+    #     print(nms)
+    # }
+    #
+
+    # This seems to work all the time
+    # When "render("building-heat-load-forecasting.Rmd")" the "input" is not there
+    # Running tests, then the c("data","self") is not there
+    # Sometimes the input object is the only available
+    for(i in sys.parents()){
+        nms <- ls(parent.frame(i+1))
+        if(length(nms) == 1){
+            if(nms == "input"){
+                return(parent.frame(i+1)$input$state_getval(initval))
+            }
+        }else if(length(nms) == 2){
+            if(all(nms[1:2] == c("data","self"))){
+                return(parent.frame(i+1)$self$state_getval(initval))
+            }
+        }
+    }
+    # If made it to here, the input was not found, so give a meaningful warning and return the initval
+    warning("In state_getval() the object of class input was not found in the parent environments. The initval was returned.")
+    return(initval)
+}
diff --git a/R/state_setval.R b/R/state_setval.R
new file mode 100644
index 0000000000000000000000000000000000000000..00d50a0180f9d618f4699d300889d13f44db73d0
--- /dev/null
+++ b/R/state_setval.R
@@ -0,0 +1,54 @@
+#' Set a state value to be kept until the next time the transformation function is called.
+#'
+#' Transformation functions (e.g. \code{\link{lp}}, \code{\link{fs}}, \code{\link{bspline}}) can need to keep a state value between calls, e.g. when new data arrives and must be transformed. This function is used for setting the state value to be used in the next call to the function.
+#'
+#' Uses the \code{input_class$state_setval()}.
+#' 
+#' @title Set a state value to be kept for the next time the transformation function is called.
+#' @param val The value to set and keep for the next call.
+#' @seealso \code{\link{state_getval}()} for getting the state value and \code{\link{input_class}}.
+#' @examples
+#'
+#' # See how it can be used in lp, which needs to save the state of the filter
+#' # Note that nothing else than getting and setting the state is needed in transformations.
+#' # In model$transform_data() multiple transformation functions can be called, but they are always called in the same order, so the set and get state functions keep a counter internally to make sure that the correct values are set and returned when called again.
+#' lp
+#' 
+#' 
+state_setval <- function(val) {
+    ## Find the environment (frame) with the model
+    ## Must be done with a for loop; if done with lapply the order of parents changes
+
+    ## Old way...stopped working in tests, don't know why
+    ## browser()
+    ## for(i in sys.parents()){
+    ##     nms <- ls(parent.frame(i+1))
+    ##     print(nms)
+    ##     ## if(length(nms) == 2){
+    ##     ##     if(all(nms[1:2] == c("data","self"))){
+    ##     ##         break
+    ##     ##     }
+    ##     ## }
+    ## }
+    ## ## Set the values and return
+    ## parent.frame(i+1)$self$state_setval(val)
+
+    ## This seems to work all the time
+    ## When "render("building-heat-load-forecasting.Rmd")" the "input" is not there
+    ## Running tests, then the c("data","self") is not there
+    ## Sometimes the input object is the only available
+    for(i in sys.parents()){
+        nms <- ls(parent.frame(i+1))
+        if(length(nms) == 1){
+            if(nms == "input"){
+                return(parent.frame(i+1)$input$state_setval(val))
+            }
+        }else if(length(nms) == 2){
+            if(all(nms[1:2] == c("data","self"))){
+                return(parent.frame(i+1)$self$state_setval(val))
+            }
+        }
+    }
+    ## If made it to here, the input was not found, so give a meaningful warning
+    warning("In state_setval() the object of class input was not found in the parent environments, so the state value could not be updated.")
+}
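+
+# Sketch of the pattern a stateful transformation follows (a hypothetical function,
+# only to illustrate how state_getval() and state_setval() are paired; see lp() for
+# a real example). The value saved in one call is returned at the start of the next
+# call, so the computation can continue where it left off when new data arrives:
+#   my_counter <- function(X){
+#       nprev <- state_getval(initval = 0)   # rows seen in earlier calls (0 at first call)
+#       state_setval(nprev + nrow(X))        # keep the updated count for the next call
+#       return(X)                            # ...the actual transformation of X goes here
+#   }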
diff --git a/README.md b/README.md
deleted file mode 100644
index a3b5f1eb958b097db9df9f38a1a24a0598a61f6c..0000000000000000000000000000000000000000
--- a/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-# onlineforecast
-
diff --git a/data/Dbuildingheatload.rda b/data/Dbuildingheatload.rda
new file mode 100644
index 0000000000000000000000000000000000000000..e1de9397212b07ab7cb7d149fa6159417648b100
Binary files /dev/null and b/data/Dbuildingheatload.rda differ
diff --git a/data/Dsolarpower.rda b/data/Dsolarpower.rda
new file mode 100644
index 0000000000000000000000000000000000000000..e94a4e1a0732b848f55104e3700baf71899b283d
Binary files /dev/null and b/data/Dsolarpower.rda differ
diff --git a/inst/CITATION b/inst/CITATION
new file mode 100644
index 0000000000000000000000000000000000000000..396e530172dd5c8fee9594779166cb9eface54c3
--- /dev/null
+++ b/inst/CITATION
@@ -0,0 +1,16 @@
+citHeader("To cite onlineforecast in publications use:")
+
+citEntry(
+  entry    = "Article",
+  title    = "Short-term heat load forecasting for single family houses",
+  author   = "Peder Bacher and Henrik Madsen and Henrik Aalborg Nielsen and Bengt Perers",
+  journal  = "Energy and Buildings",
+  year     = "2013",
+  volume   = "65",
+  number   = "0",
+  pages    = "101-112",
+  url      = "http://onlineforecasting.org",
+  textVersion = paste(
+      "We are in process of writing a journal paper about the package, but for now we referer to the paper 'Short-term heat load forecasting for single family houses', in which the implemented modelling is described."
+  )
+)
diff --git a/make.R b/make.R
new file mode 100644
index 0000000000000000000000000000000000000000..b9b2e60552fdba2ed36cf288a372228225f6ccc1
--- /dev/null
+++ b/make.R
@@ -0,0 +1,71 @@
+# These packages must be installed
+
+# For building vignettes
+# install.packages("R.rsp")
+
+# cpp matrix library
+# install.packages("RcppArmadillo")
+
+library(devtools)
+library(roxygen2)
+
+# pack <- as.package("../onlineforecast")
+# load_all(pack)
+
+# Update NAMESPACE: use this function to export all functions tagged with @export.
+# S3 methods (e.g. print.lm) would otherwise only be registered via S3method(), so those entries are rewritten to export().
+docit <- function(){
+    document()
+    # Read
+    nm <- "NAMESPACE"
+    x <- scan(nm, what="character", sep="\n",blank.lines.skip=FALSE)
+    # Manipulate x
+    for(i in 1:length(x)){
+        if(length(grep("^S3method", x[i])) > 0){
+            x[i] <- gsub(",",".",gsub("S3method", "export", x[i]))
+        }
+    }
+    #
+    write(x, nm)
+}
+docit()
+
+# ----------------------------------------------------------------
+# For running tests in folder "tests/testthat/"
+# https://kbroman.org/pkg_primer/pages/tests.html
+# http://r-pkgs.had.co.nz/tests.html
+# Initialize the testing framework the first time
+#use_testthat()
+# Init new test
+#use_test("newtest")
+
+# # Run all tests
+# test()
+
+# # Run the examples
+# run_examples()
+
+# # Run tests in a single file
+# load_all(as.package("../onlineforecast"))
+# test_file("tests/testthat/test-rls-heat-load.R")
+
+
+# ----------------------------------------------------------------
+# Build the package (remember to rebuild vignettes for release)
+build(".", vignettes=TRUE)
+
+# Install it
+install.packages("../onlineforecast_0.1.0.tar.gz")
+
+library(onlineforecast)
+
+# # Add new vignette
+#usethis::use_vignette("test")
+
+
+# # ----------------------------------------------------------------
+# # Load the current version directly from the folder
+# docit()
+# load_all(as.package("../../onlineforecast"), export_all=FALSE)
+
+# # What is exported?
+# onlineforecast::
diff --git a/misc-R/.Rhistory~ b/misc-R/.Rhistory~
new file mode 100644
index 0000000000000000000000000000000000000000..5503199be76081abc656fc99a3d14c4f0dbe7f1e
--- /dev/null
+++ b/misc-R/.Rhistory~
@@ -0,0 +1,512 @@
+Dnew_transformed$AR.Order1
+modelAR$yAR
+rls_update(modelAR, Dnew_transformed, Dnew[[model$output]])
+yhatAR <- rls_predict(modelAR, Dnew_transformed)
+modelAR$yAR
+(i <- iseq[length(iseq)] + 2)
+Dnew <- subset(D, i)
+Dnew_transformed <- model$transform_data(Dnew)
+rls_update(model, Dnew_transformed, Dnew[[model$output]])
+yhat <- rls_predict(model, Dnew_transformed)
+iseq <- i+modelAR$kseq
+plot(D$t[iseq], D$y[iseq], type = "b", xlab = "t", ylab = "y")
+lines(D$t[iseq], yhat[1,], type = "b", col = 2)
+lines(D$t[iseq], yhatAR[1,], type = "b", col = 4)
+legend("topright", c("observations",pst("predictions (",min(modelAR$kseq)," to ",max(modelAR$kseq)," steps ahead)"),
+pst("Predictions AR (",min(model$kseq)," to ",max(model$kseq)," steps ahead)")), lty = 1, col = c(1,2,4))
+Ex1data <- data.frame(x = c(71,74,82,76,91,82,82,75,79,82,72,90))
+10:90
+ObsVar <- var(Ex1data$x)
+ObsVar
+1/sqrt(2*pi*ObsVar) * exp(-(71 - 70)^2/(2*ObsVar))
+1/sqrt(2*pi*ObsVar) * exp(-(72 - 70)^2/(2*ObsVar))
+1/sqrt(2*pi*ObsVar) * exp(-(c(71,72) - 70)^2/(2*ObsVar))
+1/sqrt(2*pi*ObsVar) * exp(-(Ex1data$x - 70)^2/(2*ObsVar))
+prod(1/sqrt(2*pi*ObsVar) * exp(-(Ex1data$x - 70)^2/(2*ObsVar)))
+LikelihoodAll
+ObsVar <- var(Ex1data$x)
+LikelihoodAll <- sapply(70:91, function(mean) prod(1/sqrt(2*pi*ObsVar) * exp(-(Ex1data$x - mean)^2/(2*ObsVar))))
+LikelihoodAll
+plot(LikelihoodAll)
+plot(70:91LikelihoodAll)
+plot(70:91, LikelihoodAll)
+ObsVar <- 1#var(Ex1data$x)
+test <- c(4.6,6.3,5.0)
+LikelihoodAll <- sapply(4:6.5, function(mean) prod(1/sqrt(2*pi*ObsVar) * exp(-(test - mean)^2/(2*ObsVar))))
+plot(4:6.5, LikelihoodAll)
+LikelihoodAll <- sapply(seq(4,6.5,0.1), function(mean) prod(1/sqrt(2*pi*ObsVar) * exp(-(test - mean)^2/(2*ObsVar))))
+plot(seq(4,6.5,0.1), LikelihoodAll)
+LikelihoodAll <- sapply(seq(70,95,0.1), function(mean) prod(1/sqrt(2*pi*ObsVar) * exp(-(Ex1data$x - mean)^2/(2*ObsVar))))
+Seq <- seq(70,95,0.1
+LikelihoodAll <- sapply(), function(mean) prod(1/sqrt(2*pi*ObsVar) * exp(-(Ex1data$x - mean)^2/(2*ObsVar))))
+Seq <- seq(70,95,0.1)
+LikelihoodAll <- sapply(Seq, function(mean) prod(1/sqrt(2*pi*ObsVar) * exp(-(Ex1data$x - mean)^2/(2*ObsVar))))
+plot(Seq, LikelihoodAll)
+Seq <- seq(75,85,0.1)
+LikelihoodAll <- sapply(Seq, function(mean) prod(1/sqrt(2*pi*ObsVar) * exp(-(Ex1data$x - mean)^2/(2*ObsVar))))
+plot(Seq, LikelihoodAll)
+L.complete.data <- function(theta) prod(dnorm(Ex1data$x, mean = theta, sd = sqrt(s2)))
+plot(Seq, LikelihoodAll)
+L.complete.data <- function(theta) prod(dnorm(Ex1data$x, mean = theta, sd = sqrt(ObsVar)))
+th <- seq(mean(Ex1data$x) - 3*sqrt(ObsVar), mean(Ex1data$x) + 3* sqrt(ObsVar), length = 200)
+L <- sapply(th, L.complete.data)
+plot(th, L)
+plot(Seq, LikelihoodAll)
+plot(th, L)
+plot(Seq, LikelihoodAll)
+plot(th, L)
+plot(th, L/max(L), ylab = "L", xlab = expression(theta))
+L.ave <- function(theta) dnorm(mean(Ex1data$x), mean = theta, sd = sqrt(ObsVar/length(Ex1data$x)))
+LAverage <- sapply(th, L.ave)
+lines(th, LAverage/max(LAverage), col = "red")
+mean(Ex1data$x)
+abline(v = mean(Ex1data$x), col = "blue")
+mle.estimate <- function(th) -sum(dnorm(Ex1data$x, mean = theta, sd = sqrt(ObsVar), log = TRUE))
+fit <- optiom(par = mean(Ex1data$x),
+fn = mle.estimate,
+hessian = TRUE)
+fit <- optim(par = mean(Ex1data$x),
+fn = mle.estimate,
+hessian = TRUE)
+?optim
+mle.estimate <- function(th) -sum(dnorm(Ex1data$x, mean = th, sd = sqrt(ObsVar), log = TRUE))
+fit <- optim(par = mean(Ex1data$x),
+fn = mle.estimate,
+hessian = TRUE)
+mle.estimate <- function(th) -sum(dnorm(Ex1data$x, mean = th, sd = sqrt(ObsVar), log = TRUE))
+fit <- optim(par = mean(Ex1data$x),
+fn = mle.estimate,
+hessian = TRUE,
+method = "Brent")
+fit <- optim(par = mean(Ex1data$x),
+fn = mle.estimate,
+hessian = TRUE,
+method = "Brent",
+lower = min(Ex1data$x),
+upper = max(Ex1data$x))
+print(fit)
+fit <- optim(par = mean(Ex1data$x),
+fn = mle.estimate,
+hessian = TRUE,
+method = "Brent",
+lower = min(Ex1data$x),
+upper = max(Ex1data$x))
+print(fit)
+fit <- optim(par = mean(Ex1data$x),
+fn = mle.estimate,
+hessian = TRUE,
+method = "Brent",
+lower = min(Ex1data$x),
+upper = max(Ex1data$x))
+print(fit)
+fit$hessian
+1/fit$hessian
+ObsVar
+1
+ObsVar <- var(Ex1data$x)
+mle.estimate <- function(th) -sum(dnorm(Ex1data$x, mean = th, sd = sqrt(ObsVar), log = TRUE))
+fit <- optim(par = mean(Ex1data$x),
+fn = mle.estimate,
+hessian = TRUE,
+method = "Brent",
+lower = min(Ex1data$x),
+upper = max(Ex1data$x))
+print(fit)
+1/fit$hessian
+ObsVar
+print(solve(fit$hessian))
+print(ObsVar)
+print(solve(fit$hessian))
+print(sqrt(ObsVar))
+print(solve(fit$hessian))
+print(ObsVar/length(Ex1data$x))
+print(solve(fit$hessian))
+Ex3data <- data.frame(x = c(4,6,3,7,2,4))
+?dpois
+lambdaseq = seq(-4,0,0.1)
+Posres <- optim(par = lambdaseq,
+fn = PosLogLikelihood)
+PosLogLikelihood <- function(lambda) -sum(dpois(x = Ex3data$x, lambda = lambda, log = TRUE))
+lambdaseq = seq(-4,0,0.1)
+Posres <- optim(par = lambdaseq,
+fn = PosLogLikelihood)
+lambdaseq = seq(-4,0,0.1)
+lambdaseq
+Posres <- optim(par = lambdaseq,
+fn = PosLogLikelihood,
+)
+Posres
+plot(lambdaseq, PosLogLikelihood(lambdaseq))
+dpois(x = Ex3data$x, lambda = 0, log = TRUE)
+Ex3data
+PosLogLikelihood <- function(lambda) sum(dpois(x = Ex3data$x, lambda = lambda, log = TRUE))
+lambdaseq = seq(-4,0,0.1)
+plot(lambdaseq, PosLogLikelihood(lambdaseq))
+lambdaseq = seq(1,8,0.1)
+plot(lambdaseq, PosLogLikelihood(lambdaseq))
+PosLogLikelihood <- function(lambda) sum(dpois(x = Ex3data$x, lambda = lambda, log = TRUE))
+lambdaseq = seq(1,8,0.1)
+plot(lambdaseq, PosLogLikelihood(lambdaseq))
+lambdaseq
+PosLogLikelihood(lambdaseq)
+plot(lambdaseq, sapply(lambdaseq, PosLogLikelihood))
+L <- sapply(lambdaseq, PosLogLikelihood)
+plot(lambdaseq, L - max(L))
+lambdaseq = seq(2,7,0.1)
+L <- sapply(lambdaseq, PosLogLikelihood)
+plot(lambdaseq, L - max(L))
+lambdaseq = seq(2.5,7,0.1)
+L <- sapply(lambdaseq, PosLogLikelihood)
+plot(lambdaseq, L - max(L))
+lambdaseq = seq(2.3,7,0.1)
+L <- sapply(lambdaseq, PosLogLikelihood)
+plot(lambdaseq, L - max(L))
+lambdaseq = seq(2.3,7.4,0.1)
+L <- sapply(lambdaseq, PosLogLikelihood)
+plot(lambdaseq, L - max(L))
+lambdaseq = seq(2.3,7.3,0.1)
+L <- sapply(lambdaseq, PosLogLikelihood)
+plot(lambdaseq, L - max(L))
+L.quad <- sapply(lambdaseq, l.quad)
+l.quad <- function(lambda) dpois(x = mean(Ex3data$x), lambda = lambda, log = TRUE) - 0.5*length(Ex3data$x)/mean(Ex3data$x) * (lambda - mean(Ex3data$x))^2
+L.quad <- sapply(lambdaseq, l.quad)
+warnings()
+L.quad
+l.quad <- function(lambda) dpois(x = Ex3data$x, lambda =  mean(Ex3data$x), log = TRUE) - 0.5*length(Ex3data$x)/mean(Ex3data$x) * (lambda - mean(Ex3data$x))^2
+L.quad <- sapply(lambdaseq, l.quad)
+lines(lambdaseq, l.quad, col = "red")
+lines(lambdaseq, L.quad, col = "red")
+L.quad
+l.quad <- function(lambda) sum(dpois(x = Ex3data$x, lambda =  mean(Ex3data$x), log = TRUE)) - 0.5*length(Ex3data$x)/mean(Ex3data$x) * (lambda - mean(Ex3data$x))^2
+L.quad <- sapply(lambdaseq, l.quad)
+lines(lambdaseq, L.quad, col = "red")
+L.quad
+lines(lambdaseq, L.quad - max(L.quad), col = "red")
+PosLogLikelihood <- function(lambda) sum(dpois(x = Ex3data$x, lambda = lambda, log = TRUE))
+lambdaseq = seq(2,7.3,0.1)
+L <- sapply(lambdaseq, PosLogLikelihood)
+plot(lambdaseq, L - max(L))
+l.quad <- function(lambda) sum(dpois(x = Ex3data$x, lambda =  mean(Ex3data$x), log = TRUE)) - 0.5*length(Ex3data$x)/mean(Ex3data$x) * (lambda - mean(Ex3data$x))^2
+L.quad <- sapply(lambdaseq, l.quad)
+lines(lambdaseq, L.quad - max(L.quad), col = "red")
+PosLogLikelihood <- function(lambda) sum(dpois(x = Ex3data$x, lambda = lambda, log = TRUE))
+lambdaseq = seq(2.3,7.3,0.1)
+L <- sapply(lambdaseq, PosLogLikelihood)
+plot(lambdaseq, L - max(L))
+l.quad <- function(lambda) sum(dpois(x = Ex3data$x, lambda =  mean(Ex3data$x), log = TRUE)) - 0.5*length(Ex3data$x)/mean(Ex3data$x) * (lambda - mean(Ex3data$x))^2
+L.quad <- sapply(lambdaseq, l.quad)
+lines(lambdaseq, L.quad - max(L.quad), col = "red")
+plot(lambdaseq, exp(L - max(L)))
+lines(lambdaseq, exp(L.quad - max(L.quad)), col = "red")
+par(mfrow = c(1,2))
+plot(lambdaseq, L - max(L))
+lines(lambdaseq, L.quad - max(L.quad), col = "red")
+plot(lambdaseq, exp(L - max(L)))
+lines(lambdaseq, exp(L.quad - max(L.quad)), col = "red")
+## Demo on how the AR part works in onlineforecast
+rm(list = ls())
+library(devtools)
+load_all(as.package("../../onlineforecast"))
+class(Dbuildingheatload)
+D <- Dbuildingheatload
+D$y <- D$Heatload$house9
+plot_ts(D, c("^y","Ta"), kseq=c(0,12))
+plot_ts(D, c("^y","Ta"), "2010-12-15", "2010-12-25", kseq=c(0,12))
+Dtrain <- subset(D, c("2010-12-15", "2011-01-01"))
+Dtrain$fiteval <- period("2010-12-20", Dtrain$t)
+model <- forecastmodel$new()
+model$output = "y"
+model$add_inputs(Ta = "lp(Ta, a1=0.9)",
+I = "lp(I, a1=0.7)",
+mu_tday = "fs(tday/24, nharmonics=10)",
+mu = "ones()")
+model$add_regp("rls_prm(lambda=0.9)")
+model$add_pb(Ta__a1 = c(0.8, 0.9, 0.9999),
+I__a1 =  c(0.4, 0.8, 0.9999),
+lambda = c(0.9, 0.99, 0.9999))
+model$kseq <- c(1,18)
+model$p <- rls_optim(model, Dtrain, control=list(maxit=2), cachedir = "cache-building-heat-load-forecasting")$par
+model$kseq <- 1:36
+val <- rls_fit(model$p, model, D, returnanalysis = TRUE)
+D$Yhat <- val$Yhat
+i <- 200
+iseq <- i+model$kseq
+plot(D$t[iseq], D$y[iseq], type = "b", xlab = "t", ylab = "y")
+lines(D$t[iseq], D$Yhat[i, ], type = "b", col = 2)
+legend("topright", c("Observations",pst("Predictions (",min(model$kseq)," to ",max(model$kseq)," steps ahead)")), lty = 1, col = 1:2)
+iseq <- which(period("2010-12-15",D$t,"2011-01-01"))
+Dfit <- subset(D, iseq)
+rls_fit(model$p, model, Dfit)
+(i <- iseq[length(iseq)] + 1)
+Dnew <- subset(D, i)
+Dnew_transformed <- model$transform_data(Dnew)
+rls_update(model, Dnew_transformed, Dnew[[model$output]])
+yhat <- rls_predict(model, Dnew_transformed)
+## AR part
+#source("AR.R")
+modelAR <- forecastmodel$new()
+modelAR$output = "y"
+modelAR$add_inputs(Ta = "lp(Ta, a1=0.9)",
+I = "lp(I, a1=0.7)",
+mu_tday = "fs(tday/24, nharmonics=10)",
+mu = "ones()",
+AR = "AR(lags=c(0))")
+modelAR$add_regp("rls_prm(lambda=0.9)")
+modelAR$add_pb(Ta__a1 = c(0.8, 0.9, 0.9999),
+I__a1 =  c(0.4, 0.8, 0.9999),
+lambda = c(0.9, 0.99, 0.9999))
+modelAR$kseq <- c(1,18)
+modelAR$p <- rls_optim(modelAR, Dtrain, control=list(maxit=2), cachedir = "cache-building-heat-load-forecasting")$par
+modelAR$kseq <- 1:36
+valAR <- rls_fit(modelAR$p, modelAR, D, returnanalysis = TRUE)
+D$YhatAR <- valAR$Yhat
+plot_ts(D, c("^y|^Y"), "2011-01-01", "2011-02-01", kseq = c(1,18))
+plot_ts(D, c("^y|^Y"), "2011-01-01", "2011-01-10", kseq = c(1,18))
+i <- 200
+iseq <- i+modelAR$kseq
+plot(D$t[iseq], D$y[iseq], type = "b", xlab = "t", ylab = "y")
+lines(D$t[iseq], D$Yhat[i, ], type = "b", col = 2)
+lines(D$t[iseq], D$YhatAR[i, ], type = "b", col = 4)
+legend("topright", c("Observations",pst("Predictions (",min(model$kseq)," to ",max(model$kseq)," steps ahead)"),
+pst("Predictions AR (",min(model$kseq)," to ",max(model$kseq)," steps ahead)")), lty = 1, col = c(1,2,4))
+sqrt(mean( as.numeric((D$y[iseq] -  D$Yhat[i, ])^2), na.rm = T))
+sqrt(mean( as.numeric((D$y[iseq] -  D$YhatAR[i, ])^2), na.rm = T))
+## Update
+## THINKABOUT WITH bigger lag and more new observation!!!
+model <- forecastmodel$new()
+model$output = "y"
+modelAR$add_inputs(Ta = "lp(Ta, a1=0.9)",
+I = "lp(I, a1=0.7)",
+mu_tday = "fs(tday/24, nharmonics=10)",
+mu = "ones()",
+AR = "AR(lags=c(0,1,4))")
+modelAR$add_regp("rls_prm(lambda=0.9)")
+modelAR$add_pb(Ta__a1 = c(0.8, 0.9, 0.9999),
+I__a1 =  c(0.4, 0.8, 0.9999),
+lambda = c(0.9, 0.99, 0.9999))
+modelAR$kseq <- c(1,18)
+iseq <- which(period("2010-12-15",D$t,"2011-01-01"))
+Dfit <- subset(D, iseq)
+rls_fit(modelAR$p, modelAR, Dfit)
+str(modelAR$Lfits[1:2])
+(i <- iseq[length(iseq)] + 1)
+Dnew <- subset(D, i:(i))
+Dnew$y
+modelAR$yAR
+D$y[(i-6):i]
+Dnew_transformed <- modelAR$transform_data(Dnew)
+Dnew_transformed$AR.lag0
+Dnew_transformed$AR.lag1
+Dnew_transformed$AR.lag4
+## modelAR$yAR
+## rls_update(modelAR, Dnew_transformed, Dnew[[model$output]])
+## yhatAR <- rls_predict(modelAR, Dnew_transformed)
+## modelAR$yAR
+## iseq <- i+modelAR$kseq
+## plot(D$t[iseq], D$y[iseq], type = "b", xlab = "t", ylab = "y")
+## lines(D$t[iseq], yhat[1,], type = "b", col = 2)
+## lines(D$t[iseq], yhatAR[1,], type = "b", col = 4)
+## legend("topright", c("observations",pst("predictions (",min(modelAR$kseq)," to ",max(modelAR$kseq)," steps ahead)"),
+##                      pst("Predictions AR (",min(model$kseq)," to ",max(model$kseq)," steps ahead)")), lty = 1, col = c(1,2,4))
+## (i <- iseq[length(iseq)] + 2)
+## Dnew <- subset(D, i:(i))
+## Dnew$y
+## modelAR$yAR
+## tail(Dfit$y)
+## Dnew_transformed <- modelAR$transform_data(Dnew)
+## Dnew_transformed$AR.lag3
+## Dnew_transformed$AR.Order2
+## Dnew_transformed$AR.Order1
+## modelAR$yAR
+## rls_update(modelAR, Dnew_transformed, Dnew[[model$output]])
+## yhatAR <- rls_predict(modelAR, Dnew_transformed)
+## modelAR$yAR
+## (i <- iseq[length(iseq)] + 2)
+## Dnew <- subset(D, i)
+## Dnew_transformed <- model$transform_data(Dnew)
+## rls_update(model, Dnew_transformed, Dnew[[model$output]])
+## yhat <- rls_predict(model, Dnew_transformed)
+## iseq <- i+modelAR$kseq
+## plot(D$t[iseq], D$y[iseq], type = "b", xlab = "t", ylab = "y")
+## lines(D$t[iseq], yhat[1,], type = "b", col = 2)
+## lines(D$t[iseq], yhatAR[1,], type = "b", col = 4)
+## legend("topright", c("observations",pst("predictions (",min(modelAR$kseq)," to ",max(modelAR$kseq)," steps ahead)"),
+##                      pst("Predictions AR (",min(model$kseq)," to ",max(model$kseq)," steps ahead)")), lty = 1, col = c(1,2,4))
+## Demo on how the AR part works in onlineforecast
+rm(list = ls())
+library(devtools)
+load_all(as.package("../../onlineforecast"))
+class(Dbuildingheatload)
+D <- Dbuildingheatload
+D$y <- D$Heatload$house9
+plot_ts(D, c("^y","Ta"), kseq=c(0,12))
+plot_ts(D, c("^y","Ta"), "2010-12-15", "2010-12-25", kseq=c(0,12))
+Dtrain <- subset(D, c("2010-12-15", "2011-01-01"))
+Dtrain$fiteval <- period("2010-12-20", Dtrain$t)
+model <- forecastmodel$new()
+model$output = "y"
+model$add_inputs(Ta = "lp(Ta, a1=0.9)",
+I = "lp(I, a1=0.7)",
+mu_tday = "fs(tday/24, nharmonics=10)",
+mu = "ones()")
+model$add_regp("rls_prm(lambda=0.9)")
+model$add_pb(Ta__a1 = c(0.8, 0.9, 0.9999),
+I__a1 =  c(0.4, 0.8, 0.9999),
+lambda = c(0.9, 0.99, 0.9999))
+model$kseq <- c(1,18)
+model$p <- rls_optim(model, Dtrain, control=list(maxit=2), cachedir = "cache-building-heat-load-forecasting")$par
+model$kseq <- 1:36
+val <- rls_fit(model$p, model, D, returnanalysis = TRUE)
+D$Yhat <- val$Yhat
+i <- 200
+iseq <- i+model$kseq
+plot(D$t[iseq], D$y[iseq], type = "b", xlab = "t", ylab = "y")
+lines(D$t[iseq], D$Yhat[i, ], type = "b", col = 2)
+legend("topright", c("Observations",pst("Predictions (",min(model$kseq)," to ",max(model$kseq)," steps ahead)")), lty = 1, col = 1:2)
+iseq <- which(period("2010-12-15",D$t,"2011-01-01"))
+Dfit <- subset(D, iseq)
+rls_fit(model$p, model, Dfit)
+(i <- iseq[length(iseq)] + 1)
+Dnew <- subset(D, i)
+Dnew_transformed <- model$transform_data(Dnew)
+rls_update(model, Dnew_transformed, Dnew[[model$output]])
+yhat <- rls_predict(model, Dnew_transformed)
+## AR part
+#source("AR.R")
+modelAR <- forecastmodel$new()
+modelAR$output = "y"
+modelAR$add_inputs(Ta = "lp(Ta, a1=0.9)",
+I = "lp(I, a1=0.7)",
+mu_tday = "fs(tday/24, nharmonics=10)",
+mu = "ones()",
+AR = "AR(lags=c(0))")
+modelAR$add_regp("rls_prm(lambda=0.9)")
+modelAR$add_pb(Ta__a1 = c(0.8, 0.9, 0.9999),
+I__a1 =  c(0.4, 0.8, 0.9999),
+lambda = c(0.9, 0.99, 0.9999))
+modelAR$kseq <- c(1,18)
+modelAR$p <- rls_optim(modelAR, Dtrain, control=list(maxit=2), cachedir = "cache-building-heat-load-forecasting")$par
+modelAR$kseq <- 1:36
+valAR <- rls_fit(modelAR$p, modelAR, D, returnanalysis = TRUE)
+D$YhatAR <- valAR$Yhat
+plot_ts(D, c("^y|^Y"), "2011-01-01", "2011-02-01", kseq = c(1,18))
+plot_ts(D, c("^y|^Y"), "2011-01-01", "2011-01-10", kseq = c(1,18))
+i <- 200
+iseq <- i+modelAR$kseq
+plot(D$t[iseq], D$y[iseq], type = "b", xlab = "t", ylab = "y")
+lines(D$t[iseq], D$Yhat[i, ], type = "b", col = 2)
+lines(D$t[iseq], D$YhatAR[i, ], type = "b", col = 4)
+legend("topright", c("Observations",pst("Predictions (",min(model$kseq)," to ",max(model$kseq)," steps ahead)"),
+pst("Predictions AR (",min(model$kseq)," to ",max(model$kseq)," steps ahead)")), lty = 1, col = c(1,2,4))
+sqrt(mean( as.numeric((D$y[iseq] -  D$Yhat[i, ])^2), na.rm = T))
+sqrt(mean( as.numeric((D$y[iseq] -  D$YhatAR[i, ])^2), na.rm = T))
+## Update
+## THINKABOUT WITH bigger lag and more new observation!!!
+model <- forecastmodel$new()
+model$output = "y"
+modelAR$add_inputs(Ta = "lp(Ta, a1=0.9)",
+I = "lp(I, a1=0.7)",
+mu_tday = "fs(tday/24, nharmonics=10)",
+mu = "ones()",
+AR = "AR(lags=c(0,1,4))")
+modelAR$add_regp("rls_prm(lambda=0.9)")
+modelAR$add_pb(Ta__a1 = c(0.8, 0.9, 0.9999),
+I__a1 =  c(0.4, 0.8, 0.9999),
+lambda = c(0.9, 0.99, 0.9999))
+modelAR$kseq <- c(1,18)
+iseq <- which(period("2010-12-15",D$t,"2011-01-01"))
+Dfit <- subset(D, iseq)
+rls_fit(modelAR$p, modelAR, Dfit)
+str(modelAR$Lfits[1:2])
+(i <- iseq[length(iseq)] + 1)
+Dnew <- subset(D, i:(i))
+Dnew$y
+modelAR$yAR
+D$y[(i-6):i]
+Dnew_transformed <- modelAR$transform_data(Dnew)
+Dnew_transformed$AR.lag0
+Dnew_transformed$AR.lag1
+Dnew_transformed$AR.lag4
+## modelAR$yAR
+## rls_update(modelAR, Dnew_transformed, Dnew[[model$output]])
+## yhatAR <- rls_predict(modelAR, Dnew_transformed)
+## modelAR$yAR
+## iseq <- i+modelAR$kseq
+## plot(D$t[iseq], D$y[iseq], type = "b", xlab = "t", ylab = "y")
+## lines(D$t[iseq], yhat[1,], type = "b", col = 2)
+## lines(D$t[iseq], yhatAR[1,], type = "b", col = 4)
+## legend("topright", c("observations",pst("predictions (",min(modelAR$kseq)," to ",max(modelAR$kseq)," steps ahead)"),
+##                      pst("Predictions AR (",min(model$kseq)," to ",max(model$kseq)," steps ahead)")), lty = 1, col = c(1,2,4))
+## (i <- iseq[length(iseq)] + 2)
+## Dnew <- subset(D, i:(i))
+## Dnew$y
+## modelAR$yAR
+## tail(Dfit$y)
+## Dnew_transformed <- modelAR$transform_data(Dnew)
+## Dnew_transformed$AR.lag3
+## Dnew_transformed$AR.Order2
+## Dnew_transformed$AR.Order1
+## modelAR$yAR
+## rls_update(modelAR, Dnew_transformed, Dnew[[model$output]])
+## yhatAR <- rls_predict(modelAR, Dnew_transformed)
+## modelAR$yAR
+## (i <- iseq[length(iseq)] + 2)
+## Dnew <- subset(D, i)
+## Dnew_transformed <- model$transform_data(Dnew)
+## rls_update(model, Dnew_transformed, Dnew[[model$output]])
+## yhat <- rls_predict(model, Dnew_transformed)
+## iseq <- i+modelAR$kseq
+## plot(D$t[iseq], D$y[iseq], type = "b", xlab = "t", ylab = "y")
+## lines(D$t[iseq], yhat[1,], type = "b", col = 2)
+## lines(D$t[iseq], yhatAR[1,], type = "b", col = 4)
+## legend("topright", c("observations",pst("predictions (",min(modelAR$kseq)," to ",max(modelAR$kseq)," steps ahead)"),
+##                      pst("Predictions AR (",min(model$kseq)," to ",max(model$kseq)," steps ahead)")), lty = 1, col = c(1,2,4))
+valAR$Yhat
+head(D)
+head(D$t)
+dim(valAR$Yhat)
+dummyDF <- data.frame(PredTime = D$t[1],
+Time = D$t[2:dim(valAR$Yhat)[2]])
+dummyDF
+dummyDF <- data.frame(PredTime = D$t[1],
+Time = D$t[2:dim(valAR$Yhat)[2]],
+k = 1:(dim(valAR$Yhat)[2]))
+dummyDF
+predictionDataFrame <- function()
+{
+head(D$t)
+dummyDF <- data.frame(PredTime = D$t[1],
+Time = D$t[2:dim(valAR$Yhat)[2]],
+k = 1:(dim(valAR$Yhat)[2]))
+dummyDF
+}
+dummyDF <- data.frame(PredTime = D$t[1],
+Time = D$t[2:dim(valAR$Yhat)[2]],
+k = 1:(dim(valAR$Yhat)[2]))
+dummyDF <- data.frame(PredTime = D$t[1],
+Time = D$t[2:(dim(valAR$Yhat)[2]+1)],
+k = 1:(dim(valAR$Yhat)[2]))
+dummyDF
+dummyDF <- data.frame(PredTime = D$t[1],
+Time = D$t[2:(dim(valAR$Yhat)[2]+1)],
+k = 1:(dim(valAR$Yhat)[2]),
+Pred = valAR$Yhat[1,])
+valAR$Yhat[1,]
+dummyDF <- data.frame(PredTime = D$t[1],
+Time = D$t[2:(dim(valAR$Yhat)[2]+1)],
+k = 1:(dim(valAR$Yhat)[2]),
+Pred = as.numeric(valAR$Yhat[1,]))
+dummyDF
diff --git a/misc-R/arfunction.R b/misc-R/arfunction.R
new file mode 100644
index 0000000000000000000000000000000000000000..296ec2cbed2842390bd79b8edb7f31cf70714739
--- /dev/null
+++ b/misc-R/arfunction.R
@@ -0,0 +1,190 @@
+## Demo on how the AR part works in onlineforecast
+rm(list = ls())
+library(devtools)
+load_all(as.package("../../onlineforecast"))
+
+class(Dbuildingheatload)
+D <- Dbuildingheatload
+D$y <- D$heatload
+
+plot_ts(D, c("^y","Ta"), kseq=c(0,12))
+plot_ts(D, c("^y","Ta"), "2010-12-15", "2010-12-25", kseq=c(0,12))
+
+Dtrain <- subset(D, c("2010-12-15", "2011-01-01"))
+Dtrain$scoreperiod <- in_range("2010-12-20", Dtrain$t)
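+## scoreperiod marks which points are included in the score evaluation when
+## optimizing the parameters (points with scoreperiod == FALSE are left out)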
+model <- forecastmodel$new()
+model$output = "y"
+model$add_inputs(Ta = "lp(Ta, a1=0.9)", 
+                 I = "lp(I, a1=0.7)", 
+                 mu_tday = "fs(tday/24, nharmonics=10)",
+                 mu = "ones()")
+model$add_regprm("rls_prm(lambda=0.9)")
+model$add_prmbounds(Ta__a1 = c(0.8, 0.9, 0.9999),
+                    I__a1 =  c(0.4, 0.8, 0.9999),
+                    lambda = c(0.9, 0.99, 0.9999))
+model$kseq <- c(1,18)
+model$prm <- rls_optim(model, Dtrain, control=list(maxit=2), cachedir = "cache-building-heat-load-forecasting")$par
+model$kseq <- 1:36
+val <- rls_fit(model$prm, model, D, returnanalysis = TRUE)
+D$Yhat <- val$Yhat
+
+i <- 200
+iseq <- i+model$kseq
+plot(D$t[iseq], D$y[iseq], type = "b", xlab = "t", ylab = "y")
+lines(D$t[iseq], D$Yhat[i, ], type = "b", col = 2)
+legend("topright", c("Observations",pst("Predictions (",min(model$kseq)," to ",max(model$kseq)," steps ahead)")), lty = 1, col = 1:2)
+iseq <- which(in_range("2010-12-15",D$t,"2011-01-01"))
+Dfit <- subset(D, iseq)
+rls_fit(model$prm, model, Dfit, returnanalysis = FALSE)
+(i <- iseq[length(iseq)] + 1)
+Dnew <- subset(D, i)
+Dnew_transformed <- model$transform_data(Dnew)
+rls_update(model, Dnew_transformed, Dnew[[model$output]])
+yhat <- rls_predict(model, Dnew_transformed)
+
+iseq <- i+model$kseq
+plot(D$t[iseq], D$y[iseq], type = "b", xlab = "t", ylab = "y")
+lines(D$t[iseq], yhat, type = "b", col = 2)
+legend("topright", c("observations",pst("predictions (",min(model$kseq)," to ",max(model$kseq)," steps ahead)")), lty = 1, col = 1:2)
+
+## AR part
+modelAR <- forecastmodel$new()
+modelAR$output = "y"
+modelAR$add_inputs(Ta = "lp(Ta, a1=0.9)", 
+                   I = "lp(I, a1=0.7)", 
+                   mu_tday = "fs(tday/24, nharmonics=10)",
+                   mu = "ones()", 
+                   AR = "AR(lags=c(0))")
+modelAR$add_regprm("rls_prm(lambda=0.9)")
+modelAR$add_prmbounds(Ta__a1 = c(0.8, 0.9, 0.9999),
+                      I__a1 =  c(0.4, 0.8, 0.9999),
+                      lambda = c(0.9, 0.99, 0.9999))
+modelAR$kseq <- c(1,18)
+
+modelAR$prm <- rls_optim(modelAR, Dtrain, control=list(maxit=2), cachedir = "cache-building-heat-load-forecasting")$par
+modelAR$kseq <- 1:36
+valAR <- rls_fit(modelAR$prm, modelAR, D, returnanalysis = TRUE)
+names(valAR)
+names(modelAR)
+
+D$YhatAR <- valAR$Yhat
+plot_ts(D, c("^y|^Y"), "2011-01-01", "2011-02-01", kseq = c(1,18))
+plot_ts(D, c("^y|^Y"), "2011-01-01", "2011-01-10", kseq = c(1,18))
+i <- 200
+iseq <- i+modelAR$kseq
+plot(D$t[iseq], D$y[iseq], type = "b", xlab = "t", ylab = "y")
+lines(D$t[iseq], D$Yhat[i, ], type = "b", col = 2)
+lines(D$t[iseq], D$YhatAR[i, ], type = "b", col = 4)
+legend("topright", c("Observations",pst("Predictions (",min(model$kseq)," to ",max(model$kseq)," steps ahead)"),
+                     pst("Predictions AR (",min(model$kseq)," to ",max(model$kseq)," steps ahead)")), lty = 1, col = c(1,2,4))
+
+sqrt(mean( as.numeric((D$y[iseq] -  D$Yhat[i, ])^2), na.rm = T))
+sqrt(mean( as.numeric((D$y[iseq] -  D$YhatAR[i, ])^2), na.rm = T))
+
+## Prediction Data Frame
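+## (long_format reshapes the forecast matrix into a long data.frame with one row per
+## forecast: the issue time PredTime, the target Time, the horizon k and the value Pred)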
+PredictionDF <- long_format(fit = valAR, Time = D$t) # long_format(fit = valAR, Time = D$t)
+head(PredictionDF[2000:2500,], 50)
+head(PredictionDF[(36*36):(36*36+36),],40)
+
+
+i <- 200
+iseq <- i+modelAR$kseq
+plot(D$t[iseq], D$y[iseq], type = "b", xlab = "t", ylab = "y")
+lines(D$t[iseq], D$Yhat[i, ], type = "b", col = 2)
+lines(PredictionDF[PredictionDF$PredTime == D$t[i],]$Time, PredictionDF[PredictionDF$PredTime == D$t[i],]$Pred, type = "b", col = 5, cex = 0.4)
+lines(D$t[iseq], D$YhatAR[i, ], type = "b", col = 4)
+legend("topright", c("Observations",pst("Predictions (",min(model$kseq)," to ",max(model$kseq)," steps ahead)"),
+                     pst("Predictions AR (",min(model$kseq)," to ",max(model$kseq)," steps ahead)")), lty = 1, col = c(1,2,4)
+)
+
+## Selecting just the one hour forecasts
+OneHourPred <- PredictionDF[PredictionDF$k == 1,]
+length(OneHourPred$PredTime)
+head(OneHourPred)
+length(valAR$Yhat$k1)
+head(valAR$Yhat$k1)
+
+plot(OneHourPred$Time, OneHourPred$Pred, type = "l")
+lines(D$t, D$y, col = "red")
+
+## Update
+
+## THINKABOUT WITH bigger lag and more new observation!!!
+
+model <- forecastmodel$new()
+model$output = "y"
+modelAR$add_inputs(Ta = "lp(Ta, a1=0.9)", 
+                   I = "lp(I, a1=0.7)", 
+                   mu_tday = "fs(tday/24, nharmonics=10)",
+                   mu = "ones()", 
+                   AR = "AR(lags=c(0,1,4))")
+modelAR$add_regprm("rls_prm(lambda=0.9)")
+modelAR$add_prmbounds(Ta__a1 = c(0.8, 0.9, 0.9999),
+                      I__a1 =  c(0.4, 0.8, 0.9999),
+                      lambda = c(0.9, 0.99, 0.9999))
+modelAR$kseq <- c(1,18)
+
+
+iseq <- which(in_range("2010-12-15",D$t,"2011-01-01"))
+Dfit <- subset(D, iseq)
+rls_fit(modelAR$prm, modelAR, Dfit, returnanalysis = FALSE)
+str(modelAR$Lfits[1:2])
+
+(i <- iseq[length(iseq)] + 1)
+Dnew <- subset(D, i:(i))
+Dnew$y
+
+modelAR$yAR
+D$y[(i-6):i]
+Dnew_transformed <- modelAR$transform_data(Dnew)
+
+Dnew_transformed$AR.lag0
+Dnew_transformed$AR.lag1
+Dnew_transformed$AR.lag4
+
+
+## modelAR$yAR
+## rls_update(modelAR, Dnew_transformed, Dnew[[model$output]])
+## yhatAR <- rls_predict(modelAR, Dnew_transformed)
+
+## modelAR$yAR
+
+## iseq <- i+modelAR$kseq
+## plot(D$t[iseq], D$y[iseq], type = "b", xlab = "t", ylab = "y")
+## lines(D$t[iseq], yhat[1,], type = "b", col = 2)
+## lines(D$t[iseq], yhatAR[1,], type = "b", col = 4)
+## legend("topright", c("observations",pst("predictions (",min(modelAR$kseq)," to ",max(modelAR$kseq)," steps ahead)"),
+##                      pst("Predictions AR (",min(model$kseq)," to ",max(model$kseq)," steps ahead)")), lty = 1, col = c(1,2,4))
+
+## (i <- iseq[length(iseq)] + 2)
+## Dnew <- subset(D, i:(i))
+## Dnew$y
+
+## modelAR$yAR
+## tail(Dfit$y)
+## Dnew_transformed <- modelAR$transform_data(Dnew)
+
+## Dnew_transformed$AR.lag3
+## Dnew_transformed$AR.Order2
+## Dnew_transformed$AR.Order1
+
+
+## modelAR$yAR
+## rls_update(modelAR, Dnew_transformed, Dnew[[model$output]])
+## yhatAR <- rls_predict(modelAR, Dnew_transformed)
+
+## modelAR$yAR
+
+## (i <- iseq[length(iseq)] + 2)
+## Dnew <- subset(D, i)
+## Dnew_transformed <- model$transform_data(Dnew)
+## rls_update(model, Dnew_transformed, Dnew[[model$output]])
+## yhat <- rls_predict(model, Dnew_transformed)
+
+## iseq <- i+modelAR$kseq
+## plot(D$t[iseq], D$y[iseq], type = "b", xlab = "t", ylab = "y")
+## lines(D$t[iseq], yhat[1,], type = "b", col = 2)
+## lines(D$t[iseq], yhatAR[1,], type = "b", col = 4)
+## legend("topright", c("observations",pst("predictions (",min(modelAR$kseq)," to ",max(modelAR$kseq)," steps ahead)"),
+##                      pst("Predictions AR (",min(model$kseq)," to ",max(model$kseq)," steps ahead)")), lty = 1, col = c(1,2,4))
diff --git a/misc-R/building-electricity-load-forecast.Rmd b/misc-R/building-electricity-load-forecast.Rmd
new file mode 100644
index 0000000000000000000000000000000000000000..fd42e149f438221314a2188b0a0e15ee8f3cd2b3
--- /dev/null
+++ b/misc-R/building-electricity-load-forecast.Rmd
@@ -0,0 +1,223 @@
+---
+title: "DRAFT Building electricity load forecasting"
+author: "Peder Bacher"
+date: "`r Sys.Date()`"
+output: rmarkdown::html_vignette
+vignette: >
+  %\VignetteIndexEntry{Building electricity load forecasting}
+  %\VignetteEngine{knitr::rmarkdown}
+  %\VignetteEncoding{UTF-8}
+bibliography: literature.bib
+---
+
+```{r setup, include = FALSE, purl = FALSE}
+knitr::opts_chunk$set(
+  collapse = TRUE,
+  comment = ">",
+  cache = FALSE, # Somehow it doesn't work
+  fig.height=6.5, 
+  fig.width=13, 
+  out.width="685px"
+)
+options(digits=3)
+```
+
+## Intro
+This vignette presents an example of using the onlineforecast
+package for fitting a model and calculating forecasts, as carried out
+by @Bacher2012.
+
+## Data
+Data for the forecasting examples is taken from a data set collected
+in Sønderborg, Denmark. It comprises heat and electricity load
+measurements for sixteen houses, together with local climate observations
+and weather forecasts (NWPs). The houses were generally built in the
+sixties and seventies, have a floor plan in the range of 85 to 170
+$\mr{m^2}$, and are constructed in brick. For each house only the total heat load, including both space heating and hot tap water heating, is available. The climate observations are measured at the local district heating plant within 10 kilometers of the houses. The NWPs are from the HIRLAM-S05 model and provided by the Danish Meteorological Institute. All times are in UTC and the time stamp for average values is set to the end of the time interval.
+
+Load the package:
+```{r}
+##library(devtools)
+##load_all(as.package("../../onlineforecast"), export_all=FALSE)
+## Remember
+##install.packages("onlineforecast_0.1.0.tar.gz")
+library(onlineforecast)
+```
+
+The "Dbuilding" data is included in the package. Its a data.list (see the vignette onlineforecasting.pdf):
+```{r}
+Dbuilding <- readRDS("Dbuilding.Rda")
+```
+
+Keep it in D and see the content:
+```{r}
+D <- Dbuilding
+names(D)
+```
+
+The time:
+```{r}
+head(D$t)
+```
+
+The observed electricity load (in kW) of the different houses is kept in a data.frame:
+```{r}
+head(D$Electricityload$house1)
+```
+
+The Numerical Weather Predictions (NWPs) of the ambient temperature for the first nine horizons are:
+```{r}
+head(D$Ta[ ,1:9])
+```
+So at "2008-12-01 01:00:00 GMT" the latest available forecast is the first row of Ta.
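+
+To compare a forecast with the observations at the time it is valid for, it can be lagged k steps. A minimal sketch using lag() on a forecast column (here the 1-step horizon):
+```{r}
+## The 1-step NWP lagged 1 step, so each value is aligned with the hour it covers
+head(lag(D$Ta$k1, 1))
+```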
+
+Choose the electricity load of house 3 for the example and keep it as the vector y:
+```{r}
+D$y <- D$Electricityload$house3
+```
+
+A time series plot, see "?plot_ts.data.list". Note how the forecasts Ta are lagged to be synced with the observations (and thereby also with each other):
+```{r}
+plot_ts(D, c("^y","Ta"), kseq=c(1,12))
+```
+
+A shorter period, a longer period, and a scatter plot matrix of the output and the temperature forecasts:
+```{r}
+plot_ts(D, c("^y","Ta"), "2010-12-15", "2010-12-25", kseq=c(1,12))
+
+plot_ts(D, c("^y","Ta"), "2009-12-15", "2011-12-25", kseq=c(1,12))
+
+
+tmp <- subset(D, c("2010-12-15", "2011-12-15"), kseq=1:6, pattern=c("^y|^Ta"))
+pairs(tmp)
+```
+
+Set the training period and mark which points are included in the score evaluation (when fitting, points with scoreperiod == FALSE are not included in the score):
+```{r}
+Dtrain <- subset(D, c("2010-06-01", "2011-06-01"))
+Dtrain$scoreperiod <- in_range("2010-06-10", Dtrain$t)
+```
+
+Define a model
+```{r}
+model <- forecastmodel$new()
+model$output = "y"
+model$add_inputs(Ta = "lp(Ta, a1=0.9)", 
+                 I = "lp(I, a1=0.7)", 
+                 mu_tday = "fs(tday/24, nharmonics=10)",
+                 mu = "ones()")
+model$add_regprm("rls_prm(lambda=0.9)")
+```
+
+Define the parameters to be optimized offline (their lower bound, initial value and upper bound):
+```{r}
+model$add_prmbounds(Ta__a1 = c(0.8, 0.9, 0.9999),
+                    I__a1 =  c(0.4, 0.8, 0.9999),
+                    lambda = c(0.9, 0.99, 0.9999))
+```
+
+Tune the parameters: first set the horizons to optimize over, then run the optimization:
+```{r, results="hide"}
+model$kseq <- c(1,18)
+model$prm <- rls_optim(model, Dtrain, control=list(maxit=2), cachedir = "cache-building-heat-load-forecasting")$par
+```
+
+Now fit with the optimized parameters on the entire period
+```{r}
+model$kseq <- 1:36
+val <- rls_fit(model$prm, model, D, returnanalysis = TRUE)
+```
+
+Plot the forecasts (Yhat adheres to the forecast matrix format and in plot_ts the forecasts are lagged k steps to sync with the observations)
+```{r, fig.height=4}
+D$Yhat <- val$Yhat
+plot_ts(D, c("^y|^Y"), "2010-06-01", "2011-06-01", kseq = c(1,18))
+plot_ts(D, c("^y|^Y"), "2010-07-01", "2010-07-15", kseq = c(1,18))
+plot_ts(D, c("^y|^Y"), "2010-08-01", "2010-08-05", kseq = c(1,18))
+plot_ts(D, c("^y|^Y"), "2010-12-15", "2010-12-30", kseq = c(1,18))
+```
+
+Plot a forecast for a particular time point
+```{r, fig.height=4}
+i <- 5000
+iseq <- i+model$kseq
+plot(D$t[iseq], D$y[iseq], type = "b", xlab = "t", ylab = "y")
+lines(D$t[iseq], D$Yhat[i, ], type = "b", col = 2)
+legend("topright", c("Observations",pst("Predictions (",min(model$kseq)," to ",max(model$kseq)," steps ahead)")), lty = 1, col = 1:2)
+```
+
+### Recursive update and prediction
+First fit on a period
+```{r}
+iseq <- which(in_range("2010-12-15",D$t,"2011-01-01"))
+Dfit <- subset(D, iseq)
+rls_fit(model$prm, model, Dfit)
+```
+
+Now the fits are saved in the model object (it's an R6 object, hence passed by reference to the functions and can be changed inside them). A list of fits, with an entry for each horizon, is kept in Lfits; see the first two:
+```{r}
+str(model$Lfits[1:2])
+```
+
+Now new data arrives; take the point right after the fit period:
+```{r}
+(i <- iseq[length(iseq)] + 1)
+Dnew <- subset(D, i)
+```
+
+First we need to transform the new data (this must only be done once for each new data point, since some transformation functions, e.g. lp(), keep state; see the detailed vignette onlineforecasting.pdf):
+```{r}
+Dnew_transformed <- model$transform_data(Dnew)
+```
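+
+If the same data needs to be transformed again (e.g. when re-running a script), the input states kept by the transformation functions can be reset first. A minimal sketch (not evaluated here), using reset_state() as in the package's other examples:
+```{r, eval=FALSE}
+model$reset_state()
+Dnew_transformed <- model$transform_data(Dnew)
+```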
+
+Then we can update the parameters using the transformed data
+```{r}
+rls_update(model, Dnew_transformed, Dnew[[model$output]])
+```
+
+Calculate predictions using the new data and the updated fits (rls coefficient estimates in model$Lfits[[k]]$theta)
+```{r}
+yhat <- rls_predict(model, Dnew_transformed)
+```
+
+Plot to see that it fits the observations
+```{r}
+iseq <- i+model$kseq
+plot(D$t[iseq], D$y[iseq], type = "b", xlab = "t", ylab = "y")
+lines(D$t[iseq], yhat, type = "b", col = 2)
+legend("topright", c("observations",pst("predictions (",min(model$kseq)," to ",max(model$kseq)," steps ahead)")), lty = 1, col = 1:2)
+```
+
+Run this for a longer period to verify that the same forecasts are obtained (in one go vs. iteratively)
+
+First in one go
+```{r}
+val <- rls_fit(model$prm, model, D, returnanalysis = TRUE)
+D$Yhat1 <- val$Yhat
+```
+
+and then iteratively
+```{r}
+itrain <- which(in_range("2010-12-15",D$t,"2011-01-01"))
+itest <- which(in_range("2011-01-01",D$t,"2011-01-04"))
+rls_fit(model$prm, model, subset(D, itrain))
+
+D$Yhat2 <- data.frame(matrix(NA, nrow(D$Yhat1), ncol(D$Yhat1)))
+names(D$Yhat2) <- names(D$Yhat1)
+for(i in itest){
+    print(i)
+    Dnew <- subset(D, i)
+    Dnewtr <- model$transform_data(Dnew)
+    rls_update(model, Dnewtr, Dnew[[model$output]])
+    D$Yhat2[i, ] <- as.numeric(rls_predict(model, Dnewtr))
+}
+```
+
+Compare the one-step forecasts; if the recursive updating is consistent with the batch fit, the differences should be zero:
+```{r}
+D$Yhat1$k1[itest] - D$Yhat2$k1[itest]
+plot(D$Yhat1$k1[itest], type="b")
+lines(D$Yhat2$k1[itest], type="b", col=2)
+```
+
+## Literature
diff --git a/misc-R/building-heat-load-forecasting-AR-and-error-models.R b/misc-R/building-heat-load-forecasting-AR-and-error-models.R
new file mode 100644
index 0000000000000000000000000000000000000000..54c7b57a22ca9bd2a20658564525822b26ec657d
--- /dev/null
+++ b/misc-R/building-heat-load-forecasting-AR-and-error-models.R
@@ -0,0 +1,277 @@
+## ----------------------------------------------------------------
+## Load the current version directly from the folder
+library(devtools)
+load_all(as.package("../../onlineforecast"))
+
+
+## ------------------------------------------------------------------------
+D <- Dbuildingheatload
+D$y <- D$heatload ## Here we are missing this one! so results might not be as interesting: D$Heatload$house15
+plot_ts(D, c("^y","Ta"), kseq=c(1,12))
+plot_ts(D, c("^y","Ta"), "2010-12-15", "2011-01-10", kseq=c(1,12))
+
+
+## ------------------------------------------------------------------------
+D$scoreperiod <- period("2010-12-20", D$t)
+itrain <- period(D$t, "2011-01-01")
+ieval <- period("2011-01-01", D$t)
+
+
+## ------------------------------------------------------------------------
+## Model with no AR
+model <- forecastmodel$new()
+model$output = "y"
+model$add_inputs(
+          Ta = "lp(Ta, a1=0.9)", 
+          I = "lp(I, a1=0.7)", 
+          mu_tday = "fs(tday/24, nharmonics=10)",
+          mu = "ones()")
+model$add_regp("rls_prm(lambda=0.9)")
+##
+model$add_prmbounds(Ta__a1 = c(0.8, 0.9, 0.9999),
+             I__a1 =  c(0.4, 0.8, 0.9999),
+             lambda = c(0.9, 0.99, 0.9999))
+##
+model$kseq <- c(1,18)
+model$prm <- rls_optim(model, subset(D,itrain), control=list(maxit=2))$par
+##
+model$kseq <- 1:36
+val <- rls_fit(model$prm, model, D, returnanalysis = TRUE)
+
+
+## ------------------------------------------------------------------------
+## Investigate the one step residuals, lag them to match the observations
+D$resid <- lag(val$Resid$k1, 1)
+
+plot(D$t[ieval], D$resid[ieval])
+
+acf(D$resid[ieval], na.action=na.pass)
+
+par(mfrow=c(1,2))
+plot(D$y[ieval], D$resid[ieval])
+plot(D$resid[ieval], val$Resid$k1[ieval])
+
+
+
+## ------------------------------------------------------------------------
+## Add an AR part
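+## AR(0) adds the output lagged 0 steps, i.e. the most recently available
+## observation of the output, as an input (see ?AR)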
+model$add_inputs(AR = "AR(0)")
+##
+model$kseq <- c(1,18)
+model$prm <- rls_optim(model, subset(D,itrain), control=list(maxit=2))$par
+##
+model$kseq <- 1:36
+valAR <- rls_fit(model$prm, model, D, returnanalysis = TRUE)
+
+
+## ------------------------------------------------------------------------
+## An MA model for the k=1 ahead (AR on the residuals)
+modelMA <- forecastmodel$new()
+modelMA$output <- "resid"
+modelMA$add_inputs(AR = "AR(0)")
+modelMA$add_regp("rls_prm(lambda=0.9)")
+##
+modelMA$add_prmbounds(lambda = c(0.9, 0.99, 0.9999))
+##
+modelMA$kseq <- 1
+modelMA$prm <- rls_optim(modelMA, subset(D,itrain), control=list(maxit=2), cachedir = "")$par
+##
+valMA <- rls_fit(modelMA$prm, modelMA, D, returnanalysis = TRUE)
+
+
+## ------------------------------------------------------------------------
+## See the k step forecasts
+ieval <- period("2011-01-01", D$t)
+
+plot(D$t[ieval], lag(val$Resid$k1[ieval],1), type = "l", col = 1)
+lines(D$t[ieval], lag(valAR$Resid$k1[ieval],1), type = "l", col = 2)
+lines(D$t[ieval], lag(valMA$Resid$k1[ieval],1), type = "l", col = 3)
+
+par(mfrow=c(1,3))
+acf(val$Resid$k1[ieval], na.action=na.pass)
+acf(valAR$Resid$k1[ieval], na.action=na.pass)
+acf(valMA$Resid$k1[ieval], na.action=na.pass)
+
+rmse(val$Resid$k1[ieval])
+rmse(valAR$Resid$k1[ieval])
+rmse(valMA$Resid$k1[ieval])
+
+
+
+## ------------------------------------------------------------------------
+## Fit the MA (AR error) model for each horizon
+L <- lapply(1:36, function(k){
+    ## The k step residuals from the AR model fitted above
+    D$resid <- lag(valAR$Resid[ ,k], k)
+    ##
+    modelMA$kseq <- k
+    ##
+    modelMA$prm <- rls_optim(modelMA, subset(D,itrain), control=list(maxit=2), cachedir = "")$par
+    ##
+    valMA <- rls_fit(modelMA$prm, modelMA, D, returnanalysis = TRUE)
+    valMA$Resid
+})
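+## Combine the per-horizon residuals into one data.frame with a column per horizon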
+valMAmulti <- list()
+valMAmulti$Resid <- do.call("cbind", L)
+
+
+
+## ------------------------------------------------------------------------
+## RMSE
+## Only the points which are not NA for all horizons, and have values for both models
+iAR <- apply(!is.na(valAR$Resid), 1, all)
+iMA <- apply(!is.na(valMAmulti$Resid), 1, all)
+i <- apply(!is.na(val$Resid), 1, all)
+i <- iAR & iMA & i
+## Only evaluation period
+i <- i & ieval
+##
+plot(val$Resid$k1[i], type="l")
+lines(valAR$Resid$k1[i], col=2)
+lines(valMAmulti$Resid$k1[i], col=3)
+##
+tmpAR <- apply(valAR$Resid[i, ], 2, rmse)
+tmpMA <- apply(valMAmulti$Resid[i, ], 2, rmse)
+tmp <- apply(val$Resid[i, ], 2, rmse)
+plot(tmpAR, type="b", ylim=range(tmp,tmpAR))
+points(tmpMA, type="b", col=2)
+points(tmp, type="b")
+
+acf(valMAmulti$Resid$k1[i], na.action=na.pass)
+pacf(valMAmulti$Resid$k1[i], na.action=na.pass)
+
+plot(lag(val$Resid$k1[i], 1), val$Resid$k1[i])
+plot(lag(valMAmulti$Resid$k1[i], 1), valMAmulti$Resid$k1[i])
+
+
+## ------------------------------------------------------------------------
+## RMSE
+## Only the points which are not NA for all horizons, and have values for both models
+iAR <- apply(!is.na(valAR$Resid), 1, all)
+i <- apply(!is.na(val$Resid), 1, all)
+i <- iAR & i
+## Remove start
+i[1:200] <- FALSE
+##
+tmpAR <- apply(valAR$Resid[i, ], 2, rmse)
+tmp <- apply(val$Resid[i, ], 2, rmse)
+plot(model$kseq, tmpAR, type="b", ylim=range(tmp,tmpAR))
+points(model$kseq, tmp, type="b")
+
+acf(val$Resid$k1[i], na.action=na.pass)
+pacf(val$Resid$k1[i], na.action=na.pass)
+
+plot(lag(val$Resid$k1[i], 1), val$Resid$k1[i])
+plot(lag(val$Resid$k2[i], 2), val$Resid$k2[i])
+
+y <- val$Resid$k1[i]
+x <- lag(val$Resid$k1[i], 1)
+fit <- lm(y ~ 0 + x)
+plot(fit$residuals)
+rmse(fit$residuals)
+rmse(fit$residuals)
+
+## ##
+## setpar("ts", mfrow=c(ncol(val$Resid),1))
+## apply(val$Resid[i, ], 2, plot, type="l")
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+## Error model
+
+## In the applied model there is no auto-regressive part, hence it is almost
+## certain that there is auto-correlation in the residuals - which, for the shorter
+## horizons, can be used to improve the forecasts.
+
+## Check the auto-correlation for the one-step residuals
+
+
+acf(val$Resid$k1[i], na.action=na.pass)
+acf(val$Resid$k2[i], na.action=na.pass)
+acf(val$Resid$k3[i], na.action=na.pass)
+
+## Take the error from the training period
+
+D$residual.k1 <- lag(val$Resid$k1, 1)
+Dtrain$residual.k1 <- D$residual.k1[D$t %in% Dtrain$t]
+
+##
+plot_ts(Dtrain, "residual.k1")
+
+model <- forecastmodel$new()
+model$output <- "residual.k1"
+model$add_inputs(
+          AR = "lp(AR(c(0)), a1=0.2)")
+model$add_regp("rls_prm(lambda=0.9)")
+##
+model$add_prmbounds(AR__a1 =  c(0.4, 0.8, 0.9999),
+                    lambda = c(0.9, 0.99, 0.9999))
+##
+model$kseq <- c(1)
+model$prm <- rls_optim(model, Dtrain, control=list(maxit=2), cachedir = "")$par
+##
+model$kseq <- 1:36
+valMA <- rls_fit(model$prm, model, D, returnanalysis = TRUE)
+
+
+
+## ------------------------------------------------------------------------
+## RMSE
+## Only the points which are not NA for all horizons, and have values for both models
+iAR <- apply(!is.na(valAR$Resid), 1, all)
+iMA <- apply(!is.na(valMA$Resid), 1, all)
+i <- apply(!is.na(val$Resid), 1, all)
+i <- iAR & iMA & i
+## Remove start
+i[1:200] <- FALSE
+##
+plot(val$Resid$k1[i], type="l")
+lines(valAR$Resid$k1[i], col=2)
+lines(valMA$Resid$k1[i], col=3)
+##
+tmpAR <- apply(valAR$Resid[i, ], 2, rmse)
+tmpMA <- apply(valMA$Resid[i, ], 2, rmse)
+tmp <- apply(val$Resid[i, ], 2, rmse)
+plot(model$kseq, tmpAR, type="b", ylim=range(tmp,tmpAR))
+points(model$kseq, tmpMA, type="b")
+points(model$kseq, tmp, type="b")
+
+acf(valMA$Resid$k1[i], na.action=na.pass)
+pacf(valMA$Resid$k1[i], na.action=na.pass)
+
+plot(lag(val$Resid$k1[i], 1), val$Resid$k1[i])
+plot(lag(valMA$Resid$k1[i], 1), valMA$Resid$k1[i])
diff --git a/misc-R/building-heat-load-forecasting-test-AR.R b/misc-R/building-heat-load-forecasting-test-AR.R
new file mode 100644
index 0000000000000000000000000000000000000000..79420967e49cc71b5eb302d44556e3dbacb064e1
--- /dev/null
+++ b/misc-R/building-heat-load-forecasting-test-AR.R
@@ -0,0 +1,310 @@
+## ----------------------------------------------------------------
+## Load the current version directly from the folder
+library(devtools)
+load_all(as.package("../../onlineforecast"))
+
+
+## ------------------------------------------------------------------------
+class(Dbuildingheatload)
+
+
+## ------------------------------------------------------------------------
+D <- Dbuildingheatload
+names(D)
+
+
+## ------------------------------------------------------------------------
+head(D$t)
+
+
+## ------------------------------------------------------------------------
+head(D$Heatload[ ,1:9])
+
+
+## ------------------------------------------------------------------------
+head(D$Ta[ ,1:9])
+
+
+## ------------------------------------------------------------------------
+D$y <- D$Heatload$house15
+## ## ------------------------------------------------------------------------
+ plot_ts(D, c("^y","Ta"), kseq=c(1,12))
+
+
+## ## ------------------------------------------------------------------------
+ plot_ts(D, c("^y","Ta"), "2010-12-15", "2010-12-25", kseq=c(1,12))
+
+
+## ------------------------------------------------------------------------
+Dtrain <- subset(D, c("2010-12-15", "2011-01-01"))
+Dtrain$scoreperiod <- in_range("2010-12-20", Dtrain$t)
+
+
+## ------------------------------------------------------------------------
+model <- forecastmodel$new()
+model$output = "y"
+model$add_inputs(
+          Ta = "lp(Ta, a1=0.9)", 
+          I = "lp(I, a1=0.7)", 
+          mu_tday = "fs(tday/24, nharmonics=10)",
+          mu = "ones()",
+          AR = "lp(AR(c(0,1)), a1=0.2)")
+model$add_regprm("rls_prm(lambda=0.9)")
+
+## ## ------------------------------------------------------------------------
+## model <- forecastmodel$new()
+## model$output = "y"
+## model$add_inputs(Ta = "lp(Ta, a1=0.9)", 
+##                  I = "lp(I, a1=0.7)", 
+##                  mu_tday = "fs(tday/24, nharmonics=10)",
+##                  mu = "ones()",
+##                  AR = "AR(c(1,2))")
+## model$add_regp("rls_prm(lambda=0.9)")
+
+
+
+## ------------------------------------------------------------------------
+model$add_prmbounds(Ta__a1 = c(0.8, 0.9, 0.9999),
+                    I__a1 =  c(0.4, 0.8, 0.9999),
+                    AR__a1 =  c(0.4, 0.8, 0.9999),
+                    lambda = c(0.9, 0.99, 0.9999))
+
+
+model$kseq <- c(1,18)
+#datatr <- model$transform_data(Dtrain)
+#oi
+
+#val <- rls_fit(model$get_prmbounds("init"), model, D, returnanalysis = TRUE)
+
+##model2 <- forecastmodel$new()
+
+## ---- results="hide"-----------------------------------------------------
+model$kseq <- c(1,18)
+model$prm <- rls_optim(model, Dtrain, control=list(maxit=2), cachedir = "building-heat-load-forecasting_cache-rls")$par
+
+
+## ------------------------------------------------------------------------
+model$kseq <- 1:36
+val <- rls_fit(model$prm, model, D, returnanalysis = TRUE)
+
+model$reset_state()
+datatr <- model$transform_data(Dtrain)
+
+## ---- fig.height=4-------------------------------------------------------
+D$Yhat <- val$Yhat
+plot_ts(D, c("^y|^Y"), "2011-01-01", "2011-02-01", kseq = c(1,18))
+
+
+## ---- fig.height=4-------------------------------------------------------
+i <- 200
+iseq <- i+model$kseq
+plot(D$t[iseq], D$y[iseq], type = "b", xlab = "t", ylab = "y")
+lines(D$t[iseq], D$Yhat[i, ], type = "b", col = 2)
+legend("topright", c("Observations",pst("Predictions (",min(model$kseq)," to ",max(model$kseq)," steps ahead)")), lty = 1, col = 1:2)
+
+
+## ------------------------------------------------------------------------
+iseq <- which(in_range("2010-12-15",D$t,"2011-01-01"))
+Dfit <- subset(D, iseq)
+rls_fit(model$prm, model, Dfit)
+
+
+## ------------------------------------------------------------------------
+str(model$Lfits[1:2])
+
+
+## ------------------------------------------------------------------------
+(i <- iseq[length(iseq)] + 1)
+##i <- i:(i+1)
+Dnew <- subset(D, i)
+
+
+## ------------------------------------------------------------------------
+Dnew_transformed <- model$transform_data(Dnew)
+
+
+## ------------------------------------------------------------------------
+rls_update(model, Dnew_transformed, Dnew[[model$output]])
+
+
+## ------------------------------------------------------------------------
+yhat <- rls_predict(model, Dnew_transformed)
+
+
+## ------------------------------------------------------------------------
+iseq <- i+model$kseq
+plot(D$t[iseq], D$y[iseq], type = "b", xlab = "t", ylab = "y")
+lines(D$t[iseq], yhat, type = "b", col = 2)
+legend("topright", c("observations",pst("predictions (",min(model$kseq)," to ",max(model$kseq)," steps ahead)")), lty = 1, col = 1:2)
+
+
+## ------------------------------------------------------------------------
+val <- rls_fit(model$prm, model, D, returnanalysis = TRUE)
+D$Yhat1 <- val$Yhat
+
+
+## ------------------------------------------------------------------------
+itrain <- which(in_range("2010-12-15",D$t,"2011-01-01"))
+itest <- which(in_range("2011-01-01",D$t,"2011-01-04"))
+rls_fit(model$prm, model, subset(D, itrain))
+
+D$Yhat2 <- data.frame(matrix(NA, nrow(D$Yhat1), ncol(D$Yhat1)))
+names(D$Yhat2) <- names(D$Yhat1)
+for(i in itest){
+    print(i)
+    Dnew <- subset(D, i)
+    Dnewtr <- model$transform_data(Dnew)
+    rls_update(model, Dnewtr, Dnew[[model$output]])
+    D$Yhat2[i, ] <- as.numeric(rls_predict(model, Dnewtr))
+}
+
+
+## ------------------------------------------------------------------------
+D$Yhat1$k1[itest] - D$Yhat2$k1[itest]
+
+
+valAR <- val
+
+
+## ------------------------------------------------------------------------
+## Compare with model with no AR
+model <- forecastmodel$new()
+model$output = "y"
+model$add_inputs(
+          Ta = "lp(Ta, a1=0.9)", 
+          I = "lp(I, a1=0.7)", 
+          mu_tday = "fs(tday/24, nharmonics=10)",
+          mu = "ones()")
+model$add_regp("rls_prm(lambda=0.9)")
+##
+model$add_prmbounds(Ta__a1 = c(0.8, 0.9, 0.9999),
+             I__a1 =  c(0.4, 0.8, 0.9999),
+             lambda = c(0.9, 0.99, 0.9999))
+##
+model$kseq <- c(1,18)
+model$prm <- rls_optim(model, Dtrain, control=list(maxit=2), cachedir = "building-heat-load-forecasting_cache-rls")$par
+##
+model$kseq <- 1:36
+val <- rls_fit(model$prm, model, D, returnanalysis = TRUE)
+
+
+## ------------------------------------------------------------------------
+## See the k step forecasts
+i <- 200
+iseq <- i:(i+7*24)
+plot(D$t[iseq], D$y[iseq], type = "b", xlab = "t", ylab = "y")
+k <- 1
+lines(D$t[iseq], lag(val$Yhat[iseq,k],k), type = "b", col = 2)
+lines(D$t[iseq], lag(valAR$Yhat[iseq,k],k), type = "b", col = 3)
+#legend("topright", c("Observations",pst("Predictions (",min(model$kseq)," to ",max(model$kseq)," steps ahead)")), lty = 1, col = 1:2)
+
+
+
+## ------------------------------------------------------------------------
+## RMSE
+## Only the points which are not NA for all horizons, and have values for both models
+iAR <- apply(!is.na(valAR$Resid), 1, all)
+i <- apply(!is.na(val$Resid), 1, all)
+i <- iAR & i
+## Remove start
+i[1:200] <- FALSE
+##
+tmpAR <- apply(valAR$Resid[i, ], 2, rmse)
+tmp <- apply(val$Resid[i, ], 2, rmse)
+plot(model$kseq, tmpAR, type="b", ylim=range(tmp,tmpAR))
+points(model$kseq, tmp, type="b")
+
+acf(val$Resid$k1[i], na.action=na.pass)
+pacf(val$Resid$k1[i], na.action=na.pass)
+
+plot(lag(val$Resid$k1[i], 1), val$Resid$k1[i])
+plot(lag(val$Resid$k2[i], 2), val$Resid$k2[i])
+
+y <- val$Resid$k1[i]
+x <- lag(val$Resid$k1[i], 1)
+fit <- lm(y ~ 0 + x)
+plot(fit$residuals)
+rmse(fit$residuals)
+rmse(fit$residuals)
+
+## ##
+## setpar("ts", mfrow=c(ncol(val$Resid),1))
+## apply(val$Resid[i, ], 2, plot, type="l")
+
+
+## Error model
+
+## In the applied model there is no auto-regressive part, hence it is almost
+## certain that there is auto-correlation in the residuals - which, for the shorter
+## horizons, can be used to improve the forecasts.
+
+## Check the auto-correlation for the one-step residuals
+
+
+acf(val$Resid$k1[i], na.action=na.pass)
+acf(val$Resid$k2[i], na.action=na.pass)
+acf(val$Resid$k3[i], na.action=na.pass)
+
+## Take the error from the training period
+
+D$residual.k1 <- lag(val$Resid$k1, 1)
+Dtrain$residual.k1 <- D$residual.k1[D$t %in% Dtrain$t]
+
+##
+plot_ts(Dtrain, "residual.k1")
+
+model <- forecastmodel$new()
+model$output <- "residual.k1"
+model$add_inputs(
+          AR = "lp(AR(c(0)), a1=0.2)")
+model$add_regp("rls_prm(lambda=0.9)")
+##
+model$add_prmbounds(AR__a1 =  c(0.4, 0.8, 0.9999),
+             lambda = c(0.9, 0.99, 0.9999))
+##
+model$kseq <- c(1)
+model$prm <- rls_optim(model, Dtrain, control=list(maxit=2))$par
+##
+model$kseq <- 1:36
+valMA <- rls_fit(model$prm, model, D, returnanalysis = TRUE)
+
+
+
+## ------------------------------------------------------------------------
+## RMSE
+## Only the points which are not NA for all horizons, and have values for both models
+iAR <- apply(!is.na(valAR$Resid), 1, all)
+iMA <- apply(!is.na(valMA$Resid), 1, all)
+i <- apply(!is.na(val$Resid), 1, all)
+i <- iAR & iMA & i
+## Remove start
+i[1:200] <- FALSE
+##
+plot(val$Resid$k1[i], type="l")
+lines(valAR$Resid$k1[i], col=2)
+lines(valMA$Resid$k1[i], col=3)
+##
+tmpAR <- apply(valAR$Resid[i, ], 2, rmse)
+tmpMA <- apply(valMA$Resid[i, ], 2, rmse)
+tmp <- apply(val$Resid[i, ], 2, rmse)
+plot(model$kseq, tmpAR, type="b", ylim=range(tmp,tmpAR))
+points(model$kseq, tmpMA, type="b")
+points(model$kseq, tmp, type="b")
+
+acf(valMA$Resid$k1[i], na.action=na.pass)
+pacf(valMA$Resid$k1[i], na.action=na.pass)
+
+plot(lag(val$Resid$k1[i], 1), val$Resid$k1[i])
+plot(lag(valMA$Resid$k1[i], 1), valMA$Resid$k1[i])
diff --git a/misc-R/data_soenderborg.csv b/misc-R/data_soenderborg.csv
new file mode 100644
index 0000000000000000000000000000000000000000..df9475eaf84fddf616d990792fa2f9701c2e383d
Binary files /dev/null and b/misc-R/data_soenderborg.csv differ
diff --git a/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_4218597.rda b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_4218597.rda
new file mode 100644
index 0000000000000000000000000000000000000000..9564ec094a40dcac87af30d58e86b7a73f069308
Binary files /dev/null and b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_4218597.rda differ
diff --git a/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_4218598.rda b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_4218598.rda
new file mode 100644
index 0000000000000000000000000000000000000000..a07a383a2c82367a7ebd96ede563c1937ff69a26
Binary files /dev/null and b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_4218598.rda differ
diff --git a/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_4711176.rda b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_4711176.rda
new file mode 100644
index 0000000000000000000000000000000000000000..03058f1f0a5c1526e260aeefe7f583db4f4febfa
Binary files /dev/null and b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_4711176.rda differ
diff --git a/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_4724106.rda b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_4724106.rda
new file mode 100644
index 0000000000000000000000000000000000000000..df94d6800acc4ca2627f69066c56ec7041e7c1f9
Binary files /dev/null and b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_4724106.rda differ
diff --git a/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_4836681.rda b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_4836681.rda
new file mode 100644
index 0000000000000000000000000000000000000000..dd1718f1cfe956a7e6fc305f918097d7cc6140c5
Binary files /dev/null and b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_4836681.rda differ
diff --git a/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_4964553.rda b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_4964553.rda
new file mode 100644
index 0000000000000000000000000000000000000000..ada23acbb129bd9b074083b134b4f506357d49fa
Binary files /dev/null and b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_4964553.rda differ
diff --git a/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5036505.rda b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5036505.rda
new file mode 100644
index 0000000000000000000000000000000000000000..5a3d750f93449ea78e821a22605bff05d9343a52
Binary files /dev/null and b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5036505.rda differ
diff --git a/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5107720.rda b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5107720.rda
new file mode 100644
index 0000000000000000000000000000000000000000..8d309aaa5574a7062dbfbca43ab8ba51120a3834
Binary files /dev/null and b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5107720.rda differ
diff --git a/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5159799.rda b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5159799.rda
new file mode 100644
index 0000000000000000000000000000000000000000..822d4e986c618223e7422aa9cfb7efc7b6564083
Binary files /dev/null and b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5159799.rda differ
diff --git a/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5164534.rda b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5164534.rda
new file mode 100644
index 0000000000000000000000000000000000000000..ca7f47ad4d4b0bd8168de3eeb9d666a7a31eb09a
Binary files /dev/null and b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5164534.rda differ
diff --git a/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5183232.rda b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5183232.rda
new file mode 100644
index 0000000000000000000000000000000000000000..2cf1561fa66ac2218e197467f1208295bd62fbf2
Binary files /dev/null and b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5183232.rda differ
diff --git a/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5193768.rda b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5193768.rda
new file mode 100644
index 0000000000000000000000000000000000000000..837f6bd411c7c83a0e2ed55be09c71eb2d3a38ee
Binary files /dev/null and b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5193768.rda differ
diff --git a/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5194732.rda b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5194732.rda
new file mode 100644
index 0000000000000000000000000000000000000000..be52c3c44a4dc5ca28f0c6aec10af0dbc9f45417
Binary files /dev/null and b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5194732.rda differ
diff --git a/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5194965.rda b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5194965.rda
new file mode 100644
index 0000000000000000000000000000000000000000..e129d9a4ed0e015b7dc7fb7270bdf0f05312289e
Binary files /dev/null and b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5194965.rda differ
diff --git a/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5197381.rda b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5197381.rda
new file mode 100644
index 0000000000000000000000000000000000000000..6bfeccb293b7b2d3722bf6da8cdb4fe4cf82915a
Binary files /dev/null and b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5197381.rda differ
diff --git a/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5223036.rda b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5223036.rda
new file mode 100644
index 0000000000000000000000000000000000000000..9d2104bc2b978b594e85ddc46ccb491a45a58400
Binary files /dev/null and b/misc-R/data_soenderborg_heatsplit/heatSplit.sp_1.sn_5223036.rda differ
diff --git a/misc-R/data_soenderborg_load.R b/misc-R/data_soenderborg_load.R
new file mode 100644
index 0000000000000000000000000000000000000000..b90dc01619d5adaacf3bbc40864eda6fc39ef8bb
--- /dev/null
+++ b/misc-R/data_soenderborg_load.R
@@ -0,0 +1,328 @@
+# setting work directory and libraries #
+rm(list = ls())
+
+# # Packages used
+# require(R6)
+require(data.table)
+# require(Rcpp)
+# require(splines)
+library(devtools)
+library(roxygen2)
+
+pack <- as.package("../../onlineforecast")
+load_all(pack)
+
+
+# Importing data
+# First unzip to get the .csv: system('unzip ../data/DataSoenderborg.zip')
+data_or <- fread("data_soenderborg.csv", sep = ",", header = TRUE)
+data_or[, `:=`(t, asct(data_or$t))]
+setDF(data_or)
+names(data_or)[names(data_or) == "Ig.obs"] <- "I.obs"
+
+# Make a data.table for each variable
+tmp <- unlist(getse(strsplit(names(data_or)[-1], "\\."), 2))
+colnm <- unlist(getse(strsplit(names(data_or)[-1], "\\."), 1))
+nms <- unique(colnm[grep("^k\\d*$", tmp)])
+
+kmax <- 48
+data <- list()
+data$t <- data_or$t
+for (ii in 1:length(nms)) {
+    # Find the columns
+    i <- grep(pst("^", nms[ii], "$"), unlist(getse(strsplit(names(data_or)[-1],"\\."), 1))) + 1
+    # Take only with kxx
+    i <- i[grep("k[[:digit:]]+$", names(data_or)[i])]
+    # 
+    #
+    data[[nms[ii]]] <- lag(data_or[ ,i], -1:-length(i))
+    names(data[[nms[ii]]]) <- pst("k", 1:length(i))
+    row.names(data[[nms[ii]]]) <- NULL
+    data[[nms[ii]]] <- as.data.frame(data[[nms[ii]]])
+    # Check if observed climate variable is there
+    nm <- pst(nms[[ii]], ".obs")
+    if (nm %in% names(data_or)) {
+        data[[nm]] <- data_or[, nm]
+    }
+}
+# More
+cols <- pst("Heatload.house", 1:16)
+data[["Heatload"]] <- data_or[, cols]
+names(data[["Heatload"]]) <- pst("house", 1:16)
+# 
+data[["cosAoi"]] <- data_or[, "cosAoi.obs"]
+data[["sunElevation"]] <- data_or[, "sunElevation.obs"]
+
+
+# # The time of day
+# ncol <- ncol(data$Ta)
+# tmp <- aslt(data$t)$hour
+# tmp <- matrix(tmp, nrow = length(tmp), ncol = ncol)
+# tmp <- data.frame(t(t(tmp) + (0:(ncol - 1))))
+# names(tmp) <- pst("k", 0:(ncol - 1))
+# data$tday <- tmp%%24
+
+#
+class(data) <- c("data.list","list")
+
+# Save for other scripts to read it
+#saveRDS(data, "data_soenderborg.RDS")
+
+
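+# Average heat load across the houses and the number of houses with missing values per time step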
+data$heatloadtotal <- sapply(1:nrow(data$Heatload), function(i){
+    mean(unlist(data$Heatload[i, ]), na.rm=TRUE)
+})
+data$totaln <- sapply(1:nrow(data$Heatload), function(i){
+    sum(is.na(unlist(data$Heatload[i, ])))
+})
+plot(data$t, data$totaln)
+
+# Write for building heat load forecasting
+#Dbuildingheatload <- subset(data, c("2010-12-15","2011-03-01"), nms=c("t","Heatload","Ta","I","Ws","Wd","Ta.obs","I.obs","Wd.obs","Ws.obs","cosAoi","sunElevation","tday"))
+data$heatload <- data$Heatload$house9
+Dbuildingheatload <- subset(data, c("2010-12-15","2011-03-01"), nms=c("t","heatload","heatloadtotal","Ta.obs","I.obs","Ta","I"))
+rownames(Dbuildingheatload$Ta) <- NULL
+Dbuildingheatload$Ta <- Dbuildingheatload$Ta[ ,1:36]
+rownames(Dbuildingheatload$I) <- NULL
+Dbuildingheatload$I <- Dbuildingheatload$I[ ,1:36]
+#
+usethis::use_data(Dbuildingheatload, overwrite=TRUE) 
+
+
+
+
+#----------------------------------------------------------------
+# The electricity was not in the above data, so add it
+# Open the info
+info <- read.table("data_soenderborg_selectedInfo.csv",header=TRUE,sep=",")
+
+# Open the hourly data, and make one data.frame with the power for each house
+load("data_soenderborg_orig.rda")
+#load("../data/heat_sp-1.rda")
+D <- D[D$sn %in% info$sn, ]
+
+# Make the acc. el energy (kWh) into power (kW)
+L <- split(D,D$sn)
+x <- L[[3]]
+Lre <- lapply(L, function(x){
+  x$el <- c(NA,diff(x$P2) / as.numeric(diff(x$t),unit="hours"))
+  # Remove outliers
+  x$el[x$el>100] <- NA
+  x$el[x$el<0] <- NA
+# plot(x$t,x$el,ylim=c(0,20))
+#   plot(x$t,cumsumWithNA(x$el))
+# plot(x$t,x$P2)  
+  return(x)
+})
+D <- do.call("rbind",Lre)
+
+# Make into a data.frame with the power for each separate house as a column
+L <- split(D[ ,c("t","el")],D$sn)
+X <- L[[1]]
+houseid <- info$houseid[ which(info$sn==as.numeric(names(L)[1])) ]
+names(X)[2] <- pst("el",houseid)
+# Rename the columns
+for(i in 2:length(L))
+  {
+    tmp <- L[[i]]
+    houseid <- info$houseid[ which(info$sn==as.numeric(names(L)[i])) ]
+    names(tmp)[2] <- pst("el",houseid)
+    X <- merge(X,tmp,by="t",all=TRUE)
+  }
+
+# To hourly
+X <- resample(X, ts=3600, tstart=asct("2008-12-01"), tend=asct("2011-05-01"))
+
+#---------
+# SOME OF THE FOLLOWING MIGHT CONTAIN ERRORS (some leftover stuff from a merge was causing problems)
+# Read the split heat data
+load(pst("data_soenderborg_heatsplit/heatSplit.sp_1.sn_",info$sn[1],".rda"))
+tmp <- DH1
+names(tmp)[-1] <- pst("house",1,"_",names(tmp)[-1])
+Xor <- tmp
+#
+for(i in 2:nrow(info)){
+    load(pst("data_soenderborg_heatsplit/heatSplit.sp_1.sn_",info$sn[i],".rda"))
+    #
+    tmp <- DH1
+    names(tmp)[-1] <- pst("house",i,"_",names(tmp)[-1])
+    Xor <- merge(Xor,tmp,by="t")
+}
+
+
+# Put together data and X and Xor
+Dbuilding <- subset(data, nms=c("t","Heatload","Ta","I","Ws","Wd","Ta.obs","I.obs","Wd.obs","Ws.obs","cosAoi","sunElevation","tday"))
+range(Dbuilding$t)
+range(X$t)
+
+tmp <- X#[in_range("2010-12-15 01:00:00 GMT", X$t, "2011-02-01 00:00:00 GMT"), ]
+tmp <- tmp[ ,-1]
+names(tmp) <- pst("house", 1:16)
+Dbuilding$Electricityload <- tmp
+
+
+# Xor
+Dbuilding <- subset(Dbuilding, c("2009-01-01 00:00:00 GMT", "2011-05-01 00:00:00 GMT"))
+range(Dbuilding$t)
+range(Xor$t)
+tmp <- Xor[period("2008-12-01 01:00:00 GMT",Xor$t,"2011-05-01 00:00:00 GMT"), ]
+tmp <- tmp[ ,-1]
+Dbuilding$All <- tmp
+
+
+#
+str(Dbuilding)
+
+#
+plot(Dbuilding$Ta.obs, Dbuilding$Ta$k1)
+plot(Dbuilding$Ta.obs, lag(Dbuilding$Ta$k1, 0))
+plot(Dbuilding$Ta.obs, lag(Dbuilding$Ta$k1, 1))
+plot(Dbuilding$Ta.obs, lag(Dbuilding$Ta$k1, 2))
+
+plot(Dbuilding$All$house1_effekt)
+lines(Dbuilding$All$house1_Pheat)
+lines(Dbuilding$All$house1_Pwater)
+plot(Dbuilding$Electricityload$house1, Dbuilding$All$house1_P1)
+
+
+# Don't save it in this folder
+# saveRDS(Dbuilding, file="XX/Dbuilding.Rda")
+
+
+
+#----------------------------------------------------------------
+# Make for solar power forecasting example
+source("functions/aoi.R")
+data_all <- data
+# Take the observed global radiation
+names(data_all)
+data_all$y <- data_all$I.obs
+
+# Make a transformation into solar power, simply a projection on an inclined surface
+names(data_all)
+
+X <- data.frame(t=data_all$t, G=data_all$I.obs, sunElevation=data_all$sunElevation)
+X$IbNwp <- lag_vector(data_all$Ib$k1, 1)
+X$IdNwp <- lag_vector(data_all$Id$k1, 1)
+#
+# Some small positive morning values
+plot_ts(X[period("2011-04-01",X$t,"2011-04-10"), ], c("^G","sunElevation"))
+X$G[X$sunElevation < 0] <- 0
+plot_ts(X[period("2011-04-01",X$t,"2011-04-10"), ], c("^G","sunElevation"))
+#
+#----------------------------------------------------------------
+# First split into direct and diffuse
+zenith <- (pi/2) - X$sunElevation
+# Clearness index with fitted clear sky radiation
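+# (G0 is the extraterrestrial radiation on a horizontal surface: the solar constant
+#  1367 W/m^2 with a yearly eccentricity correction, projected with cos(zenith))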
+G0 <- 1367 * (1 + 0.033*cos( 2*pi*as.POSIXlt(X$t)$yday/365)) * cos(zenith)
+kt <- X$G / (G0/1000)
+# air mass
+m <- 1/cos(zenith)     
+m[zenith>(0.95*pi/2)] <- 20
+# Only solar elevation above xx deg
+iNA <- 89<aoiToDeg(zenith) | aoiToDeg(zenith)<0
+iNA[is.na(iNA)] <- FALSE
+# i <- 10000:12800
+# plotTSBeg(2)
+# plot(X$t[i],kt[i],ylim=c(0,1.5))
+# plot(X$t[i],X$Ig[i],ylim=c(0,1000))
+# lines(X$t[i],cl$Surf$I[i],col=2)
+# plotTSXAxis(X$t[i])
+kt[iNA] <- NA
+kt[kt>1] <- 1
+# Split into Diffuse
+# Id <- Ig * (0.944-1.538*exp(-exp(2.808-4.759*kt+2.276*kt^2)))# + 0.125*m + 0.013*m^2)))
+# Id <- Ig * (0.952-1.041*exp(-exp(2.3-4.702*kt)))
+# oldktFunction <- function(kt,a=0.952,b=1.041,c=2.3,d=4.702){ 0.3*(a-b*exp(-exp(c-d*kt)))+0.1 }
+ktSigmoid <- function(kt,minOut,maxOut,offset,slope){ minOut+(maxOut-minOut)*(1-1/(1+exp(-(slope*(kt-offset))))) }
+# See how the sigmoid looks
+# t1 <- seq(0,1,by=0.01)
+# plot(t1,ktSigmoid(t1,minOut=0.12,maxOut=0.85,offset=0.45,slope=10),ylim=c(0,1))
+# Split it
+X$Gdiffuse <- X$G * ktSigmoid(kt,minOut=0.12,maxOut=0.85,offset=0.45,slope=10)
+X$Gdirect <- X$G - X$Gdiffuse
+# For solar elevation below xx deg, set to all diffuse
+X$Gdiffuse[iNA] <- X$G[iNA]
+X$Gdirect[iNA] <- 0
+#
+#plotmulti(X[period("2009-06-01",X$t,"2009-06-10"), ], c("^G","^Ib|^Id"))
+#
+#
+X$sinsunelev <- sin(X$sunElevation)
+# Clip it
+X$sinsunelev[X$sinsunelev<0.01] <- 0.01
+# Project the solar beam plane 
+X$Ib_solarplane <- X$Gdirect / X$sinsunelev
+
+#
+panel_power <- function(pAzimuth){
+    latitude <- 54.909275
+    longitude <- 9.807340
+    slope <- 45
+    X$cosAOI <- aoiCos1(X$t, latitude, longitude, slope, pAzimuth)
+    X$sinAOI <- sqrt(1 - X$cosAOI^2)
+    # Since the sun is behind the plane when cosAOI < 0
+    X$sinAOI[X$cosAOI < 0] <- 0
+    X$Ib_PV_plane <- X$Ib_solarplane * X$sinAOI
+    #
+    #plotmulti(X[period("2009-06-01",X$t,"2009-06-10"), ], c("^I|^G","cos|sin"))
+    #
+    # Apply an angle of incidence modifier on the direct
+    fKtab <- function(b0,cosTheta)
+    {
+        # Calculate the AOI modifier
+        Ktab <- 1 - b0*(1/cosTheta - 1)
+        # set the AOI modifier to 0 for cosTheta<0
+                                        #    Ktab[cosTheta<=0] <- 0
+                                        #    Ktab[Ktab<=0] <- 0
+        cosTheta0 <- 1/(1/b0+1)
+        x <- 1/(1+exp(-1000*(cosTheta-cosTheta0)))
+        Ktab <- x*Ktab
+        return(Ktab)
+    }
+    X$fKtab <- fKtab(0.1, X$cosAOI)
+    # The power is then a mix of direct and diffuse
+    X$P_PV_plane <- 0.6 * (X$fKtab * X$Ib_PV_plane + X$Gdiffuse) + 0.4 * X$G
+    #plotmulti(X[period("2009-07-01",X$t,"2009-07-10"), ], c("^I|^G|^P","sin","fKtab","^G$|^P_PV_plane$"))
+    #
+    #
+    X$y <- 6 * X$P_PV_plane
+    X$y[X$y > 5000] <- 2000
+    #
+    #
+    #    tmp <- as.datalist(data_all, period("2011-04-01", data_all$t, "2011-04-08"))
+    tmp <- X[period("2011-03-07", X$t, "2011-03-14"), ]
+    plot(tmp$t, tmp$G/max(tmp$G, na.rm=TRUE), type="l", ylab="Normalized")
+    title(paste("Panel surface towards",pAzimuthSeq_text[i]), line=-1.2)
+    lines(tmp$t, tmp$y/max(tmp$y, na.rm=TRUE), type="l", col=2)
+    X$P_PV_plane[X$P_PV_plane>1000] <- 998
+    return(X$P_PV_plane)
+}
+
+# Fit for these azimuth angles
+pAzimuthSeq <- c(-90, -20, 0, 90)
+pAzimuthSeq_text <- c("East", "South", "20 deg. East", "West")
+#
+
+setpar("ts", mfrow=c(length(pAzimuthSeq),1))
+for(i in 1:length(pAzimuthSeq)){
+    # The sine of the AOI on an inclined surface
+    panel_power(pAzimuthSeq[i])
+}
+legend("topright", c("Global radiation    ","Solar power"), lty=1, col=c(1,2))
+axis.POSIXct(1, X$t, xaxt="s")
+
+
+pAzimuthSeq <- c(-40, -20, 0, 20, 40)
+setpar("ts", mfrow=c(length(pAzimuthSeq),1))
+x <- lapply_cbind_df(pAzimuthSeq, panel_power)
+names(x) <- pst("azimuth.",gsub("-","m",pAzimuthSeq))
+
+data_all$PVpower <- x * 5
+plot_ts.data.list(data_all, c("^I$","PVpower"), kseq=1)
+
+
+# Write for solar power forecasting
+Dsolarpower <- subset(data_all, c("2010-01-01","2011-01-01"), nms=c("t","PVpower","I","Ta","I.obs","Ta.obs","cosAoi","sunElevation","tday"))
+#
+usethis::use_data(Dsolarpower, overwrite=TRUE) 
diff --git a/misc-R/data_soenderborg_make_fullset_with_electricity.R b/misc-R/data_soenderborg_make_fullset_with_electricity.R
new file mode 100644
index 0000000000000000000000000000000000000000..4b841b5e5dc1f0c56652e4386752de30ee34297c
--- /dev/null
+++ b/misc-R/data_soenderborg_make_fullset_with_electricity.R
@@ -0,0 +1,110 @@
+#### setting work directory and libraries ####
+rm(list = ls())
+
+## ## Packages used
+## require(R6)
+require(data.table)
+## require(Rcpp)
+## require(splines)
+library(devtools)
+library(roxygen2)
+
+pack <- as.package("../../onlineforecast")
+load_all(pack)
+
+
+#### Importing data ####
+## First unzip to get the .csv: system('unzip ../data/DataSoenderborg.zip')
+data_or <- fread("data_soenderborg.csv", sep = ",", header = TRUE)
+data_or[, `:=`(t, asct(data_or$t))]
+setDF(data_or)
+
+
+##----------------------------------------------------------------
+## The electricity was not in the above data, so add it
+## Open the info
+info <- read.table("data_soenderborg_selectedInfo.csv",header=TRUE,sep=",")
+
+## Open the hourly data, and make one data.frame with the power for each house
+load("data_soenderborg_orig.rda")
+#load("../data/heat_sp-1.rda")
+D <- D[D$sn %in% info$sn, ]
+
+## Make the acc. el energy (kWh) into power (kW)
+L <- split(D,D$sn)
+x <- L[[3]]
+Lre <- lapply(L, function(x){
+  x$el <- c(NA,diff(x$P2) / as.numeric(diff(x$t),unit="hours"))
+  ## Remove outliers
+  x$el[x$el>100] <- NA
+  x$el[x$el<0] <- NA
+## plot(x$t,x$el,ylim=c(0,20))
+##   plot(x$t,cumsumWithNA(x$el))
+## plot(x$t,x$P2)  
+  return(x)
+})
+D <- do.call("rbind",Lre)
+
+## Make into a data.frame with the power for each separate house as a column
+L <- split(D[ ,c("t","el")],D$sn)
+X <- L[[1]]
+houseid <- info$houseid[ which(info$sn==as.numeric(names(L)[1])) ]
+names(X)[2] <- pst("el",houseid)
+## Rename the columns
+for(i in 2:length(L))
+  {
+    tmp <- L[[i]]
+    houseid <- info$houseid[ which(info$sn==as.numeric(names(L)[i])) ]
+    names(tmp)[2] <- pst("el",houseid)
+    X <- merge(X,tmp,by="t",all=TRUE)
+  }
+
+## To hourly
+X <- resample(X, ts=3600, tstart=asct("2008-12-01"), tend=asct("2011-05-01"))
+
+##---------
+## Read the split heat data
+load(pst("data_soenderborg_heatsplit/heatSplit.sp_1.sn_",info$sn[1],".rda"))
+Xheat <- DH1[,c("t","Pheat")]
+names(Xheat) <- c("t",pst("heat",1))
+##
+for(i in 2:nrow(info))
+  {
+    load(pst("data_soenderborg_heatsplit/heatSplit.sp_1.sn_",info$sn[i],".rda"))
+    tmp <- DH1[,c("t","effekt")]
+    names(tmp) <- c("t",pst("heat",i))
+    Xheat <- merge(Xheat,tmp,by="t")
+  }
+
+## Put them together
+X <- merge(Xheat,X,by="t")
+
+## Check that they are the same houses:
+## tmp <- merge(X,data_or, by="t")
+## tmp$heat8 == tmp$Heatload.house8
+## plot(tmp$heat2 - tmp$Heatload.house2)
+## ##
+## i <- 1:1000
+## plot(tmp$heat8[i])
+## lines(tmp$Heatload.house8[i])
+
+range(data_or$t)
+names(data_or)
+range(X$t)
+names(X)
+
+## The electricity to data_or
+tmp <- X[ ,c(1,grep("^el",names(X)))]
+names(tmp) <- gsub("el", "Elecload.house", names(tmp))
+data <- merge(data_or,tmp, by="t")
+
+unique(diff(data$t))
+range(data$t)
+
+tmp <- as.data.list(data)
+
+
+
+## Write it
+tmp <- data[in_range("2010-01-01", data$t, "2011-01-01"), ]
+write.table(tmp, "~/tmp/data_soenderborg_2010.csv", sep=",", row.names=FALSE)
diff --git a/misc-R/data_soenderborg_orig.rda b/misc-R/data_soenderborg_orig.rda
new file mode 100644
index 0000000000000000000000000000000000000000..dea9776305edff74d62062762ffcc9fa6ab5dfcb
Binary files /dev/null and b/misc-R/data_soenderborg_orig.rda differ
diff --git a/misc-R/data_soenderborg_selectedInfo.csv b/misc-R/data_soenderborg_selectedInfo.csv
new file mode 100644
index 0000000000000000000000000000000000000000..711b4f5ac29e6cac8ffb4503b286955c7a3f4bda
--- /dev/null
+++ b/misc-R/data_soenderborg_selectedInfo.csv
@@ -0,0 +1,17 @@
+"sn","pdfside","adresse","BBRm2","opfoert","Type","Ydervaeg","Tag","bem","beboere","pejs","bortrejst","elgulvvarme","solvarme","supplVarme","natsaenkning","pdfsidenr","houseid"
+4218597,15,"Skovbrynet 23",151,1970,"Fritliggende enfamilieshus (parcelhus)","Mursten (tegl, kalksandsten, cementsten)","Tegl","",2,0,0,0,0,0,0,15,1
+4218598,36,"Frejasvej 10",163,1969,"Fritliggende enfamilieshus (parcelhus)","Mursten (tegl, kalksandsten, cementsten)","Tegl","",2,0,0,0,0,0,0,36,2
+4711176,40,"Laurids Skaus vej 2",140,1963,"Fritliggende enfamilieshus (parcelhus)","Mursten (tegl, kalksandsten, cementsten)","Fibercement, herunder asbest (bølge- eller skifereternit)","",2,0,0,0,0,0,0,40,3
+4724106,21,"Violvej 1",86,1952,"Fritliggende enfamilieshus (parcelhus)","Mursten (tegl, kalksandsten, cementsten)","Tagpap (med taghældning)","",2,0,0,0,0,0,0,21,4
+4836681,6,"Henrik Ibsens Vej 5",111,1966,"Fritliggende enfamilieshus (parcelhus)","Letbeton (lette bloksten, gasbeton)","Metalplader (bølgeblik, aluminium og lignende)","",1,0,0,0,0,0,0,6,5
+4964553,8,"Dybbøløstenvej 4",119,1963,"","","","",2,0,0,0,0,0,0,8,6
+5036505,10,"Møllegade 47",119,1947,"","","","",3,0,0,0,0,0,0,10,7
+5107720,2,"Agtoftsvej 39",160,1965,"Fritliggende enfamilieshus (parcelhus)","Mursten (tegl, kalksandsten, cementsten)","Fibercement, herunder asbest (bølge- eller skifereternit)","olietank?",4,0,0,0,0,0,0,2,8
+5159799,17,"Peter Graus Vej 5",173,1965,"Fritliggende enfamilieshus (parcelhus)","Mursten (tegl, kalksandsten, cementsten)","Fibercement, herunder asbest (bølge- eller skifereternit)","",1,0,0,0,0,0,0,17,9
+5164534,41,"Brunhoved 2",135,1996,"","","","",2,0,0,0,0,0,1,41,10
+5183232,33,"Aprilvej 4",122,1966,"","","","",2,0,0,0,0,0,0,33,11
+5193768,16,"Brombærhegnet 18",136,1975,"","","","",2,0,0,0,0,0,0,16,12
+5194732,NA,"Parkgade 44",86,1937,"","","","",2,0,1,0,0,0,1,29,13
+5194965,27,"Gammel Aabenraavej 29",123,1965,"","","","",2,0,0,0,0,0,1,27,14
+5197381,26,"Midtkobbel 23",127,1953,"","","","",2,0,0,0,0,0,0,26,15
+5223036,7,"Gyvelvej 3",137,1967,"","","","",5,0,0,0,0,0,1,7,16
diff --git a/misc-R/functions/aoi.R b/misc-R/functions/aoi.R
new file mode 100644
index 0000000000000000000000000000000000000000..dd5cacbab94ffd2041afe0d7d0591c90d952a334
--- /dev/null
+++ b/misc-R/functions/aoi.R
@@ -0,0 +1,173 @@
+aoiToRad <- function(angle)
+  {
+    angle/180 * pi
+  }                                   
+
+aoiToDeg <- function(rad)
+  {
+    rad/pi * 180
+  }
+
+aoiEquationOfTime <- function(time)
+{
+  day <- as.POSIXlt(time)$yday
+  # Day is the number of days since the start of the year
+  b <- aoiToRad((360/365) * (day-81))
+  # Equation of time in minutes
+  9.87 * sin(2*b) - 7.53 * cos(b) - 1.5 * sin(b)
+}
+
+aoiLocalSolarTime <- function(time, longitude)
+{
+  ## time is in UTC
+  ## longitude is in radians (positive east of Greenwich). The earth rotates 1 degree in 4 minutes.
+  time + ( 4 * aoiToDeg(longitude) + aoiEquationOfTime(time) ) * 60
+}
+
+aoiSunHourAngle <- function(time, longitude)
+{
+  ## localSolarTime is in seconds since 1970 given as a POSIXct object
+  t <- as.POSIXlt(aoiLocalSolarTime(time, longitude))
+  LST.tod <- t$hour + t$min/60 + t$sec/3600
+  ## Output hourAngle in rad
+  aoiToRad( 15 * (LST.tod  - 12) )
+}
+
+aoiSunDeclination <- function(time)
+  {
+    #### Calculate declination angle
+    day <- as.POSIXlt(time)$yday + 1
+    ## Return the result in rad
+    asin( sin(aoiToRad(23.45)) * sin(aoiToRad((360/365)*(day-81))) )
+  }
+
+aoiSunElevation <- function(latitude, hourAngle, declination)
+  {
+    asin( cos(hourAngle) * cos(declination) * cos(latitude)  + sin(declination) * sin(latitude) )
+  }
+
+aoiSunElevationDeg <- function(time, latitude, longitude)
+  {
+    ## All input angles are given in degrees, transform them into rad
+    latitude <- aoiToRad(latitude)
+    longitude <- aoiToRad(longitude)
+    ## Calculate the earth declination
+    declination <- aoiSunDeclination(time)
+    ## Calculate the hourAngle
+    hourAngle <- aoiSunHourAngle(time, longitude)
+    ## Calculate the elevation angle of the sun
+    aoiSunElevation(latitude, hourAngle, declination)
+  }
+
+aoiSunAzimuth <- function(latitude, elevation, declination, hourAngle)
+  {
+    ## Works only for latitudes above the max declination of the earth: 23.45 degrees
+    if(abs(aoiToDeg(latitude))<=23.45){ stop("Works only for latitudes above the max declination of the earth: 23.45 degrees") }
+    ##
+    sAzimuth <- acos( (sin(declination) * cos(latitude) - cos(declination) * sin(latitude) * cos(hourAngle))/( cos(elevation) ) )
+    i <- hourAngle > 0
+    sAzimuth[i] <- 2*pi - sAzimuth[i]
+    sAzimuth
+  }
+
+aoiSunAzimuthDeg <- function(time, latitude, longitude)
+  {
+    if(latitude<=23.45){ stop("Works only for latitudes above the max declination of the earth: 23.45 degrees") }
+    ## All input angles are given in degrees, transform them into rad
+    latitude <- aoiToRad(latitude)
+    longitude <- aoiToRad(longitude)
+    ## Calculate the earth declination
+    declination <- aoiSunDeclination(time)
+    ## Calculate the hourAngle
+    hourAngle <- aoiSunHourAngle(time, longitude)
+    ## Calculate the elevation angle of the sun
+    elevation <- aoiSunElevation(latitude, hourAngle, declination)
+    ## Return in Rad
+    aoiSunAzimuth(latitude, elevation, declination, hourAngle)
+  }
+
+aoiCos1 <- function(time, latitude, longitude, slope, pAzimuth)
+{
+  ## All angle are given in degrees.
+  ## slope: is the angle between the normal to the ground surface, and the normal of the panel.
+  ## pAzimuth: azimuth of the panel where 0 degrees is due south. + is toward west, - is toward east.
+  
+  ## All input angles are given in degrees, transform them into rad
+  longitude <- aoiToRad(longitude)
+  latitude <- aoiToRad(latitude)
+  slope <- aoiToRad(slope)
+  pAzimuth <- aoiToRad(pAzimuth)
+
+  ## Calculate the earth declination
+  dcl <- aoiSunDeclination(time)
+
+  ## Calculate the hourAngle
+  hourAngle <- aoiSunHourAngle(time, longitude)
+
+  ## Calculate the angle of incidence
+  cosAOI <- (sin(dcl) * sin(latitude) * cos(slope) - sin(dcl) * cos(latitude) * sin(slope) * cos(pAzimuth)
+               + cos(dcl) * cos(latitude) * cos(slope) * cos(hourAngle)
+               + cos(dcl) * sin(latitude) * sin(slope) * cos(pAzimuth) * cos(hourAngle)
+               + cos(dcl) * sin(slope) * sin(pAzimuth) * sin(hourAngle))
+  
+  ## Return the result 
+  return(cosAOI)
+}
+
+aoiCos2 <- function(time, latitude, longitude, slope, pAzimuth)
+  {
+    ## All angle are given in degrees.
+    ## slope: is the angle between the normal to the ground surface, and the normal of the panel.
+    ## pAzimuth: azimuth of the panel where 0 degrees is due south. + is toward west, - is toward east.
+    pAzimuth <- pAzimuth + 180 # The algorithm uses a panel azimuth where 180 degrees is due south
+    
+    ## All input angles are given in degrees, transform them into rad
+    longitude <- aoiToRad(longitude)
+    latitude <- aoiToRad(latitude)
+    slope <- aoiToRad(slope)
+    pAzimuth <- aoiToRad(pAzimuth)
+
+    ## Calculate the earth declination
+    declination <- aoiSunDeclination(time)
+
+    ## Calculate the hourAngle
+    hourAngle <- aoiSunHourAngle(time, longitude)
+
+    ## Calculate the zenith angle of the sun
+    elevation <- aoiSunElevation(latitude, hourAngle, declination)
+    zenith <- (pi/2) - elevation
+
+    ## Calculate the azimuth of the sun
+    sAzimuth <- aoiSunAzimuth(latitude, elevation, declination, hourAngle)
+
+    ## Calculate the angle of incidence
+    cosAOI <-  cos(slope) * cos(zenith) + sin(slope) * sin(zenith) * cos(sAzimuth - pAzimuth)
+
+    ## Return the result
+    return(cosAOI)
+  }
+
+
+
+## ###########################################################################
+## ## Tests and examples
+## ## All times must be in GMT
+## t <- seq(ISOdate(2009,6,1,0),ISOdate(2009,6,3,0),by=60)
+
+## ## This will calculate the elevation at the solar collectors at Byg in radians
+## latitude <- 25
+## longitude <- 12
+## ## latitude <- 37
+## ## longitude <- -2
+
+## ## Solar elevation
+## elevRad <- aoiSunElevation( aoiToRad(latitude), aoiSunHourAngle(t,aoiToRad(longitude)), aoiSunDeclination(t))
+## elevRad2 <- aoiSunElevationDeg(t,latitude,longitude)
+## ##
+## plot(aoiSunAzimuthDeg(t,latitude,longitude),aoiToDeg(elevRad))
+
+## lines(t,aoiToDeg(elevRad2),col=2)
+
+## ## Angle of incidence of some surface
+## par(mfrow=c(2,1))
+## plot(t,aoiCos1(t, longitude, latitude, slope=90, pAzimuth=0))
diff --git a/misc-R/lm-example.R b/misc-R/lm-example.R
new file mode 100644
index 0000000000000000000000000000000000000000..cfab7c4e80071094e6d6f85abcdac5f023a83322
--- /dev/null
+++ b/misc-R/lm-example.R
@@ -0,0 +1,179 @@
+rm(list = ls())
+library(devtools)
+load_all(as.package("../../onlineforecast"))
+
+D <- Dbuildingheatload
+names(D)
+D$y <- D$heatload
+
+
+plot_ts(D, c("^y","Ta"), kseq=c(1,12))
+
+
+
+
+Dtrain <- subset(D, c("2010-12-15", "2011-01-01"))
+Dtrain$scoreperiod <- in_range("2010-12-20", Dtrain$t)
+
+model <- forecastmodel$new()
+model$output = "y"
+model$add_inputs(Ta = "lp(Ta, a1=0.9)",
+                 AR = "AR(lags=c(0))",     # Auto-regressive input of the output (lag 0)
+                 mu = "ones()") # ones() generates a matrix of ones (i.e. an intercept is included)
+
+model$add_prmbounds(Ta__a1 = c(0.8, 0.9, 0.9999))
+
+model$kseq <- c(1,18)
+model$p <- lm_optim(model, Dtrain, control=list(maxit=2))$par
+
+model$kseq <- 1:36
+val <- lm_fit(model$p, model, D, returnanalysis = TRUE)
+
+undebug(lm_fit)
+
+names(val)
+val$Lfitval$k1
+val$scoreperiod
+head(val[[6]])
+
+head(val$Yhat, 20)
+
+names(model)
+D$Yhat <- val$Yhat
+dev.new()
+plot_ts(D, c("^y|^Y"), kseq = c(1,18))
+
+
+
+####
+
+model$insert_p(model$p)
+datatr <- model$transform_data(D)
+names(datatr)
+
+X <- as.data.frame(subset(datatr, kseq = 1, lagforecasts = TRUE))
+inputnms <- names(X)
+## Add the model output to the data.frame for lm()
+X[ ,model$output] <- D[[model$output]]
+
+head(X,10)
+## Generate the formula
+frml <- pst(model$output, " ~ ", pst(inputnms, collapse=" + "), " - 1")
+## Fit the model
+fit <- lm(frml, X)
+
+summary(fit)
+head(fitted(fit))
+
+
+head(predict(fit, X), 10)
+head(predict(fit, Xpred))
+
+
+head(X,10)
+head(Xpred,10)
+#k <- model$kseq[1]
+#fit <- model$Lfits[[1]]
+## Form the regressor matrix, don't lag
+Xpred <- as.data.frame(subset(datatr, kseq = 1))
+pred <- predict(fit, Xpred)
+pred2 <- predict(fit, X)
+head(pred)
+head(pred2,10)
+summary(fit)
+head(Xpred)
+
+X$Ta.k1[2]*fit$coef[1] + X$mu.k1[2]*fit$coef[2]
+
+
+head(fit$residuals)
+head(X$y - pred)
+
+###
+
+head(val$Yhat)
+
+head(pred, 10)
+head(pred2,10)
+
+## Predictions
+x <- rnorm(15)
+y <- x + rnorm(15)
+fitTest <- lm(y ~ x)
+predict(fitTest)
+fitted(fitTest)
+
+new <- data.frame(x = seq(-3, 3, 0.5))
+predict(fitTest, new, se.fit = TRUE)
+
+
+
+# Take data (See vignette "building-heat-load-forecasting" for better model and more details)
+D <- subset(Dbuildingheatload, c("2010-12-15", "2011-01-01"))
+D$y <- D$Heatload[ ,1]
+# Define a model 
+model <- forecastmodel$new()
+model$output <- "y"
+model$add_inputs(Ta = "Ta")
+model$add_regp("rls_prm(lambda=0.9)")
+
+# Before fitting the model, define which points to include in the evaluation of the score function
+D$scoreperiod <- in_range("2010-12-20", D$t)
+# And the sequence of horizons to fit for
+model$kseq <- 1:6
+ 
+# Now we can fit the model and get the model validation analysis data
+L <- rls_fit(p = c(lambda=0.99), model = model, data = D, returnanalysis = TRUE)
+names(L)
+plot(L$Yhat$k1) # The one-step forecast
+plot(L$Lfitval$k1) # The one-step RLS coefficient over time
+# Fitting with lower lambda makes the RLS parameter change faster
+L <- rls_fit(p = c(lambda=0.9), model = model, data = D, returnanalysis = TRUE)
+names(L)
+plot(L$Lfitval$k1) # The one-step RLS coefficient over time
+# It can return a score
+rls_fit(c(lambda=0.99), model, D, scorefun=rmse, returnanalysis = FALSE)
+# Such that it can be passed to an optimizer (see ?rls_optim for a nice wrapper of optim)
+val <- optim(c(lambda=0.99), rls_fit, model = model, data = D, scorefun = rmse, returnanalysis = FALSE, method = "L-BFGS-B", lower = 0.5, upper = 0.9999)
+val$p
+
+# See rmse as a function of horizon
+val <- rls_fit(p = c(lambda=0.9), model = model, data = D, returnanalysis = TRUE, scorefun = rmse)
+names(val)
+head(val$scoreval, 100)
+plot(val$scoreval)
+
+
+
+
+# Take data (See vignette "building-heat-load-forecasting" for better model and more details)
+D <- subset(Dbuildingheatload, c("2010-12-15", "2011-01-01"))
+D$y <- D$Heatload[ ,1]
+# Define a model 
+model <- forecastmodel$new()
+model$output <- "y"
+model$add_inputs(Ta = "lp(Ta, a1=0.9)")
+model$add_prmbounds(Ta__a1 = c(0.8, 0.9, 0.9999))
+# Before fitting the model, define which points to include in the evaluation of the score function
+D$scoreperiod <- in_range("2010-12-20", D$t)
+# And the sequence of horizons to fit for
+model$kseq <- 1:6
+ 
+# Now we can fit the model and get the model validation analysis data
+L <- lm_fit(p = c(Ta__a1 = 0.7), model = model, data = D, returnanalysis = TRUE)
+names(L)
+plot(L$Yhat$k1) # The one-step forecast
+
+# The coefficients for each model
+head(L$Lfitval)
+# It can return a score
+lm_fit(c(Ta__a1=0.7), model, D, scorefun=rmse, returnanalysis = FALSE)
+# Such that it can be passed to an optimizer (see ?rls_optim for a nice wrapper of optim)
+val <- optim(c(Ta__a1=0.7), lm_fit, model = model, data = D, scorefun = rmse, returnanalysis = FALSE, method = "L-BFGS-B", lower = 0.5, upper = 0.9999)
+val$p
+
+# See rmse as a function of horizon
+val <- lm_fit(p = c(Ta__a1 = 0.7), model = model, data = D, returnanalysis = TRUE, scorefun = rmse)
+names(val)
+head(val$scoreval, 100)
+plot(val$scoreval)
diff --git a/misc-R/lm-example.R~ b/misc-R/lm-example.R~
new file mode 100644
index 0000000000000000000000000000000000000000..e7b246f347bce62d78f96dae1cd0e208028483c5
--- /dev/null
+++ b/misc-R/lm-example.R~
@@ -0,0 +1,187 @@
+rm(list = ls())
+library(devtools)
+load_all(as.package("../../onlineforecast"))
+
+D <- Dbuildingheatload
+names(D)
+D$y <- D$Heatload$house9
+
+
+plot_ts(D, c("^y","Ta"), kseq=c(1,12))
+
+
+
+
+Dtrain <- subset(D, c("2010-12-15", "2011-01-01"))
+Dtrain$scoreperiod <- in_range("2010-12-20", Dtrain$t)
+
+model <- forecastmodel$new()
+model$output = "y"
+model$add_inputs(Ta = "lp(Ta, a1=0.9)",
+                 AR = "AR(lags=c(0))",     # Auto-regressive input of the output (lag 0)
+                 mu = "ones()") # ones() generates a matrix of ones (i.e. an intercept is included)
+
+model$add_pb(Ta__a1 = c(0.8, 0.9, 0.9999))
+
+model$kseq <- c(1,18)
+source("../R/lm_fit.R")
+model$p <- lm_optim(model, Dtrain, control=list(maxit=2))$par
+
+model$kseq <- 1:36
+debug(lm_fit)
+val <- lm_fit(model$p, model, D, returnanalysis = TRUE)
+
+undebug(lm_fit)
+
+names(val)
+val$LTheta$k1
+val$scoreperiod
+head(val[[6]])
+
+head(val$Yhat, 20)
+
+ names(model)
+lm_fit
+D$Yhat <- val$Yhat
+dev.new()
+plot_ts(D, c("^y|^Y"), kseq = c(1,18))
+
+plot(val$Resid$k10)
+head(val$Yhat)
+dev.new()
+plot(D$t, D$y, type = "l")
+lines(D$t, D$Yhat$k1, col = "red")
+lines(D$t, D$Yhat$k18, col = "blue")
+
+####
+
+model$insert_p(model$p)
+datatr <- model$transform_data(D)
+names(datatr)
+
+X <- as.data.frame(subset(datatr, kseq = 1, lagforecasts = TRUE))
+inputnms <- names(X)
+## Add the model output to the data.frame for lm()
+X[ ,model$output] <- D[[model$output]]
+
+head(X,10)
+## Generate the formula
+frml <- pst(model$output, " ~ ", pst(inputnms, collapse=" + "), " - 1")
+## Fit the model
+fit <- lm(frml, X)
+
+summary(fit)
+head(fitted(fit))
+
+
+head(predict(fit, X), 10)
+head(predict(fit, Xpred))
+
+
+head(X,10)
+head(Xpred,10)
+#k <- model$kseq[1]
+#fit <- model$Lfits[[1]]
+## Form the regressor matrix, don't lag
+Xpred <- as.data.frame(subset(datatr, kseq = 1))
+pred <- predict(fit, Xpred)
+pred2 <- predict(fit, X)
+head(pred)
+head(pred2,10)
+summary(fit)
+head(Xpred)
+
+X$Ta.k1[2]*fit$coef[1] + X$mu.k1[2]*fit$coef[2]
+
+
+head(fit$residuals)
+head(X$y - pred)
+
+###
+
+head(val$Yhat)
+
+head(pred, 10)
+head(pred2,10)
+
+## Predictions
+x <- rnorm(15)
+y <- x + rnorm(15)
+fitTest <- lm(y ~ x)
+predict(fitTest)
+fitted(fitTest)
+
+new <- data.frame(x = seq(-3, 3, 0.5))
+predict(fitTest, new, se.fit = TRUE)
+
+
+
+# Take data (See vignette "building-heat-load-forecasting" for better model and more details)
+D <- subset(Dbuildingheatload, c("2010-12-15", "2011-01-01"))
+D$y <- D$Heatload[ ,1]
+# Define a model 
+model <- forecastmodel$new()
+model$output <- "y"
+model$add_inputs(Ta = "Ta")
+model$add_regp("rls_prm(lambda=0.9)")
+
+# Before fitting the model, define which points to include in the evaluation of the score function
+D$scoreperiod <- in_range("2010-12-20", D$t)
+# And the sequence of horizons to fit for
+model$kseq <- 1:6
+ 
+# Now we can fit the model and get the model validation analysis data
+L <- rls_fit(p = c(lambda=0.99), model = model, data = D, returnanalysis = TRUE)
+names(L)
+plot(L$Yhat$k1) # The one-step forecast
+plot(L$LTheta$k1) # The one-step RLS coefficient over time
+# Fitting with lower lambda makes the RLS parameter change faster
+L <- rls_fit(p = c(lambda=0.9), model = model, data = D, returnanalysis = TRUE)
+names(L)
+plot(L$LTheta$k1) # The one-step RLS coefficient over time
+# It can return a score
+rls_fit(c(lambda=0.99), model, D, scorefun=rmse, returnanalysis = FALSE)
+# Such that it can be passed to an optimizer (see ?rls_optim for a nice wrapper of optim)
+val <- optim(c(lambda=0.99), rls_fit, model = model, data = D, scorefun = rmse, returnanalysis = FALSE, method = "L-BFGS-B", lower = 0.5, upper = 0.9999)
+val$p
+
+# See rmse as a function of horizon
+val <- rls_fit(p = c(lambda=0.9), model = model, data = D, returnanalysis = TRUE, scorefun = rmse)
+names(val)
+head(val$scoreval, 100)
+plot(val$scoreval)
+
+
+
+
+# Take data (See vignette "building-heat-load-forecasting" for better model and more details)
+D <- subset(Dbuildingheatload, c("2010-12-15", "2011-01-01"))
+D$y <- D$Heatload[ ,1]
+# Define a model 
+model <- forecastmodel$new()
+model$output <- "y"
+model$add_inputs(Ta = "lp(Ta, a1=0.9)")
+model$add_pb(Ta__a1 = c(0.8, 0.9, 0.9999))
+# Before fitting the model, define which points to include in the evaluation of the score function
+D$scoreperiod <- in_range("2010-12-20", D$t)
+# And the sequence of horizons to fit for
+model$kseq <- 1:6
+ 
+# Now we can fit the model and get the model validation analysis data
+L <- lm_fit(p = c(Ta__a1 = 0.7), model = model, data = D, returnanalysis = TRUE)
+names(L)
+plot(L$Yhat$k1) # The one-step forecast
+
+# The coefficients for each model
+head(L$LTheta)
+# It can return a score
+lm_fit(c(Ta__a1=0.7), model, D, scorefun=rmse, returnanalysis = FALSE)
+# Such that it can be passed to an optimizer (see ?rls_optim for a nice wrapper of optim)
+val <- optim(c(Ta__a1=0.7), lm_fit, model = model, data = D, scorefun = rmse, returnanalysis = FALSE, method = "L-BFGS-B", lower = 0.5, upper = 0.9999)
+val$p
+
+# See rmse as a function of horizon
+val <- lm_fit(p = c(Ta__a1 = 0.7), model = model, data = D, returnanalysis = TRUE, scorefun = rmse)
+names(val)
+head(val$scoreval, 100)
+plot(val$scoreval)
diff --git a/misc-R/loadforecast_example_short.R b/misc-R/loadforecast_example_short.R
new file mode 100644
index 0000000000000000000000000000000000000000..f34cdd1c659d53cb747378cd98698c3f37b98121
--- /dev/null
+++ b/misc-R/loadforecast_example_short.R
@@ -0,0 +1,269 @@
+## setting work directory and libraries ####
+rm(list = ls())
+
+## Packages used
+library(devtools)
+library(roxygen2)
+
+pack <- as.package("../../onlineforecast")
+load_all(pack)
+
+## Read the data
+load("../data/Dbuildingheatload.rda")
+Dall <- Dbuildingheatload
+
+## Set the model output y
+Dall$y <- Dall$heatload
+
+kseq <- 0:49
+Dall$AR0 <- lapply_cbind_df(kseq, function(k){
+    Dall$y
+})
+nams(Dall$AR0) <- pst("k",kseq)
+
+Dall$AR0
+
+
+
+## Plot some time series
+##plot_ts(Dall, patterns=c("Ta","Ws","Wd","Id|I$|Ib","^y$"), kseq=c(1,6,12,18,24,36), tstart="2011-01-01", tend="2011-01-10")
+
+## A pairs plot
+##pairs(subset(Dall, pattern="Ta", kseq=c(1:3,6,12,18)), cex=0.5)
+##pairs(subset(Dall, pattern="Ta", kseq=c(1:3,6,12,18), lagforecasts=TRUE), cex=0.5)
+
+##############################################################
+## Recursive Least Squares fitting; the model has inputs (a list of input objects)
+## Define the model
+##
+Model <- model_class$new(output = "y",
+                         inputs = list(Ta      = "Ta",#"lp(Ta, a1=0.9)",
+                                       I       = "I",#"lp(I, a1=0.7)",
+                                       AR0     = "AR0"#"lp(I, a1=0.7)",
+##                                     mu_tday = "fs(tday/24, nharmonics=10)",
+                                        #mu      = "ones()"),
+                                       ),
+                         MA     = c(0),
+                         fitprm = "rls_prm(lambda=0.9)")
+
+## Set the parameters which are optimized in the offline setting
+## Model$prmopt_upper <- c(Ta__a1 = 1,   I__a1 = 1,    lambda = 1) - 1e-04
+## Model$prmopt_init <-  c(Ta__a1 = 0.9, I__a1 = 0.81, lambda = 0.99)
+## Model$prmopt_lower <- c(Ta__a1 = 0.5, I__a1 = 0.5,  lambda = 0.9)
+Model$prmopt_upper <- c(lambda = 1) - 1e-04
+Model$prmopt_init <-  c(lambda = 0.99)
+Model$prmopt_lower <- c(lambda = 0.8)
+
+
+##############################################################
+## Recursive Least Squares fitting; the model has inputs (a list of input objects)
+## The horizons to fit for
+Model$kseq <- c(1)
+prmopt <- Model$prmopt_init
+
+## The fit function always initializes a new fit, no matter what is in 'Model'
+Dpast <- subset(Dall, in_range("2010-12-01", Dall$t, "2011-02-01"))
+## Define a logical series which sets the fitting period
+Dpast$scoreperiod <- in_range("2010-12-15", Dpast$t, "2011-02-01")
+## From there make a test set
+Dtest <- subset(Dall, in_range("2011-02-01", Dall$t, "2011-02-03"))
+
+## Fit to see it is working
+rls_fit(prmopt = Model$prmopt_init,
+        model = Model,
+        data = Dpast,
+        scorefun = rmse)
+
+
+## This can be passed to an optimizer to optimize the offline parameters (keep them)
+Model$prmopt <- optim(par = Model$prmopt_init,
+                      fn = rls_fit,
+                      model = Model,
+                      data = Dpast,
+                      scorefun = rmse,
+                      lower = Model$prmopt_lower,
+                      upper = Model$prmopt_upper)$par
+
+
+## The model could be copied (useful for saving a particular instance)
+## Model1 <- Model$clone(deep = TRUE)
+
+## Use the optimized parameters and fit for all horizons
+#Model$kseq <- 1:36
+#rls_fit(Model$prmopt, Model, Dpast)
+
+## Calculate predictions on new data (NOTE: This uses the latest RLS parameter values, it doesn't update recursively)
+#prd <- rls_predict(Model, Model$transform_data(Dtest))
+#prd
+
+
+## ################################################################
+## Plot k step ahead prediction series
+Model$kseq <- c(1)
+Yhat <- rls_fit(Model$prmopt, Model, Dpast, return_analysis=TRUE)$Yhat
+Dpast$yhat <- Yhat$k1
+
+tmp <- subset(Dpast, in_range("2011-01-01", Dpast$t, "2011-01-05"))
+plot(tmp$t, tmp$y, type = "l")
+lines(tmp$t, tmp$yhat, type = "l", col = 2)
+
+
+## ################################################################
+## Plot a prediction for a particular time point
+prd <- rls_predict(Model, Model$transform_data(Dtest))
+##
+plot(1:length(Dtest$y), Dtest$y)
+##
+lines(1:length(prd[1, ]), prd[1, ])
+for (i in 2:nrow(prd)) {
+  lines(i:(length(prd[1, ])+i-1), prd[i, ], col=i)
+}
+
+
+## ################################################################
+## Recursive update example
+
+## First fit on the past data (resets input and parameter states)
+rls_fit(Model$prmopt, Model, Dpast)
+
+## Recursive updating parameters and prediction
+prd <- as.data.frame(matrix(NA, nrow = length(Dtest$t), ncol = length(Model$kseq)))
+names(prd) <- paste0("k",Model$kseq)
+##
+for (it in 1:length(Dtest$t)) {
+  print(paste(it,"of",length(Dtest$t)))
+  ## A new datalist
+  Dnew <- subset(Dtest, it)
+  ## Generate the inputs
+  ## Important: Note this must only be done once for new input data, since the low-pass filtering continues from the last state
+  D <- Model$transform_data(Dnew)
+  ## New output observations
+  y <- Dnew[[Model$output]]
+  ## We can now update the parameters
+  rls_update(Model, D, y)
+  ## and make a prediction
+  prd[it, ] <- unlist(rls_predict(Model, D))
+}
+
+## Lag the predictions to match observations
+prd <- lag(prd, lags = "+k")
+plot(Dtest$y)
+lines(prd$k8)
+
+## Check if we get the same as when we do it in one fit
+Dboth <- subset(Dall, in_range(min(Dpast$t)-3600, Dall$t, max(Dtest$t)))
+Valanalysis <- rls_fit(Model$prmopt, model = Model, data = Dboth, return_analysis = TRUE)
+
+n <- length(Dboth$t)
+prd2 <- Valanalysis$Yhat[(n-nrow(prd)+1):n, ]
+prd2 - prd
+max(prd2 - prd, na.rm = TRUE)
+
+
+## ################################################################
+## But the error is auto-correlated
+tmp <- rls_fit(Model$prmopt, Model, Dpast, return_analysis=TRUE)
+## For 1-step ahead
+Dpast$Yhat <- tmp$Yhat
+Dpast$yhat <- tmp$Yhat$k1
+residuals <- Dpast$y - Dpast$yhat
+##
+plot(residuals)
+acf(residuals, na.action = na.pass)
+
+## For 2-step ahead
+Dpast$yhat <- tmp$Yhat$k2
+residuals <- Dpast$y - Dpast$yhat
+##
+plot(residuals)
+acf(residuals, na.action = na.pass)
+
+plot_ts(Dpast$Yhat[950:1000, ], "k1$|k2$")
+
+## Keep the residuals to use as input to an error model
+## Keep the residuals to use as output
+Dpast$r <- Dpast$y - tmp$Yhat$k1
+## (Now use only the one-step ahead forecast as input (and output))
+##Dpast$R <- Dpast$y - tmp$Yhat
+Dpast$R <- matrix(Dpast$r, nrow = length(residuals), ncol = 36)
+nams(Dpast$R) <- pst("k",1:36)
+
+## See them for different horizons (they are equal now)
+plot_ts(Dpast$R[900:1000, ], "k[[:digit:]]$")
+
+## Define a model
+Me <- model_class$new(output = "r",
+                      inputs = list(R = "R"),
+                      fitprm = "rls_prm(lambda=0.9)")
+
+## Set the parameters which are optimized in the offline setting
+Me$prmopt_upper <- c(lambda = 1) - 1e-04
+Me$prmopt_init <-  c(lambda = 0.95)
+Me$prmopt_lower <- c(lambda = 0.9)
+
+## Set the horizons for offline optimization
+Me$kseq <- c(1,2)
+prmopt <- Me$prmopt_init
+
+## Fit to check it is working
+Val <- rls_fit(prmopt = Me$prmopt_init,
+        model = Me,
+        data = Dpast,
+        scorefun = rmse,
+        return_analysis = TRUE)
+
+tmp <- cbind(r = Dpast$r, Val$Yhat)
+plot_ts(tmp[which(Dpast$scoreperiod)[1000:1100], ], "*")
+
+
+Dpast$ResidualsYhat <- Val$Yhat
+
+plot_ts(subset(Dpast,Dpast$scoreperiod), "Residuals", 1:2)
+plot_ts(subset(Dpast,which(Dpast$scoreperiod)[1:200]), "Residuals", 1:2)
+
+## This can be passed to an optimizer to optimize the offline parameters (keep them)
+Me$prmopt <- optim(par = Me$prmopt_init,
+                   fn = rls_fit,
+                   model = Me,
+                   data = Dpast,
+                   scorefun = rmse,
+                   lower = Me$prmopt_lower,
+                   upper = Me$prmopt_upper)$par
+
+
+## Predict on both train and test period
+## First make residuals on the entire period
+Dboth <- subset(Dall, in_range(min(Dpast$t)-3600, Dall$t, max(Dtest$t)))
+Yhat <- rls_fit(Model$prmopt, model = Model, data = Dboth, return_analysis = TRUE)$Yhat
+
+Dboth$r <- Dboth$y - Yhat$k1
+Dboth$R <- matrix(Dboth$r, nrow = length(Dboth$r), ncol = 36)
+nams(Dboth$R) <- pst("k",1:36)
+
+## Use the optimized parameters and fit a model for all horizons
+Me$kseq <- 1:36
+
+Rhat <- rls_fit(Me$prmopt, Me, Dboth, return_analysis = TRUE)$Yhat
+
+scoreperiod <- in_range("2010-12-15", Dboth$t, "2011-02-01")
+#itest <- in_range("2011-02-01", Dboth$t, "2011-02-03")
+
+Yhat_r <- Dboth$y[scoreperiod] - Yhat[scoreperiod, ]
+(rmse_Yhat_r <- apply(Yhat_r, 2, rmse))
+
+## The final forecasts
+Yhat_combined <- Yhat + Rhat
+
+Rhat_r <- Dboth$y[scoreperiod] - Yhat_combined[scoreperiod, ]
+(rmse_Rhat_r <- apply(Rhat_r, 2, rmse))
+#apply(Dboth$r[scoreperiod] - tmp2$Yhat[scoreperiod, ], 2, rmse)
+
+plot(rmse_Yhat_r, ylim = range(rmse_Yhat_r,rmse_Rhat_r))
+points(rmse_Rhat_r, col=2)
+
+
+k <- 1
+plot_ts(cbind(y=Dboth$y, yhat=Yhat[ ,1], yhat_combined=Yhat_combined[ ,1])[1100:1400, ], "*")
+
+k <- 2
+plot_ts(cbind(y=Dboth$y, yhat=Yhat[ ,k], yhat_combined=Yhat_combined[ ,k])[1100:1400, ], "*")
diff --git a/misc-R/make_example_julian.R b/misc-R/make_example_julian.R
new file mode 100644
index 0000000000000000000000000000000000000000..48b81b8f50b3dee572180182bd2e446a4f1794d4
--- /dev/null
+++ b/misc-R/make_example_julian.R
@@ -0,0 +1,11 @@
+## First go and compile the package
+
+##
+library(knitr)
+purl("building-electricity-load-forecast.Rmd")
+
+## Then add files
+files <- c("../../onlineforecast_0.1.0.tar.gz",
+           "./Dbuilding.Rda",
+           "./building-electricity-load-forecast.R")
+zip("~/tmp/julianExample.zip", files, flags="-r9X-j")
diff --git a/misc-R/plotly-test.R b/misc-R/plotly-test.R
new file mode 100644
index 0000000000000000000000000000000000000000..c441871c8136bc64c2e1b802402502bdb45ecdbd
--- /dev/null
+++ b/misc-R/plotly-test.R
@@ -0,0 +1,57 @@
+## ----------------------------------------------------------------
+## Load the current version directly from the folder
+library(devtools)
+load_all(as.package("../../onlineforecast"))
+
+
+D <- Dbuildingheatload
+
+
+
+library(plotly)
+
+
+
+plotly_ts(D, c("heatload","Ta","I|heatload"), kseq=1:3)
+
+fig <- plot_ly(x=D$t, y=D$heatload)
+fig <- fig %>% add_lines()
+fig
+
+
+
+nlines <- 3
+colormap <- colorRampPalette(c("black","cyan","purple","blue","red","green"))(nlines)
+
+Df <- as.data.frame(D)
+
+
+L <- list()
+for(ii in 1:2){
+    fig <- plot_ly(x=Df$t)
+    for(i in 1:nlines){
+        ip <- (ii-1)*nlines + i + 1
+        fig <- fig %>% add_lines(y = Df[ ,ip], name = names(Df)[ip], color=colormap[i], legendgroup = paste0('group',ii), zeroline=FALSE)#, yaxis=paste0('group',ii))
+    }
+    # Add empty to make legend gap
+    fig <- fig %>% add_lines(y = rep(NA,nrow(Df)), name = "", color=colormap[i], legendgroup = paste0('group',ii), zeroline=FALSE)#, yaxis=paste0('group',ii))
+    # Keep it
+    L[[ii]] <- fig
+}
+subplot(L, shareX=TRUE, nrows=2)
+
+
+
+fig1 <- fig
+fig <- plot_ly(Df, x = ~t) 
+fig <- fig %>% add_lines(y = ~heatload, name = 'trace 0')#, legendgroup = 'group2') 
+fig <- fig %>% add_lines(y = ~Ta.k1, name = 'trace 1')#, legendgroup = 'group2')
+fig2 <- fig
+subplot(fig1, fig2, shareX=TRUE, nrows=2)
+
+fig2 <- plot_ly(Df, x = ~t) 
+fig2 <- fig2 %>% add_trace(y = ~heatload, name = 'trace 0',mode = 'lines', legendgroup="2") 
+fig2 <- fig2 %>% add_trace(y = ~Ta.k1, name = 'trace 1', mode = 'lines+markers', legendgroup="2") 
+
+subplot(fig, fig2)
diff --git a/misc-R/temp.R b/misc-R/temp.R
new file mode 100644
index 0000000000000000000000000000000000000000..2605cc7ebfdd8070107c273b09d48b2998e13d84
--- /dev/null
+++ b/misc-R/temp.R
@@ -0,0 +1,38 @@
+library(devtools)
+document()
+load_all(as.package("../../onlineforecast"))
+?lm_predict
+
+# Take data
+D <- subset(Dbuildingheatload, c("2010-12-15", "2011-01-01"))
+D$y <- D$heatload
+# Define a model 
+model <- forecastmodel$new()
+model$add_inputs(Ta = "lp(Ta, a1=0.7)", mu = "ones()")
+
+# Before fitting the model, define which points to include in the evaluation of the score function
+D$scoreperiod <- in_range("2010-12-20", D$t)
+# And the sequence of horizons to fit for
+model$kseq <- 1:6
+
+# Transform using the model
+datatr <- model$transform_data(D)
+
+# See the transformed data
+str(datatr)
+
+# The model has not been fitted
+model$Lfits
+
+# To fit
+lm_fit(model=model, data=D)
+
+# Now the fits for each horizon are there (the latest update)
+# For example 
+summary(model$Lfits$k1)
+
+# Use the fit for prediction
+D$Yhat <- lm_predict(model, datatr)
+
+# Plot it
+plot_ts(D, c("y|Yhat"), kseq=1)
diff --git a/misc-R/template_roxygen_documentation.r b/misc-R/template_roxygen_documentation.r
new file mode 100644
index 0000000000000000000000000000000000000000..2ca172db9737cae50e71e5ee38d5ef3e46b7de6a
--- /dev/null
+++ b/misc-R/template_roxygen_documentation.r
@@ -0,0 +1,23 @@
+## TEMPLATE FOR MAKING DOCUMENTATION OF R-FUNCTIONS
+## by Linde :)
+## see for more info : https://cran.r-project.org/web/packages/roxygen2/vignettes/rd.html
+
+## put the below before your function and fill in
+## the ?example will only work after running "document()", which creates the .Rd file in the folder /man
+## So "document()" should be run in the make file, I think?
+
+#' Write a short description of the function here (this will be title of help page, and "description")
+#'
+#' @param x Describe the parameter
+#' @param y And maybe there are more
+#' @return Write what the function returns, for example: The sum of \code{x} and \code{y}
+#' @examples
+#' example()
+#' example()
+
+example <- function() {
+    blabla <- 0
+    return ( blabla )
+}
+
+#?example
diff --git a/src/Makevars b/src/Makevars
new file mode 100644
index 0000000000000000000000000000000000000000..d3e3f4143c50ad561945e7c34e6fe45de229b498
--- /dev/null
+++ b/src/Makevars
@@ -0,0 +1,14 @@
+
+## With R 3.1.0 or later, you can uncomment the following line to tell R to 
+## enable compilation with C++11 (where available)
+##
+## Also, OpenMP support in Armadillo prefers C++11 support. However, for wider
+## availability of the package we do not yet enforce this here.  It is however
+## recommended for client packages to set it.
+##
+## And with R 3.4.0, and RcppArmadillo 0.7.960.*, we turn C++11 on as OpenMP
+## support within Armadillo prefers / requires it
+CXX_STD = CXX11
+
+PKG_CXXFLAGS = $(SHLIB_OPENMP_CXXFLAGS) 
+PKG_LIBS = $(SHLIB_OPENMP_CXXFLAGS) $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS)
diff --git a/src/Makevars.win b/src/Makevars.win
new file mode 100644
index 0000000000000000000000000000000000000000..d3e3f4143c50ad561945e7c34e6fe45de229b498
--- /dev/null
+++ b/src/Makevars.win
@@ -0,0 +1,14 @@
+
+## With R 3.1.0 or later, you can uncomment the following line to tell R to 
+## enable compilation with C++11 (where available)
+##
+## Also, OpenMP support in Armadillo prefers C++11 support. However, for wider
+## availability of the package we do not yet enforce this here.  It is however
+## recommended for client packages to set it.
+##
+## And with R 3.4.0, and RcppArmadillo 0.7.960.*, we turn C++11 on as OpenMP
+## support within Armadillo prefers / requires it
+CXX_STD = CXX11
+
+PKG_CXXFLAGS = $(SHLIB_OPENMP_CXXFLAGS) 
+PKG_LIBS = $(SHLIB_OPENMP_CXXFLAGS) $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS)
diff --git a/src/RcppExports.cpp b/src/RcppExports.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a76e06a6f3328f42e085114d4d5eb5f0996e8fc8
--- /dev/null
+++ b/src/RcppExports.cpp
@@ -0,0 +1,51 @@
+// Generated by using Rcpp::compileAttributes() -> do not edit by hand
+// Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
+
+#include <RcppArmadillo.h>
+#include <Rcpp.h>
+
+using namespace Rcpp;
+
+// lp_vector_cpp
+NumericVector lp_vector_cpp(NumericVector x, double a1);
+RcppExport SEXP _onlineforecast_lp_vector_cpp(SEXP xSEXP, SEXP a1SEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::RNGScope rcpp_rngScope_gen;
+    Rcpp::traits::input_parameter< NumericVector >::type x(xSEXP);
+    Rcpp::traits::input_parameter< double >::type a1(a1SEXP);
+    rcpp_result_gen = Rcpp::wrap(lp_vector_cpp(x, a1));
+    return rcpp_result_gen;
+END_RCPP
+}
+// rls_update_cpp
+Rcpp::List rls_update_cpp(arma::vec y, arma::mat X, arma::vec theta, arma::mat P, double lambda, unsigned int k, unsigned int n, unsigned int np, unsigned int istart, unsigned int kmax);
+RcppExport SEXP _onlineforecast_rls_update_cpp(SEXP ySEXP, SEXP XSEXP, SEXP thetaSEXP, SEXP PSEXP, SEXP lambdaSEXP, SEXP kSEXP, SEXP nSEXP, SEXP npSEXP, SEXP istartSEXP, SEXP kmaxSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::RNGScope rcpp_rngScope_gen;
+    Rcpp::traits::input_parameter< arma::vec >::type y(ySEXP);
+    Rcpp::traits::input_parameter< arma::mat >::type X(XSEXP);
+    Rcpp::traits::input_parameter< arma::vec >::type theta(thetaSEXP);
+    Rcpp::traits::input_parameter< arma::mat >::type P(PSEXP);
+    Rcpp::traits::input_parameter< double >::type lambda(lambdaSEXP);
+    Rcpp::traits::input_parameter< unsigned int >::type k(kSEXP);
+    Rcpp::traits::input_parameter< unsigned int >::type n(nSEXP);
+    Rcpp::traits::input_parameter< unsigned int >::type np(npSEXP);
+    Rcpp::traits::input_parameter< unsigned int >::type istart(istartSEXP);
+    Rcpp::traits::input_parameter< unsigned int >::type kmax(kmaxSEXP);
+    rcpp_result_gen = Rcpp::wrap(rls_update_cpp(y, X, theta, P, lambda, k, n, np, istart, kmax));
+    return rcpp_result_gen;
+END_RCPP
+}
+
+static const R_CallMethodDef CallEntries[] = {
+    {"_onlineforecast_lp_vector_cpp", (DL_FUNC) &_onlineforecast_lp_vector_cpp, 2},
+    {"_onlineforecast_rls_update_cpp", (DL_FUNC) &_onlineforecast_rls_update_cpp, 10},
+    {NULL, NULL, 0}
+};
+
+RcppExport void R_init_onlineforecast(DllInfo *dll) {
+    R_registerRoutines(dll, NULL, CallEntries, NULL, NULL);
+    R_useDynamicSymbols(dll, FALSE);
+}
diff --git a/src/lp_vector_cpp.cpp b/src/lp_vector_cpp.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a14bfc9f0d6e3472ed2000f6bf2064b6a54fd6fb
--- /dev/null
+++ b/src/lp_vector_cpp.cpp
@@ -0,0 +1,44 @@
+// we only include RcppArmadillo.h which pulls Rcpp.h in for us
+#include "RcppArmadillo.h"
+using namespace Rcpp;
+
+// via the depends attribute we tell Rcpp to create hooks for
+// RcppArmadillo so that the build process will know what to do
+//
+// [[Rcpp::depends(RcppArmadillo)]]
+
+//
+// via the exports attribute we tell Rcpp to make this function
+// available from R
+//
+
+//' Low pass filtering of a vector.
+//' 
+//' This function returns a vector which is x passed through a unity-gain first-order low-pass filter.
+//'
+//' @name lp_vector_cpp
+//' @param x A numeric vector
+//' @param a1 the first order low-pass filter coefficient
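+//'
+//' @details Note on the filter (matching the implementation below): the output
+//' starts at y[1] = x[1] and then, for i > 1, y[i] = a1*y[i-1] + (1-a1)*x[i];
+//' if y[i-1] is NA the filter is restarted with y[i] = x[i].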
+
+// [[Rcpp::export]]
+
+NumericVector lp_vector_cpp(NumericVector x, double a1) {
+  int n = x.size();
+  NumericVector y(n);
+  double oma1 = (1-a1);
+
+  // First value in x is the init value of y
+  y[0] = x[0];
+
+  for(int i = 1; i < n; ++i) {
+    if(NumericVector::is_na(y[i-1])){
+      y[i] = x[i];
+    }else{
+      y[i] = a1*y[i-1] + oma1*x[i];
+    }
+  }
+  // Return (note: the first value y[0] is the initialization value and must be handled by the caller afterwards)
+  return y;
+}
+
+
diff --git a/src/rls_update_cpp.cpp b/src/rls_update_cpp.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..efeb2c933ea00d87b89ab912f25288f3722b9155
--- /dev/null
+++ b/src/rls_update_cpp.cpp
@@ -0,0 +1,139 @@
+// we only include RcppArmadillo.h which pulls Rcpp.h in for us
+#include "RcppArmadillo.h"
+using namespace Rcpp;
+
+// via the depends attribute we tell Rcpp to create hooks for
+// RcppArmadillo so that the build process will know what to do
+//
+// [[Rcpp::depends(RcppArmadillo)]]
+
+//' Calculating k-step recursive least squares estimates
+//' 
+//' This function applies the k-step recursive least squares scheme to estimate
+//' parameters in a linear regression model.
+//'
+//' @name rls_update_cpp
+//' @param y Vector of observations
+//' @param X Matrix of input variables (design matrix)
+//' @param theta Vector of parameters (initial value)
+//' @param P Covariance matrix (initial value)
+//' @param lambda Forgetting factor
+//' @param k Forecast horizon
+//' @param n Length of the input
+//' @param np Dimension of P (np x np)
+//' @param istart Start index
+//' @param kmax Keep only the last kmax rows for next time
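+//'
+//' @details Note on the computations (matching the implementation below): at each
+//' time step i the regressor vector k steps back, x = X[i-k, ], is used for the
+//' update (skipped if y[i] or x contain NAs): err = y[i] - x' theta,
+//' K = P x / (lambda + x' P x), P <- (1/lambda) P - K x' P, theta <- theta + K err.
+//' The k-step forecast is then yhat[i] = X[i, ] theta.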
+
+// [[Rcpp::export]]
+
+Rcpp::List rls_update_cpp(arma::vec y,
+                          arma::mat X, // 
+			  arma::vec theta, // 
+                          arma::mat P, // 
+                          double lambda, // 
+                          unsigned int k, //
+                          unsigned int n, // length of input
+                          unsigned int np, // dimension of P (npxnp)
+                          unsigned int istart,
+                          unsigned int kmax
+) {
+
+  // Initialize with NA_REAL, which is treated as NA in R
+  arma::mat Theta(n, np);  Theta.fill(NA_REAL); // The parameter matrix
+  NumericVector yhat(n);  yhat.fill(NA_REAL); // The predictions k steps ahead
+  // Not used: NumericVector yhat_lagged(n); yhat_lagged.fill(NA_REAL); // The predictions synced with y, e.g. y-yhat_lagged is the prediction error
+  // arma::vec SigmaPredLocal(n);  SigmaPredLocal.fill(-9999); // The local variance of prediction
+  // arma::vec SigmaPredGlobal(n);  SigmaPredGlobal.fill(-9999); // The global variance of prediction
+  // arma::vec SigmaUpLocal(n); SigmaUpLocal.fill(-9999); // The local variance of update
+  // arma::vec SigmaUpGlobal(n); SigmaUpGlobal.fill(-9999); // The global variance of update
+  // arma::vec CUp(n); CUp.fill(-9999);
+  // arma::vec CPred(n); CPred.fill(-9999);
+  // arma::vec DegreesUp(n); DegreesUp.fill(-9999); // The degrees of freedom used for update
+  // arma::vec DegreesPred(n); DegreesPred.fill(-9999); // The degrees of freedom used for prediction
+  // arma::vec Lambda(n); Lambda.fill(-9999); // The forgetting factor used
+  // arma::vec TraceP(n); TraceP.fill(-9999); // The trace of P after update
+
+  arma::vec K(np);
+  arma::vec x(np);
+  double sigsca;
+  double cup;//, cpred;
+  // double sigmaupglobal;
+  // double sigmauplocal;
+  // double sigmapredglobal;
+  // double sigmapredlocal;
+  double err;
+  // double err2;
+  // double cerr2 = 0;
+  // int degrees = 0;
+
+  // Return
+  Rcpp::List fit;
+  Rcpp::List result;
+
+  // Iterate through
+  for(unsigned int i = istart-1 ; i < n; i++) {
+
+    // Take the forecast k steps back to match it with y[i]
+    // The regressor vector, take the forecasts k steps back from X
+    x = arma::trans(X.row(i-k));
+
+    if(!NumericVector::is_na(y[i]) && !x.has_nan()){
+      // Update
+      cup = arma::as_scalar(x.t() * P * x);
+      sigsca = arma::as_scalar(lambda + cup);
+
+      K = (P * x) / sigsca; //gain
+      err = arma::as_scalar(y[i] - x.t() * theta); //residual
+      // err2 = err*err; // squared residual
+      // cerr2 += err2; // cumulated squared residual
+      // degrees +=1;
+      // sigmauplocal = sqrt(cerr2/degrees);
+      // sigmaupglobal = sqrt(cerr2/(degrees*sigsca));
+
+      // Ordinary RLS P  and theta update
+      P = (1/lambda) * P - (K * x.t() * P);
+      theta += K * err;
+
+      // Save results
+      //      TraceP(i) = trace(P);
+      //Lambda(i) = lambda;
+      Theta.row(i) = theta.t();
+      //SigmaUpLocal(i) = sigmauplocal;
+      //SigmaUpGlobal(i) = sigmaupglobal;
+      //CUp(i) = cup;
+      //DegreesUp(i) = degrees;
+    }
+
+    // Make the prediction taking the forecasts at the step i
+    x = arma::trans(X.row(i));
+//    if(!x.has_nan()){
+      yhat[i] = arma::as_scalar(x.t() * theta);
+//    }
+  }
+
+  // Keep the fit
+  // First only last part of yhat
+  arma::vec tmp(kmax);
+  for(unsigned int i = 0; i < kmax; i++) {
+    tmp[i] = yhat[n-kmax+i];
+  }
+  fit = List::create(Named("k") = k,
+                           Named("theta") = theta,
+                           Named("P") = P,
+                           Named("yhat") = tmp);
+
+  // Keep the result
+  // // First lag yhat k steps
+  // for(unsigned int i = k; i < n; i++) {
+  //   yhat_lagged[i] = yhat[i-k];
+  // }
+  // 
+  // Maybe give names to the matrices (its just missing innames as an argument)
+  //Theta.attr("dimnames") = Rcpp::List::create(NULL, innames)
+  //
+  result = List::create(Named("yhat") = yhat,
+                        Named("Theta") = Theta);
+  // Return Theta and the result
+  return List::create(Named("fit") = fit,
+                      Named("result") = result);
+}
diff --git a/tests/run_testthat.R b/tests/run_testthat.R
new file mode 100644
index 0000000000000000000000000000000000000000..1f95ad427fd367d4fcb346d8e0afdbe07dfbf8c5
--- /dev/null
+++ b/tests/run_testthat.R
@@ -0,0 +1,4 @@
+library(testthat)
+library(onlineforecast)
+
+test_check("onlineforecast")
diff --git a/tests/testthat/savedcheck/test-rls-heat_Yhat.RDS b/tests/testthat/savedcheck/test-rls-heat_Yhat.RDS
new file mode 100644
index 0000000000000000000000000000000000000000..1e88a518c9e602e840087fc7b6e99d7b4a5ae9ad
Binary files /dev/null and b/tests/testthat/savedcheck/test-rls-heat_Yhat.RDS differ
diff --git a/tests/testthat/savedcheck/test-rls-heat_model.RDS b/tests/testthat/savedcheck/test-rls-heat_model.RDS
new file mode 100644
index 0000000000000000000000000000000000000000..3f4d700c2b88dcf7c72c9cb92ce40401ea62caad
Binary files /dev/null and b/tests/testthat/savedcheck/test-rls-heat_model.RDS differ
diff --git a/tests/testthat/test-rls-heat-load.R b/tests/testthat/test-rls-heat-load.R
new file mode 100644
index 0000000000000000000000000000000000000000..aa6b5ccf537258234feb36898424415d740c0d98
--- /dev/null
+++ b/tests/testthat/test-rls-heat-load.R
@@ -0,0 +1,77 @@
+context("running RLS test")
+
+## Load current package (must be kept commented out)
+## library("devtools")
+## load_all(as.package("../../../onlineforecast"))
+##dir.create("savedcheck")    
+
+test_that("run", {
+
+    ## ------------------------------------------------------------------------
+    D <- Dbuildingheatload
+    D$y <- D$heatload
+    D$tday <- make_tday(D$t, kseq=1:36)
+
+    ## ------------------------------------------------------------------------
+    D <- subset(D, c("2010-12-15", "2011-02-01"))
+    Dtrain <- subset(D, c("2010-12-15", "2011-01-01"))
+    Dtrain$scoreperiod <- in_range("2010-12-20", Dtrain$t)
+
+    ## ------------------------------------------------------------------------
+    model <- forecastmodel$new()
+    model$output = "y"
+    model$add_inputs(Ta = "lp(Ta, a1=0.9)", 
+                     I = "lp(I, a1=0.7)", 
+                     mu_tday = "fs(tday/24, nharmonics=10)",
+                     mu = "ones()")
+    model$add_regprm("rls_prm(lambda=0.9)")
+
+    ## ------------------------------------------------------------------------
+    model$add_prmbounds(Ta__a1 = c(0.8, 0.9, 0.9999),
+                        I__a1 =  c(0.4, 0.8, 0.9999),
+                        lambda = c(0.9, 0.99, 0.9999))
+
+    ## ------------------------------------------------------------------------
+    model$kseq <- c(1,18)
+    model$prm <- rls_optim(model, Dtrain, control=list(maxit=2), cachedir="", printout=FALSE)$par
+    
+    model$kseq <- 1:36
+    val <- rls_fit(model$prm, model, D, returnanalysis=TRUE, printout=FALSE)
+
+    ## Keep the result for later check
+    D$Yhat1 <- val$Yhat
+
+    ## ------------------------------------------------------------------------
+    ## Save for later check
+    filenm <- "savedcheck/test-rls-heat_Yhat.RDS"
+    ##saveRDS(D$Yhat1, filenm)
+    ## Load to check that is same as saved
+    expect_equal(D$Yhat1, readRDS(filenm))
+
+    ## ------------------------------------------------------------------------
+    ## Do it recursively
+    ## ------------------------------------------------------------------------
+    itrain <- which(in_range("2010-12-15",D$t,"2011-01-01"))
+    itest <- which(in_range("2011-01-01",D$t,"2011-01-01 12:00"))
+    rls_fit(model$prm, model, subset(D, itrain), printout=FALSE)
+
+    D$Yhat2 <- data.frame(matrix(NA, nrow(D$Yhat1), ncol(D$Yhat1)))
+    names(D$Yhat2) <- names(D$Yhat1)
+    for(i in itest){
+        Dnew <- subset(D, i)
+        Dnewtr <- model$transform_data(Dnew)
+        rls_update(model, Dnewtr, Dnew[[model$output]])
+        D$Yhat2[i, ] <- as.numeric(rls_predict(model, Dnewtr))
+    }
+
+    ## ------------------------------------------------------------------------
+    ## Check that the recursive and fit forecasts are close to each other
+    expect_true(sum(abs(D$Yhat1[itest, ] - D$Yhat2[itest, ])) < 0.00001)
+})
+
+
+
+
+
+
+
diff --git a/vignettes/forecast-evaluation.Rmd b/vignettes/forecast-evaluation.Rmd
new file mode 100644
index 0000000000000000000000000000000000000000..9a9ea8c429b9a974a60d0106aa1abd2187d68e1e
--- /dev/null
+++ b/vignettes/forecast-evaluation.Rmd
@@ -0,0 +1,407 @@
+---
+title: "Forecast evaluation"
+author: "Peder Bacher"
+date: "`r Sys.Date()`"
+output: rmarkdown::html_vignette
+vignette: >
+  %\VignetteIndexEntry{Forecast evaluation}
+  %\VignetteEngine{knitr::rmarkdown}
+  %\VignetteEncoding{UTF-8}
+bibliography: literature.bib
+---
+
+```{r external-code, cache=FALSE, include=FALSE, purl = FALSE}
+# Have to load the knitr to use hooks
+library(knitr)
+# This vignettes name
+vignettename <- "forecast-evaluation"
+# Read external code from init.R
+knitr::read_chunk("init.R")
+```
+```{r init, cache=FALSE, include=FALSE, purl=FALSE}
+```
+
+
+## Intro
+This vignette provides a short overview of the basics of forecast evaluation
+with the functions from the onlineforecast package. It follows up on the
+vignettes ??{ref} and ??{ref} and continues the building load forecast modelling
+presented there. If something is introduced in the present text but not
+explained, have a look in the two preceding vignettes for an explanation.
+
+## Load forecasts
+
+First load the package, set up the model and calculate the forecasts:
+```{r}
+# Load the package
+#library(onlineforecast)
+library(devtools)
+load_all(as.package("../../onlineforecast"))
+```
+
+Just start by:
+```{r}
+# Keep the data in D to simplify notation
+D <- Dbuildingheatload
+# Keep the model output in y (just easier code later)
+D$y <- D$heatload
+# 
+D$tday <- make_tday(D$t, 0:36)
+```
+
+
+
+## Score period
+
+### Score period
+Set the `scoreperiod` as a logical vector to control which points will be
+included in score calculations.
+
+Use it to exclude a burn-in period of one week:
+```{r}
+# Set the score period
+D$scoreperiod <- in_range("2010-12-22", D$t)
+```
+
+
+### Train period
+
+One fundamental caveat in data-driven modelling is over-fitting the model. This
+can easily happen when the model is fitted (trained) and evaluated on the same
+data. There are essentially two ways of dealing with this: penalize increased
+model complexity or divide the data into a training set and a test set
+(cross-validation).
+
+In most forecasting applications the easiest and most transparent approach is
+some form of cross-validation - many methods for dividing the data into sets
+have been suggested. For online forecasting it is luckily quite straightforward
+when a model is fitted using a recursive estimation method, like the RLS. In
+each time step the following happens:
+
+- Data for the new time point becomes available, both observations and new
+  forecasts.
+  
+- The parameters in the model are updated.
+
+- A new forecast is calculated.
+
+Hence, the forecasts are calculated based only on past data, so there is
+no need for dividing into a training set and a test set!
+
+However, the parameters (like the forgetting factor and low-pass filter
+coefficients) are optimized on a particular period, hence over-fitting is
+possible. These are, however, most often very few parameters compared to the
+number of observations, so it is very unlikely to over-fit a recursively
+fitted model in this setup.
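+
+A minimal sketch of a single such online step (assuming a model already fitted
+on past data, a new data point in `Dnew`, and the recursive functions of the
+package; not evaluated here):
+```{r, eval=FALSE}
+# One online time step (sketch)
+Dnewtr <- model$transform_data(Dnew)              # transform the new data point
+rls_update(model, Dnewtr, Dnew[[model$output]])   # update the RLS coefficients
+yhat <- rls_predict(model, Dnewtr)                # forecasts for the horizons in model$kseq
+```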
+
+
+
+## Models
+
+
+```{r, output.lines=10}
+# Define a new model with low-pass filtering of the Ta input
+model <- forecastmodel$new()
+model$output = "y"
+model$add_inputs(Ta = "lp(Ta, a1=0.9)",
+                 I = "lp(I, a1=0.9)",
+                 mu = "ones()")
+model$add_prmbounds(Ta__a1 = c(0.8, 0.9, 0.99),
+             I__a1 = c(0.6, 0.9, 0.99),
+             lambda = c(0.9, 0.99, 0.9999))
+model$add_regprm("rls_prm(lambda=0.9)")
+model$kseq <- c(3,18)
+# Optimize the parameters
+model$prm <- rls_optim(model, D)$par
+```
+
+Fit for all horizons and see the fit summary:
+```{r}
+# Fit for all horizons
+model$kseq <- 1:36
+# Fit with RLS
+fit1 <- rls_fit(model$prm, model, D)
+# Check the fit
+summary(fit1)
+```
+
+Let us extend the model by adding a new input: a diurnal pattern described by a
+Fourier series. It can simply be added to the current model object:
+```{r, output.lines=10}
+# Add a diurnal curve using fourier series
+model$add_inputs(mu_tday = "fs(tday/24, nharmonics=4)")
+model$kseq <- c(3,18)
+# Optimize the parameters
+model$prm <- rls_optim(model, D)$par
+```
+
+Fit for all horizons and see the fit summary:
+```{r}
+# Fit for all horizons
+model$kseq <- 1:36
+# Fit with RLS
+fit2 <- rls_fit(model$prm, model, D)
+# Check the fit
+summary(fit2)
+```
+
+
+
+Keep the forecasts for plotting and later analysis:
+```{r}
+# Keep the forecasts from each model
+D$Yhat1 <- fit1$Yhat
+D$Yhat2 <- fit2$Yhat
+```
+
+Plot the full score period:
+```{r, fig.height=figheight2}
+# Plot to see the forecasts for the shortest and the longest horizon
+plot_ts(subset(D,D$scoreperiod), c("^y|^Yhat1","^y|^Yhat2"), kseq = c(1,36))
+```
+
+Plot the first 14 days of the score period:
+```{r, fig.height=figheight2}
+# Plot to see the forecasts for the shortest and the longest horizon
+plot_ts(subset(D,which(D$scoreperiod)[1:(14*24)]), c("^y|^Yhat1","^y|^Yhat2"), kseq = c(1,36))
+```
+We can see how adding the diurnal pattern enables the model to track the morning shower peaks.
+
+
+
+## Reference models
+
+The performance of a forecast model should be compared to a reference
+model. This is, however, not at all trivial, since the suitable reference model
+depends on the particular forecasting application, e.g. the suitable reference
+model for wind power forecasting is not the same as for solar power forecasting
+- and even within the same application it can differ depending on the
+particular conditions. ??(references)
+
+In general, the fundamental reference model should be the simplest reasonable
+model not relying on any inputs, hence either a model based on a mean
+calculation or some persistence should be used. It can also be that the study
+aims at quantifying the value of using NWPs as input, and in that case the
+reference model should be the best model without the NWPs.
+
+We will here demonstrate how to generate persistence forecasts: both a simple
+persistence of the current model output and a diurnal persistence, which uses
+the latest value lagged a given period from the forecast time point.
+
+First the simple persistence:
+```{r}
+# Just keep the horizons
+kseq <- 1:36
+# The simple persistence
+D$YhatP <- persistence(D$y, kseq)
+# Plot a few horizons
+plot_ts(D, c("^y$|YhatP$"), c("2011-01-05","2011-01-10"), kseq=c(1,24,36))
+```
+Remember that the forecasts are lagged in the plot. Maybe it's even more obvious
+to see that it's simply the current value for all horizons:
+```{r}
+D$YhatP[1:4, 1:8]
+```
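+
+Since the simple persistence repeats the current value for every horizon, all
+columns of `YhatP` should be identical. As a quick check (assuming the format
+above):
+```{r}
+# All horizons should hold the same value at each time point
+all(D$YhatP$k1 == D$YhatP$k36, na.rm=TRUE)
+```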
+
+A diurnal (i.e. 24-hour) persistence takes the value from the most recent time
+point which is at the same time of day as the forecast time point
+(i.e. `tod(t+k)`). It can be obtained by:
+```{r}
+# Use the argument perlen to set the period length
+D$YhatDP <- persistence(D$y, kseq, perlen=24)
+# Plot a few horizons
+plot_ts(D, c("^y$|YhatDP$"), c("2011-01-05","2011-01-10"), kseq=c(1,24,36))
+```
+Note how, when going beyond the `perlen` value, the forecasts become the 48-hour
+lagged values (beyond 48 hours they become the 72-hour lagged values, and so forth).
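+
+If the diurnal persistence simply recycles with a 24-hour period, then horizons
+one period apart should hold the same values. A quick check under that
+assumption:
+```{r}
+# Horizon 25 should equal horizon 1 (both use the same lagged observation)
+all(D$YhatDP$k1 == D$YhatDP$k25, na.rm=TRUE)
+```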
+
+
+## Score comparison
+
+Now it is just a matter of calculating the score, as a function of the horizon,
+for each model and comparing them.
+
+Since we have kept the forecasts in the same format for each model, we can find them all by:
+```{r}
+# Find the forecasts in D
+nms <- grep("^Yhat", names(D), value=TRUE)
+nms
+```
+So it's the small model, large model, and simple and diurnal persistence, respectively.
+
+One quite important point: when comparing forecasts from different models,
+exactly the same forecast points must be included. When NAs are present, not
+all models forecast the same points, e.g. a persistence model will leave the
+forecasts following NAs as NAs as well.
+
+So, to make sure that exactly the same points are included in the score
+calculation, we must only use points where all forecasts are available
+(i.e. non-NA):
+```{r}
+# The non-NA for the first forecast
+ok <- !is.na(D[[nms[1]]])
+# Go through the remaining: all must be non-NA for a point
+for(nm in nms[-1]){
+    ok <- ok & !is.na(D[[nm]])
+}
+ok <- as.data.frame(ok)
+names(ok) <- pst("k",kseq)
+# Lag to match the residuals in time
+ok <- lag(ok, "+k")
+# Only the score period
+ok <- ok & D$scoreperiod
+# Finally, the vector with TRUE for all points with no NAs for any forecast
+ok <- apply(ok, 1, all)
+```
+
+How many points are left?
+```{r}
+sum(ok)
+length(ok)
+```
+
+Now the residuals and the score can be calculated:
+```{r}
+# Use the residuals function
+R <- residuals(D$Yhat1, D$y)
+# And the score as a function of the horizon
+score_for_k(R, scoreperiod=ok)$scoreval
+```
+
+
+Calculate the score (default is RMSE) for all models:
+```{r}
+RMSE <- sapply(nms, function(nm){
+    score_for_k(residuals(D[[nm]],D$y), ok)$scoreval
+})
+```
+
+Plot the RMSE as a function of the horizon:
+```{r, fig.height=figheight2}
+RMSE <- as.data.frame(RMSE)
+names(RMSE) <- nms
+
+plot(0, type="n", xlim=range(kseq), ylim=range(RMSE), xlab="Horizon k", ylab="RMSE (kW)")
+for(i in 1:length(RMSE)){
+    points(kseq, RMSE[ ,i], type="b", col=i)
+}
+```
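+
+To summarize the comparison with a single number per model, the RMSE can, as a
+minimal sketch, be averaged over the horizons (whether such an average is
+meaningful depends on the application):
+```{r}
+# Average RMSE over all horizons for each model
+round(colMeans(RMSE, na.rm=TRUE), 2)
+```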
+
+
+
+
+### Training set and test set
+
+As explained, it is most often not necessary to divide the data into a training
+set and a test set when fitting recursively; however, it can sometimes be useful.
+
+An easy approach is to set a logical vector, which is TRUE until the end of the
+training period:
+```{r plottrain}
+D$trainperiod <- in_range(D$t[1]-1, D$t, "2011-02-01")
+plot(D$t, D$trainperiod)
+```
+then optimize the parameters only on this period by taking a subset:
+```{r, output.lines=10}
+model$kseq <- c(3,18)
+# Optimize the parameters
+model$prm <- rls_optim(model, subset(D,D$trainperiod))$par
+```
+
+and then fit on the entire set:
+```{r}
+# Fit for all horizons
+model$kseq <- 1:36
+# Fit with RLS
+fittmp <- rls_fit(model$prm, model, D)
+```
+
+Finally, the score can be calculated on the period following the training period by:
+```{r scorefit}
+score_fit(fittmp, !D$trainperiod)$scoreval
+```
+
+In this way it is rather easy to set up different schemes, e.g. optimizing the
+parameters once a week.
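+
+As a rough sketch of such a scheme (not run, and with the bookkeeping of
+storing the weekly forecasts left out), the parameters could be re-optimized at
+the start of every week on all data seen so far:
+```{r, eval=FALSE}
+# Sketch (not run): assign a week number to each time point
+iweek <- rep(1:ceiling(length(D$t)/(7*24)), each=7*24)[1:length(D$t)]
+for(i in 2:max(iweek)){
+    # Re-optimize the parameters on all data before the current week
+    model$prm <- rls_optim(model, subset(D, iweek < i))$par
+    # Fit including the current week and keep its forecasts
+    fitweek <- rls_fit(model$prm, model, subset(D, iweek <= i))
+    # ...store the rows of fitweek$Yhat belonging to week i
+}
+```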
+
+
+
+## Residual analysis and model validation
+
+In the process of developing good forecasting models it is always an
+interesting and informative (and necessary) exercise to investigate the results
+of a model. Most of the time it boils down to investigating whether any
+significant patterns are left in the residuals - and how, if any, they can be
+described by extending the model.
+
+Plot for the small model:
+```{r plot1, fig.height=figheight5}
+kseq <- c(1,18,36)
+plot_ts(fit1, kseq=kseq)
+```
+In the second plot we see the residuals, and it is clear that there is a diurnal
+pattern - and in the lower three plots the coefficients also have a diurnal pattern.
+
+Plot for the larger model (plots not included here):
+```{r, fig.height=figheight5, fig.keep="none"}
+plot_ts(fit2, kseq=kseq)
+```
+
+A shorter period (plots not included here):
+```{r, fig.height=figheight5, fig.keep="none"}
+xlim <- c("2011-01-01","2011-01-14")
+plot_ts.rls_fit(fit1, xlim=xlim, kseq=kseq)
+plot_ts(fit2, xlim=xlim, kseq=kseq)
+```
+
+The data used for the plots is also returned:
+```{r tscoef}
+tmp <- plot_ts(fit2, kseq=kseq, plotit=FALSE)
+class(tmp)
+names(tmp)
+# Residuals
+plot_ts(tmp, c("^Residuals"), kseq=kseq)
+# All RLS coefficients
+nms <- names(fit2$Lfitval$k1)
+plot_ts(tmp, pattern=nms, kseq=kseq)
+```
+
+```{r, fig.height=figheight3}
+plot_ts(tmp, pattern=c("^y$|^Yhat",nms), c("2011-02-06","2011-02-10"), kseq=kseq)
+```
+
+The RLS coefficients are in the fit for each horizon:
+```{r rlscoefficients}
+str(fit1$Lfitval$k1)
+```
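+
+The coefficients can also be plotted directly as time series, e.g. the
+low-pass filtered ambient temperature coefficient for the one-step horizon.
+A minimal sketch, assuming the rows of `Lfitval$k1` are aligned with `D$t`:
+```{r}
+# The RLS coefficient for Ta at horizon k=1 over time
+plot(D$t, fit1$Lfitval$k1$Ta, type="l", xlab="Time", ylab="Ta coefficient (k=1)")
+```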
+
+A pairs plot with residuals and inputs to check whether any patterns are left:
+```{r plotpairs, fig.height=figwidth}
+kseq <- c(1,36)
+D$Residuals <- residuals(fit2)[ ,pst("h",kseq)]
+D$hour <- aslt(D$t)$hour
+pairs.data.list(D, subset=D$scoreperiod, pattern="Residuals|Ta|I|hour|^t$", kseq=kseq)
+```
+
+Histograms and box-plots can be used to find patterns. First, the histogram and the qq-norm plot:
+
+```{r}
+par(mfrow=c(1,2))
+hist(D$Residuals$h1)
+qqnorm(D$Residuals$h1)
+qqline(D$Residuals$h1)
+```
+We can see that the residuals are symmetric with heavy tails.
+
+From boxplots we see the distribution as a function of the time of day:
+```{r}
+boxplot(D$Residuals$h1 ~ D$tday$k0)
+```
+They seem to be symmetric and centered around zero, hence no pattern is left in
+the mean - only the variance changes over the day.
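+
+The change in variance over the day can be quantified, e.g. by the standard
+deviation of the one-step residuals for each hour of the day. A minimal check
+using the objects defined above:
+```{r}
+# Standard deviation of the k=1 residuals per hour of the day
+round(tapply(D$Residuals$h1, D$tday$k0, sd, na.rm=TRUE), 2)
+```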
diff --git a/vignettes/init.R b/vignettes/init.R
new file mode 100644
index 0000000000000000000000000000000000000000..0195156219f50ae67abe84c599ad0bd8f2a9fe0a
--- /dev/null
+++ b/vignettes/init.R
@@ -0,0 +1,58 @@
+# ---- init
+
+# The width scales all figures
+figwidth <- 12
+# Height for the wide figures (100% out.width)
+figheight <- 4
+# Heights for stacked time series plots
+figheight1 <- 5
+figheight2 <- 6.5
+figheight3 <- 8
+figheight4 <- 9.5
+figheight5 <- 11
+# Set the size of squared figures (same height as full: figheight/figwidth)
+owsval <- 0.35
+ows <- paste0(owsval*100,"%")
+ows2 <- paste0(2*owsval*100,"%")
+# Side length (inches) used for squared figures
+fhs <- figwidth * owsval
+
+# Set for square fig: fig.width=fhs, fig.height=fhs, out.width=ows}
+# If two squared the:  fig.width=2*fhs, fig.height=fhs, out.width=ows2
+
+# Check this: https://bookdown.org/yihui/rmarkdown-cookbook/chunk-styling.html
+# Set the knitr options
+knitr::opts_chunk$set(
+  collapse = TRUE,
+  comment = "##    ",
+  prompt = FALSE,
+  cache = TRUE,
+  cache.path = paste0("tmp-output/tmp-",vignettename,"/"),
+  fig.align="center",
+  fig.path = paste0("tmp-output/tmp-",vignettename,"/"),
+  fig.height = figheight,
+  fig.width = figwidth,
+  out.width = "100%"
+)
+options(digits=3)
+
+hook_output <- knitr::knit_hooks$get("output")
+knitr::knit_hooks$set(output = function(x, options) {
+  lines <- options$output.lines
+  if (is.null(lines)) {
+    return(hook_output(x, options))  # pass to default hook
+  }
+  x <- unlist(strsplit(x, "\n"))
+  more <- "## ...output cropped"
+  if (length(lines)==1) {        # first n lines
+    if (length(x) > lines) {
+      # truncate the output, but add ....
+      x <- c(head(x, lines), more)
+    }
+  } else {
+    x <- c(more, x[lines], more)
+  }
+  # paste these lines together
+  x <- paste(c(x, ""), collapse = "\n")
+  hook_output(x, options)
+})
diff --git a/vignettes/literature.bib b/vignettes/literature.bib
new file mode 100644
index 0000000000000000000000000000000000000000..5378d5b42378a5beaebd1579eb45e9d758fb7900
--- /dev/null
+++ b/vignettes/literature.bib
@@ -0,0 +1,5705 @@
+% Encoding: UTF-8
+
+@Article{Aban2006,
+  author    = {Aban, Inmaculada B and Meerschaert, Mark M and Panorska, Anna K},
+  title     = {Parameter Estimation for the Truncated Pareto Distribution},
+  journal   = {Journal of the American Statistical Association},
+  year      = {2006},
+  volume    = {101},
+  number    = {473},
+  pages     = {270-277},
+  abstract  = { The Pareto distribution is a simple model for nonnegative data with a power law probability tail. In many practical applications, there is a natural upper bound that truncates the probability tail. This article derives estimators for the truncated Pareto distribution, investigates their properties, and illustrates a way to check for fit. These methods are illustrated with applications from finance, hydrology, and atmospheric science. },
+  doi       = {10.1198/016214505000000411},
+  eprint    = {http://amstat.tandfonline.com/doi/pdf/10.1198/016214505000000411},
+  file      = {Aban2006.pdf:Aban2006.pdf:PDF},
+  groups    = {EVT},
+  owner     = {pb},
+  timestamp = {2014.04.10},
+  url       = {http://amstat.tandfonline.com/doi/abs/10.1198/016214505000000411},
+}
+
+@Article{Ahmad2011,
+  Title                    = {Solar radiation models-A review},
+  Author                   = {Ahmad, M. Jamil and Tiwari, G.N.},
+  Journal                  = {International Journal of Energy Research},
+  Year                     = {2011},
+  Number                   = {4},
+  Pages                    = {271-290},
+  Volume                   = {35},
+
+  Abstract                 = {Solar radiation models for predicting the average daily and hourly global radiation, beam radiation and diffuse radiation on horizontal surface are reviewed in this article. Estimations of monthly average hourly global radiation from daily summations are discussed. It was observed that Collares-Pereira and Rabl model as modified by Gueymard (CPRG) yielded the best performance for estimating mean hourly global radiation incident on a horizontal surface for Indian regions. Estimations of monthly average hourly beam and diffuse radiation are discussed. It was observed that Singh-Tiwari and Jamil-Tiwari both models generally give better results for climatic conditions of Indian regions. Therefore, their use is recommended for composite climate of Indian regions. Empirical correlations developed to establish a relationship between the hourly diffuse fraction and the hourly clearness index using hourly global and diffuse irradiation measurements on a horizontal surface are discussed. Fifty models using the Angstrom-Prescott equation to predict the average daily global radiation with hours of sunshine are considered. It was reported that Ertekin and Yaldiz model showed the best performance against measured data of Konya, Turkey. Copyright © 2010 John Wiley & Sons, Ltd.},
+  Doi                      = {10.1002/er.1690},
+  File                     = {Ahmad2011.pdf:Ahmad2011.pdf:PDF},
+  ISSN                     = {0363907x, 1099114x},
+  Owner                    = {pb},
+  Timestamp                = {2011.04.04}
+}
+
+@Article{Al-Sanea2002,
+  author                     = {Al-Sanea, SA},
+  title                      = {Thermal performance of building roof elements},
+  journal                    = {BUILDING AND ENVIRONMENT},
+  year                       = {2002},
+  volume                     = {37},
+  number                     = {7},
+  pages                      = {665-675},
+  month                      = {JUL},
+  issn                       = {0360-1323},
+  abstract                   = {The study concerns the evaluation and comparison of the thermal performance of building roof elements subject to periodic changes in ambient temperature, solar radiation and nonlinear radiation exchange. A numerical model, based on the finite-volume method and using the implicit formulation, is developed and applied for six variants of a typical roof structure used in the construction of buildings in Saudi Arabia. The climatic conditions of the city of Riyadh are employed for representative days for July and January. The study gives the detailed temperature and heat flux variations with time and the relative importance of the various heat-transfer components as well as the daily averaged roof heat-transfer load, dynamic R-values and the radiative heat-transfer coefficient. The results show that the inclusion of a 5-cm thick molded polystyrene layer reduces the roof heat-transfer load to one-third of its value in an identical roof section without insulation. Using a polyurethane layer instead, reduces the load to less than one-quarter. A slightly better thermal performance is achieved by locating the insulation layer closer to the inside surface of the roof structure but this exposes the water proofing membrane layer to larger temperature fluctuations. (C) 2002 Elsevier Science Ltd. All rights reserved.},
+  file                       = {Al-Sanea2002.pdf:Al-Sanea2002.pdf:PDF},
+  groups                     = {Buildings},
+  number-of-cited-references = {23},
+  owner                      = {pb},
+  times-cited                = {17},
+  timestamp                  = {2010.11.16},
+  unique-id                  = {ISI:000176544000002},
+}
+
+@Article{Andersen2014,
+  author    = {Andersen, PhilipDelff and Jiménez, MaríaJosé and Madsen, Henrik and Rode, Carsten},
+  title     = {Characterization of heat dynamics of an arctic low-energy house with floor heating},
+  journal   = {Building Simulation},
+  year      = {2014},
+  pages     = {1-20},
+  issn      = {1996-3599},
+  doi       = {10.1007/s12273-014-0185-4},
+  file      = {Andersen2014.pdf:Andersen2014.pdf:PDF},
+  groups    = {Buildings},
+  keywords  = {grey-box models; statistical modeling; low-energy buildings; arctic technology; time series analysis},
+  language  = {English},
+  owner     = {pb},
+  publisher = {Tsinghua University Press},
+  timestamp = {2014.05.22},
+  url       = {http://dx.doi.org/10.1007/s12273-014-0185-4},
+}
+
+@Article{Andersen2013,
+  author    = {Andersen, Philip Delff and Iversen, Anne and Madsen, Henrik and Rode, Carsten},
+  title     = {Dynamic modeling of presence of occupants using inhomogeneous Markov chains},
+  journal   = {Energy and Buildings},
+  year      = {2013},
+  volume    = {{}},
+  number    = {0},
+  pages     = {{}},
+  issn      = {0378-7788},
+  abstract  = {Abstract Occupancy modeling is a necessary step towards reliable simulation of energy consumption in buildings. This paper outlines a method for fitting recordings of presence of occupants and simulation of single-person to multiple-persons office environments. The method includes modeling of dependence on time of day, and by use of a filter of the observations it is able to capture per-employee sequence dynamics. Simulations using this method are compared with simulations using homogeneous Markov chains and show far better ability to reproduce key properties of the data. The method is based on inhomogeneous Markov chains with where the transition probabilities are estimated using generalized linear models with polynomials, B-splines, and a filter of passed observations as inputs. For treating the dispersion of the data series, a hierarchical model structure is used where one model is for low presence rate, and another is for high presence rate. },
+  doi       = {http://dx.doi.org/10.1016/j.enbuild.2013.10.001},
+  file      = {Andersen2013.pdf:Andersen2013.pdf:PDF},
+  groups    = {Other models},
+  owner     = {pb},
+  timestamp = {2013.11.18},
+  url       = {http://www.sciencedirect.com/science/article/pii/S0378778813006397},
+}
+
+@Article{Antonopoulos2000,
+  author    = {K.A. Antonopoulos and E.P. Koronaki},
+  title     = {Thermal parameter components of building envelope},
+  journal   = {Applied Thermal Engineering},
+  year      = {2000},
+  volume    = {20},
+  number    = {13},
+  pages     = {1193-1211},
+  issn      = {1359-4311},
+  abstract  = {A procedure is presented for analyzing the effective thermal capacitance, the time constant and the thermal delay of buildings into components corresponding to discrete sections of the envelope (i.e. the roof or a whole wall of a specified orientation), to envelope parts of different compositions (i.e. the brickwork and the concrete parts of the envelope), or even to the layers of the exterior multilayer walls. Correlations are also developed, which express the dynamic thermal parameters of buildings in terms of the thickness of exterior wall layers and the surface percentage of envelope parts with different compositions. The effective layer thickness is introduced, the increase of which causes negligible increase in the building thermal capacitance. The developed procedure is based on finite-difference solution of a rigorous set of coupled differential equations describing the dynamic thermal behaviour of buildings. The analysis made quantifies the thermal contribution of every element of the envelope and may improve its thermal behaviour if the related conclusions are taken into consideration in the design of buildings.},
+  doi       = {10.1016/S1359-4311(99)00090-3},
+  file      = {Antonopoulos2000.pdf:Antonopoulos2000.pdf:PDF},
+  groups    = {single house forecasting},
+  keywords  = {Components of thermal parameters},
+  owner     = {pb},
+  timestamp = {2012.03.20},
+}
+
+@TechReport{Authority2005,
+  Title                    = {Heat Supply in Denmark - Who What Where and Why},
+  Author                   = {The Danish Energy Authority},
+  Year                     = {2005},
+
+  File                     = {Authority2005.pdf:Authority2005.pdf:PDF},
+  Owner                    = {pb},
+  Timestamp                = {2012.03.22},
+  Url                      = {www.ens.dk}
+}
+
+@MastersThesis{Bacher2008,
+  Title                    = {Short-term Solar Power Forecasting},
+  Author                   = {Bacher, Peder},
+  School                   = {Technical University of Denmark},
+  Year                     = {2008},
+  Note                     = {IMM-M.Sc.-2008-13},
+
+  Abstract                 = {The share of the global power production coming from solar power is increasing. Forecasts of solar power is a key point for a successful integration of the solar power production into the existing electricity grid. In the present thesis a solar power forecasting method is developed. The overall production of 21 grid-connected photovoltaic (PV) systems with peak power in the range of 1 kWp to 4 kWp is used. The PV systems are located within an area of a few square kilometers. Numerical weather predictions (NWP) of global irradiation from the mesoscale NWP model DMI-Hirlam, is also used as input to the method. A clear sky model only based on solar power observations is developed. It is used to transform the solar power process into a stationary process that resembles the transmittance of the atmosphere. This process is modeled with linear models and the best model both with and without NWPs as input is identi¯ed. Adaptive estimation is found to be a requisite. Therefore the prediction models of the transformed solar power process are fitted using k-step recursive least squares with forgetting. The evaluation focus on solar power forecasts for the purpose of bidding into the electricity market Elspot. The forecasts are issued at 12:00 UTC each day and consist of hour value predictions up to a 36 hour horizon of solar power. These forecasts are evaluated and compared to a persistence reference model. The achieved results clearly indicate an increasing performance for next day horizons (12 to 36 hours), by the model using NWPs as input. Whereas for very short-term predictions (less than 6 hours) the solar power observations are the most important input. Finally ideas for both refinements and extensions to the method are outlined, together with a suggestion for the development of a framework for standardization of solar power forecasting method evaluation.},
+  File                     = {Bacher2008.pdf:Bacher2008.pdf:PDF},
+  Owner                    = {pb},
+  Timestamp                = {2009.02.24}
+}
+
+@Article{Bacher2016,
+  Title                    = {Non-parametric method for separating domestic hot water heating spikes and space heating},
+  Author                   = {Peder Bacher and {de Saint-Aubain}, {Philip Anton} and Christiansen, {Lasse Engbo} and Henrik Madsen},
+  Journal                  = {Energy and Buildings},
+  Year                     = {2016},
+  Pages                    = {107--112},
+  Volume                   = {130},
+
+  Doi                      = {10.1016/j.enbuild.2016.08.037},
+  ISSN                     = {0378-7788},
+  Keywords                 = {Separation of total heat load, Kernel smoother, Robust estimation, Statistical modeling, Time series analyses, Smart grid, Smart metering, Heat metering},
+  Owner                    = {pbac},
+  Publisher                = {Elsevier BV},
+  Timestamp                = {2017.04.11}
+}
+
+@TechReport{Bacher2013c,
+  Title                    = {IEA Common Exercise 3: ARX models for thermal performance characterization based on constant indoor temperature experiments},
+  Author                   = {Bacher, P. and Delff, P.},
+  Institution              = {DTU Compute},
+  Year                     = {2013},
+
+  File                     = {Bacher2013c.pdf:Bacher2013c.pdf:PDF},
+  Owner                    = {pb},
+  Quality                  = {1},
+  Timestamp                = {2013.10.12}
+}
+
+@Article{Bacher2011,
+  author    = {Bacher, Peder and Madsen, Henrik},
+  title     = {Identifying suitable models for the heat dynamics of buildings},
+  journal   = {Energy \& Buildings},
+  year      = {2011},
+  volume    = {43},
+  number    = {7},
+  pages     = {1511-1522},
+  issn      = {03787788},
+  abstract  = {The present paper suggests a procedure for identification of suitable models for the heat dynamics of a building. Such a procedure for model identification is essential for better usage of readings from smart meters, which is expected to be installed in almost all buildings in the coming years. The models can be used for different purposes, e.g. control of the indoor climate, forecasting of energy consumption, and for accurate description of energy performance of the building. Grey-box models based on prior physical knowledge and data-driven modelling are applied. This facilitates insight into otherwise hidden information about the physical properties of the building. A hierarchy of models of increasing complexity is formulated based on prior physical knowledge and a forward selection strategy is suggested enabling the modeller to iteratively select suitable models of increasing complexity. The performance of the models is compared using likelihood ratio tests, and they are validated using a combination of appropriate statistics and physical interpretation of the results. A case study is described in which a suitable model is sought after for a single storey 120m2 building. The result is a set of different models of increasing complexity, with which building characteristics, such as: thermal conductivity, heat capacity of different parts, and window area, are estimated.},
+  doi       = {10.1016/j.enbuild.2011.02.005},
+  file      = {Bacher2011.pdf:Bacher2011.pdf:PDF},
+  groups    = {Buildings},
+  owner     = {pb},
+  timestamp = {2011.08.09},
+}
+
+@TechReport{Bacher2013a,
+  author      = {Peder Bacher and Henrik Madsen and {Aalborg Nielsen}, Henrik},
+  title       = {Load forecasting for supermarket refrigeration},
+  institution = {DTU Compute},
+  year        = {2013},
+  file        = {Bacher2013a.pdf:Bacher2013a.pdf:PDF},
+  groups      = {Forecasting, load},
+  owner       = {pb},
+  series      = {IMM-Technical Report-2013},
+  timestamp   = {2013.06.20},
+}
+
+@InProceedings{Bacher2011d,
+  author    = {Bacher, Peder and Madsen, Henrik and Nielsen, Henrik Aalborg},
+  title     = {Online Short-term Solar Power Forecasting},
+  booktitle = {1st International Workshop on the Integration of Solar Power into Power Systems},
+  year      = {2011},
+  file      = {Bacher2011d.pdf:Bacher2011d.pdf:PDF},
+  groups    = {Forecasting, solar},
+  owner     = {pb},
+  timestamp = {2012.08.27},
+}
+
+@Article{Bacher2012,
+  author    = {Peder Bacher and Henrik Madsen and Henrik Aalborg Nielsen and Bengt Perers},
+  title     = {Short-term heat load forecasting for single family houses},
+  journal   = {Energy and Buildings},
+  year      = {2013},
+  volume    = {65},
+  number    = {0},
+  pages     = {101-112},
+  issn      = {0378-7788},
+  abstract  = {Abstract This paper presents a method for forecasting the load for space heating in a single-family house. The forecasting model is built using data from sixteen houses located in Sønderborg, Denmark, combined with local climate measurements and weather forecasts. Every hour the hourly heat load for each house the following two days is forecasted. The forecast models are adaptive linear time-series models and the climate inputs used are: ambient temperature, global radiation and wind speed. A computationally efficient recursive least squares scheme is used. The models are optimized to fit the individual characteristics for each house, such as the level of adaptivity and the thermal dynamical response of the building, which is modeled with simple transfer functions. Identification of a model, which is suitable for all the houses, is carried out. The results show that the one-step ahead errors are close to white noise and that practically all correlation to the climate variables are removed. Furthermore, the results show that the forecasting errors mainly are related to: unpredictable high frequency variations in the heat load signal (predominant only for some houses), shifts in resident behavior patterns and uncertainty of the weather forecasts for longer horizons, especially for solar radiation. },
+  doi       = {http://dx.doi.org/10.1016/j.enbuild.2013.04.022},
+  file      = {Bacher2012.pdf:Bacher2012.pdf:PDF},
+  groups    = {Forecasting, load},
+  keywords  = {Heat load},
+  owner     = {pb},
+  timestamp = {2013.07.05},
+  url       = {http://www.sciencedirect.com/science/article/pii/S0378778813002752},
+}
+
+@InProceedings{Bacher2011a,
+  author    = {Bacher, P. and Madsen, H. and Perers, B.},
+  title     = {Short-term solar collector power forecasting},
+  booktitle = {Proceedings of ISES Solar World Conference 2011},
+  year      = {2011},
+  abstract  = {This paper describes a new approach to online forecasting of power output from solar thermal collectors. The method is suited for online forecasting in many applications and in this paper it is applied to predict hourly values of power from a standard single glazed large area flat plate collector. The method is applied for horizons of up to 42 hours. Solar heating systems naturally come with a hot water tank, which can be utilized for energy storage also for other energy sources. Thereby such systems can become an important part of energy systems with a large share of uncontrollable energy sources, such as wind power. In such a scenario online forecasting is a vital tool for optimal control and utilization of solar heating systems. The method is a two-step scheme, where first a non-linear model is applied to transform the solar power into a stationary process, which then is forecasted with robust time-adaptive linear models. The approach is similar to the one by Bacher et al. (2009), but contains additional effects due to differences between solar thermal collectors and photovoltaics. Numerical weather predictions provided by Danish Meteorological Institute are used as input. The applied models adapt over time enabling tracking of changes in the system and in the surrounding conditions, such as decreasing performance due to wear and dirt, and seasonal changes such as leaves on trees. This furthermore facilitates remote monitoring and check of the system.},
+  file      = {Bacher2011a.pdf:Bacher2011a.pdf:PDF},
+  groups    = {Forecasting, solar},
+  owner     = {pb},
+  timestamp = {2011.11.23},
+}
+
+@InProceedings{Bacher2011b,
+  author    = {Bacher, P. and Madsen, H. and Perers, B.},
+  title     = {Models of the heat dynamics of solar collectors for performance testing},
+  booktitle = {Proceedings of ISES Solar World Conference 2011},
+  year      = {2011},
+  abstract  = {The need for fast and accurate performance testing of solar collectors is increasing. This paper describes a new technique for performance testing which is based on non-linear continuous time models of the heat dynamics of the collector. It is shown that all important performance parameters can be accurately estimated with measurements from a single day. The estimated parameters are compared with results from standardized test methods (Fischer et al., 2004). Modelling the dynamics of the collector is carried out using stochastic differential equations, which is a well proven ef?cient method to obtain accurate estimates of parameters in physical models. The applied method is described by Kristensen et al. (2004) and implemented in the software CTSM1 . Examples of successful applications of the method includes modelling the of the heat dynamics of integrated photo-voltaic modules (Friling et al., 2009) and modelling of the heat dynamics of buildings (Madsen and Holst, 1995). Measurements obtained at a test site in Denmark during the spring 2010 are used for the modelling. The tested collector is a single glazed large area ?at plate collector with selective absorber and Te?on anti convection layer. The test rig is described in Fan et al. (2009). The modelling technique provides uncertainty estimates such as con?dence intervals for the parameters, and furthermore enables statistical validation of the results. Such tests can also facilitate procedures for selecting the best model to use, which is a very non-trivial task.},
+  file      = {Bacher2011b.pdf:Bacher2011b.pdf:PDF},
+  groups    = {Grey-box modeling, Buildings},
+  owner     = {pb},
+  timestamp = {2011.11.23},
+}
+
+@Article{Bacher2013,
+  author    = {Bacher, Peder and Madsen, Henrik and Perers, Bengt and Nielsen, Henrik Aalborg},
+  title     = {A non-parametric method for correction of global radiation observations},
+  journal   = {Solar Energy},
+  year      = {2013},
+  volume    = {88},
+  pages     = {13-22},
+  issn      = {0038092x},
+  abstract  = {This paper presents a method for correction and alignment of global radiation observations based on information obtained from calculated global radiation, in the present study one-hour forecast of global radiation from a numerical weather prediction (NWP) model is used. Systematical errors detected in the observations are corrected. These are errors such as: tilt in the leveling of the sensor, shadowing from surrounding objects, clipping and saturation in the signal processing, and errors from dirt and wear. The method is based on a statistical non-parametric clear-sky model which is applied to both the observed and the calculated radiation in order to find systematic deviations between them. The method is applied to correct global radiation observations from a climate station located at a district heating plant in Denmark. The results are compared to observations recorded at the Danish Technical University. The method can be useful for optimized use of solar radiation observations for forecasting, monitoring, and modeling of energy production and load which are affected by solar radiation. © 2012 Elsevier Ltd.},
+  doi       = {10.1016/j.solener.2012.10.024},
+  file      = {Bacher2013.pdf:Bacher2013.pdf:PDF},
+  groups    = {Other models},
+  owner     = {pb},
+  timestamp = {2013.01.17},
+}
+
+@Book{Bacher2015,
+  Title                    = {Methodology and forecast products for the optimal offering of ancillary services from wind in a market environment},
+  Author                   = {Peder Bacher and Henrik Madsen and Pierre Pinson and Mortensen, {Stig Bousgaard} and Nielsen, {Henrik Aalborg}},
+  Publisher                = {Technical University of Denmark},
+  Year                     = {2015},
+  Series                   = {DTU Compute-Technical Report-2015},
+
+  Owner                    = {pb},
+  Timestamp                = {2015.08.31}
+}
+
+@InProceedings{Bacher2011c,
+  Title                    = {Opensource software for mlr-modelling of solar collectors},
+  Author                   = {Bacher, P. and Perers, B.},
+  Booktitle                = {Proceedings of ISES Solar World Conference 2011},
+  Year                     = {2011},
+
+  Abstract                 = {A first research version is now in operation of a software package for multiple linear regression (MLR) modeling and analysis of solar collectors according to ideas originating all the way from Walletun et. al. (1986), Perers, (1987 and 1993). The tool has been implemented in the free and open source program R http://www.r-project.org/. Applications of the software package includes: visual validation, resampling and conversion of data, collector performance testing analysis according to the European Standard EN 12975 (Fischer et al., 2004), statistical validation of results, and the determination of collector incidence angle modifiers without the need of a mathematical function (Perers, 1997). The paper gives a demonstration with examples of the applications, based on measurements obtained at a test site at DTU in Denmark (Fan et al., 2009). The tested collector is a single glazed large area flat plate collector with selective absorber and teflon anti convection layer. The package is intended to enable fast and reliable validation of data, and provide a united implementation for MLR testing of solar collectors. This will furthermore make it simple to replicate the calculations by a third party in order to validate the results. Finally more advanced methods can be implemented and easily shared as extensions to the package, for example methods for statistical estimation of the incidence angle modifier with non-linear functions for collectors with more complicated optics. The overall advantage of this kind of tool and analysis is that it is almost the inverse of simulation. Therefore the model and parameters will be very well validated for application in later use for system simulation, even if the test is no real system test. Also for annual collector performance calculations with a new Excel tool connected to EN 12975 (Kovacs, 2011) this built in validation gives an extra quality assurance.},
+  File                     = {Bacher2011c.pdf:Bacher2011c.pdf:PDF},
+  Owner                    = {pb},
+  Timestamp                = {2011.11.23}
+}
+
+@Conference{Bacher2013b,
+  author    = {Bacher, Peder and Madsen, Henrik and Nielsen, Henrik Aalborg and Mortensen, Stig Bousgaard},
+  title     = {Modelling and evaluation of building thermal performance from smart meter readings},
+  booktitle = {HIGH PERFORMANCE BUILDINGS - Design and Evaluation Methodologies -},
+  year      = {2013},
+  file      = {Bacher2013b.pdf:Bacher2013b.pdf:PDF},
+  groups    = {PerformanceKPI},
+  owner     = {pb},
+  quality   = {1},
+  timestamp = {2013.08.19},
+}
+
+@Article{Bauwens2014,
+  Title                    = {Co-heating test: A state-of-the-art},
+  Author                   = {Bauwens, Geert and Roels, Staf},
+  Journal                  = {Energy and Buildings},
+  Year                     = {2014},
+  Pages                    = {163--172},
+  Volume                   = {82},
+
+  File                     = {Bauwens2014.pdf:Bauwens2014.pdf:PDF},
+  Owner                    = {pbac},
+  Publisher                = {Elsevier},
+  Timestamp                = {2017.03.27}
+}
+
+@Book{Beirlant2006,
+  title     = {Statistics of extremes: theory and applications},
+  publisher = {Wiley. com},
+  year      = {2006},
+  author    = {Beirlant, Jan and Goegebeur, Yuri and Segers, Johan and Teugels, Jozef},
+  file      = {Beirlant2006.pdf:Beirlant2006.pdf:PDF},
+  groups    = {EVT},
+  owner     = {pb},
+  timestamp = {2014.01.13},
+}
+
+@TechReport{Bird1984,
+  author      = {Bird, R. and Riordan, C.},
+  title       = {Simple solar spectral model for direct and diffuse irradiance on horizontal and tilted planes at the earth's surface for cloudless atmospheres},
+  institution = {Solar Energy Research Inst., Golden, CO (USA)},
+  year        = {1984},
+  file        = {Bird1984.pdf:Bird1984.pdf:PDF},
+  groups      = {phdthesis},
+  owner       = {pb},
+  timestamp   = {2012.04.03},
+}
+
+@Article{Bird1984a,
+  Title                    = {A simple, solar spectral model for direct-normal and diffuse horizontal irradiance},
+  Author                   = {Richard E. Bird},
+  Journal                  = {Solar Energy},
+  Year                     = {1984},
+  Number                   = {4},
+  Pages                    = {461-471},
+  Volume                   = {32},
+
+  Abstract                 = {A spectral model for cloudless days that uses simple mathematical expressions and tabulated look-up tables to generate direct-normal and diffuse horizontal irradiance is presented. The model is based on modifications to previously published simple models and comparisons with rigorous radiative transfer codes. This model is expected to be more accurate than previous simple models and applicable to a broader range of atmospheric conditions. The primary significance of this model is its simplicity, which allows it to be used on small desk-top computers. The spectrum produced by this model is limited to 0.3–4.0 μm wavelength with an approximate resolution of 10 nm.},
+  Doi                      = {10.1016/0038-092X(84)90260-3},
+  File                     = {Bird1984a.pdf:Bird1984a.pdf:PDF},
+  ISSN                     = {0038-092X},
+  Owner                    = {pb},
+  Timestamp                = {2012.09.12},
+  Url                      = {http://www.sciencedirect.com/science/article/pii/0038092X84902603}
+}
+
+@MastersThesis{Bitsch2016,
+  Title                    = {Statistical Learning for Energy Informatics},
+  Author                   = {Magnus Alexander Bitsch},
+  School                   = {DTU},
+  Year                     = {2016},
+
+  Owner                    = {pbac},
+  Timestamp                = {2016.10.10}
+}
+
+@Article{Blarke2012,
+  author    = {Morten B. Blarke and Kazuaki Yazawa and Ali Shakouri and Carolina Carmo},
+  title     = {Thermal Battery with CO2 compression heat pump: Techno-economic optimization of a high-efficiency Smart Grid option for buildings},
+  journal   = {Energy and Buildings},
+  year      = {2012},
+  pages     = {-},
+  issn      = {0378-7788},
+  abstract  = {Increasing penetration levels of wind and solar power in the energy system call for the development of Smart Grid enabling technologies. As an alternative to expensive electro-chemical and mechanical storage options, the thermal energy demand in buildings offers a cost-effective option for intermittency-friendly electricity consumption patterns. Combining hot and cold thermal storages with new high-pressure compressor technology that allows for flexible and simultaneous production of useful heat and cooling, the paper introduces and investigates the high-efficiency Thermal Battery (TB) concept. In a proof-of-concept case study, the TB replaces an existing electric resistance heater used for hot water production and an electric compressor used for air refrigeration in a central air conditioning system. A mathematical model for least-cost unit dispatch is developed. Heat pump cycle components and thermal storages are designed and optimized. A general methodology is applied that allows for comparing the obtained results with other Smart Grid enabling options. It is found that the TB concept leads to improvements in the intermittency-friendliness of operation Rc (improves from -0.11 to 0.46), lower CO2 emissions (reduced to zero), and lower operational costs (reduced by 72%). The results indicate that TB may be the most cost-effective Smart Grid enabling option for supporting higher penetration levels of intermittent renewables in the energy system.},
+  doi       = {10.1016/j.enbuild.2012.03.029},
+  file      = {Blarke2012.pdf:Blarke2012.pdf:PDF},
+  groups    = {phdthesis},
+  keywords  = {Smart Grid},
+  owner     = {pb},
+  timestamp = {2012.04.01},
+}
+
+@Conference{Bloem2009,
+  Title                    = {Dynamic Analysis Methodologies Applied to Energy Management in Residential Buildings},
+  Author                   = {Bloem, H. and Atanasiu, B.},
+  Booktitle                = {EEDAL'09 Conference 16-18 June, Berlin, Germany},
+  Year                     = {2009},
+
+  Owner                    = {pb},
+  Timestamp                = {2011.04.11}
+}
+
+@Conference{Bloem2007,
+  Title                    = {Dynamic Analysis Methods and Modelling. Application to Energy Performance Assessment.},
+  Author                   = {Bloem, J.J.},
+  Booktitle                = {PALENC conference, Crete 27-29 September},
+  Year                     = {2007},
+
+  Owner                    = {pb},
+  Timestamp                = {2011.04.10}
+}
+
+@MastersThesis{Bondy2012,
+  author    = {Daniel Esteban Morales Bondy and Jacopo Parvizi},
+  title     = {Modeling, Identification and Control for heat dynamics of Buildings using Robust Economic Model Predictive Control},
+  school    = {DTU},
+  year      = {2012},
+  file      = {Bondy2012.pdf:Bondy2012.pdf:PDF},
+  groups    = {Buildings},
+  owner     = {pb},
+  timestamp = {2013.06.11},
+}
+
+@Article{Bowman1985,
+  author    = {Bowman, N. T. and Lomas, K. J.},
+  title     = {Empirical validation of dynamic thermal computer models of buildings},
+  journal   = {Building Services Engineering Research and Technology},
+  year      = {1985},
+  volume    = {6},
+  number    = {4},
+  pages     = {153-162},
+  abstract  = {A methodology for the validation of dynamic thermal models of buildings has been presented. The three techniques, analytical verification, inter-model comparisons and empirical validation have been described and their relative merits assessed by reference to previous validation work on ESP, SERIR'S, DEROB and BLAST. Previous empirical validation work on these models has been reviewed. This research has shown that numerous sources of error have existed in previous studies leading to uncertainty in model predictions. The effects of these errors has meant that none of the previous empirical validation studies would have produced conclusive evidence of internal errors in the models themselves. An approach towards developing tests to empirically validate dynamic thermal models is given.},
+  doi       = {10.1177/014362448500600403},
+  eprint    = {http://bse.sagepub.com/content/6/4/153.full.pdf+html},
+  groups    = {Buildings},
+  owner     = {pb},
+  timestamp = {2010.11.17},
+}
+
+@Book{Box1976,
+  Title                    = {Time series analysis},
+  Author                   = {Box, G.E.P. and Jenkins, G.M. and Reinsel, G.C.},
+  Publisher                = {Holden-day San Francisco},
+  Year                     = {1976},
+
+  Owner                    = {pb},
+  Timestamp                = {2012.03.28}
+}
+
+@Article{Boyer1996,
+  author                     = {Boyer, H and Chabriat, JP and GrondinPerez, B and Tourrand, C and Brau, J},
+  title                      = {Thermal building simulation and computer generation of nodal models},
+  journal                    = {BUILDING AND ENVIRONMENT},
+  year                       = {1996},
+  volume                     = {31},
+  number                     = {3},
+  pages                      = {207-214},
+  month                      = {MAY},
+  issn                       = {0360-1323},
+  abstract                   = {The designer's preoccupation with reducing energy needs and obtaining a better thermal quality of indoor ambiance has helped in the development of several packages simulating the dynamic behaviour of buildings. This paper shows the adaptation of a method of thermal analysis, the nodal analysis, linked to the case of a building's thermal behaviour. We consider the case of conduction into a wall, the coupling with superficial exchanges and finally the constitution of thermal state models of the budding. Large variations exist from one building to another, and it is therefore necessary to build the thermal model from the building description. The chosen method in the case of our thermal simulation program for buildings, CODYRUN, is explained. Copyright (C) 1996 Elsevier Science Ltd.},
+  file                       = {Boyer1996.pdf:Boyer1996.pdf:PDF},
+  groups                     = {Buildings},
+  number-of-cited-references = {15},
+  owner                      = {pb},
+  review                     = {Uses (complex) thermal-network models to simulate buildings.},
+  times-cited                = {16},
+  timestamp                  = {2010.11.16},
+  unique-id                  = {ISI:A1996UP21700002},
+}
+
+@Booklet{BP2011,
+  title       = {Statistical Review of World Energy 2011},
+  author      = {BP},
+  year        = {2011},
+  groups      = {phdthesis},
+  lastchecked = {2012-04-03},
+  owner       = {pb},
+  timestamp   = {2012.04.03},
+  url         = {www.bp.com/statisticalreview},
+}
+
+@Article{Braun2002,
+  author    = {Braun, James E. and Chaturvedi, Nitin},
+  title     = {An Inverse Gray-Box Model for Transient Building Load Prediction},
+  journal   = {HVAC\&R Research},
+  year      = {2002},
+  volume    = {8},
+  number    = {1},
+  pages     = {73-99},
+  abstract  = { Lower costs and improved performance of sensors, controllers, and networking is leading to the development of smart building features, such as continuous performance monitoring, automated diagnostics, and optimal supervisory control. For some of these applications, it is important to be able to predict transient cooling and heating requirements for the building using inverse models that are trained using on-site data. Existing inverse models for transient building loads range from purely empirical or “black-box” models to purely physical or “white-box” models. Generally, black-box (e.g., neural network) models require a significant amount of training data and may not always reflect the actual physical behavior, whereas white-box (e.g., finite difference) models require specification of many physical parameters. This paper presents a hybrid or “gray-box” modeling approach that uses a transfer function with parameters that are constrained to satisfy a simple physical representation for energy flows in the building structure. A robust method is also presented for training parameters of the constrained model, wherein initial values of and bounds on physical parameters are estimated from a rough building description, better estimates are obtained using a global direct search algorithm, and optimal parameters are identified using a nonlinear regression algorithm. The model and training method were extensively tested for different buildings and locations using data generated from a detailed simulation program. The approach was also tested using data from a field site located near Chicago, Illinois. It was found that one to two weeks of data are sufficient to train a model so that it can accurately predict transient cooling or heating requirements. },
+  doi       = {10.1080/10789669.2002.10391290},
+  eprint    = {http://www.tandfonline.com/doi/pdf/10.1080/10789669.2002.10391290},
+  file      = {Braun2002.pdf:Braun2002.pdf:PDF},
+  groups    = {single house forecasting},
+  owner     = {pb},
+  timestamp = {2012.03.28},
+}
+
+@Article{Braun2014,
+  Title                    = {Using regression analysis to predict the future energy consumption of a supermarket in the \{UK\} },
+  Author                   = {M.R. Braun and H. Altan and S.B.M. Beck},
+  Journal                  = {Applied Energy },
+  Year                     = {2014},
+  Number                   = {0},
+  Pages                    = {305 - 313},
+  Volume                   = {130},
+
+  Abstract                 = {Abstract The change in climate has led to an interest in how this will affect the energy consumption in buildings. Most of the work in the literature relates to offices and homes. However, this paper investigates a supermarket in northern England by means of a multiple regression analysis based on gas and electricity data for 2012. The equations obtained in this analysis use the humidity ratio derived from the dry-bulb temperature and the relative humidity in conjunction with the actual dry-bulb temperature. These equations are used to estimate the consumption for the base year period (1961–1990) and for the predicted climate period 2030–2059. The findings indicate that electricity use will increase by 2.1% whereas gas consumption will drop by about 13% for the central future estimate. The research further suggests that the year 2012 is comparable in temperature to the future climate, but the relative humidity is lower. Further research should include adaptation/mitigation measures and an evaluation of their usefulness. },
+  Doi                      = {http://dx.doi.org/10.1016/j.apenergy.2014.05.062},
+  ISSN                     = {0306-2619},
+  Keywords                 = {Energy consumption},
+  Owner                    = {pb},
+  Timestamp                = {2015.01.30},
+  Url                      = {http://www.sciencedirect.com/science/article/pii/S0306261914005674}
+}
+
+@Article{Bremnes2004,
+  author    = {Bremnes, John Bjørnar},
+  title     = {Probabilistic wind power forecasts using local quantile regression},
+  journal   = {Wind Energy},
+  year      = {2004},
+  volume    = {7},
+  number    = {1},
+  pages     = {47-54},
+  issn      = {1099-1824},
+  abstract  = {Wind power forecasts are in various ways valuable for users in decision-making processes. However, most forecasts are deterministic, and hence possibly important information about uncertainty is not available. Complete information about future production can be obtained by using probabilistic forecasts, and this article demonstrates how such forecasts can be created by means of local quantile regression. The approach has several advantages, such as no distributional assumptions and flexible inclusion of predictive information. In addition, it can be shown that, for some purposes, forecasts in terms of quantiles provide the type of information required to make optimal economic decisions. The methodology is applied to data from a wind farm in Norway. Copyright © 2004 John Wiley & Sons, Ltd.},
+  doi       = {10.1002/we.107},
+  file      = {Bremnes2004.pdf:Bremnes2004.pdf:PDF},
+  groups    = {OptimalBidding},
+  keywords  = {wind power, probabilistic forecasts, quantile regression, economic value},
+  owner     = {pb},
+  publisher = {John Wiley \& Sons, Ltd.},
+  timestamp = {2012.04.04},
+}
+
+@InProceedings{Breyer2011,
+  Title                    = {HYBRID PV-WIND-RENEWABLE METHANE POWER PLANTS - A POTENTIAL CORNERSTONE OF GLOBAL ENERGY SUPPLY},
+  Author                   = {Breyer, C. and Rieke, S. and Sterner, M. and Schmid, J.},
+  Booktitle                = {Proceedings of ISES Solar World Conference 2011},
+  Year                     = {2011},
+
+  File                     = {Breyer2011.pdf:Breyer2011.pdf:PDF},
+  Owner                    = {pb},
+  Timestamp                = {2012.04.12}
+}
+
+@Book{Brockhoff2015,
+  Title                    = {Introduction to Statistics - eNotes},
+  Author                   = {Brockhoff, {Per B.} and Møller, {Jan Kloppenborg} and Andersen, {Elisabeth Wreford} and Peder Bacher and Christiansen, {Lasse Engbo}},
+  Year                     = {2015},
+
+  Owner                    = {pb},
+  Timestamp                = {2015.11.18},
+  Type                     = {Other <importModel: OtherImportModel>},
+  Url                      = {http://introstat.compute.dtu.dk/enote/}
+}
+
+@Article{Brunger1993,
+  author    = {Alfred P. Brunger and Frank C. Hooper},
+  title     = {Anisotropic sky radiance model based on narrow field of view measurements of shortwave radiance},
+  journal   = {Solar Energy},
+  year      = {1993},
+  volume    = {51},
+  number    = {1},
+  pages     = {53-64},
+  issn      = {0038-092X},
+  abstract  = {A model for the average anisotropic sky radiance (or intensity) as a function of the position of the sun, the diffuse fraction k, and the atmospheric clearness index k1 are presented in this article. The complete range of sky conditions from clear to turbid to overcast is covered. Analysis of the observed data indicates that the model can be used to estimate instantaneous sky radiance values with a mean bias error of −11% and a root mean square error of 65% of the mean. The model is shown to account for 83% of the deterministic part of the variance of the instantaneous sky radiance measurements. The formulation for the sky radiance L(θ, F) in the direction (θ, F) is L(θφ)=Gda0+a1cosθ+a2exp(−a3·Ψ)π(a0+2a13+2a2I(θs,a3) where Ψ is the scattering angle from the solar beam, a0, a1, a2, a3 are tabulated functions of k and k1, and I(θs, a3) is a specific function of a3 and the solar zenith angle θs.},
+  doi       = {10.1016/0038-092X(93)90042-M},
+  file      = {Brunger1993.pdf:Brunger1993.pdf:PDF},
+  groups    = {phdthesis},
+  owner     = {pb},
+  timestamp = {2012.03.31},
+}
+
+@TechReport{Cappelen1999,
+  Title                    = {Observed wind speed and direction in Denmark - with climatoligical standards normals, 1961-90},
+  Author                   = {Cappelen, J. and Jørgensen, B.},
+  Institution              = {Danish Meteorological Institute},
+  Year                     = {1999},
+
+  File                     = {Cappelen1999.pdf:Cappelen1999.pdf:PDF},
+  Owner                    = {pb},
+  Publisher                = {DMI},
+  Timestamp                = {2012.03.27}
+}
+
+@Misc{CEN2006,
+  Title                    = {EN{~}12975-2:2006, Thermal solar systems and components - Collectors - Part 2: Test methods},
+
+  Author                   = {{CEN, European committee for standardization}},
+  Year                     = {2006},
+
+  Owner                    = {pb},
+  Timestamp                = {2011.08.11}
+}
+
+@Article{Charytoniuk1998,
+  Title                    = {Nonparametric regression based short-term load forecasting},
+  Author                   = {Charytoniuk, W. and Chen, M.-S. and Van Olinda, P.},
+  Journal                  = {Power Systems, IEEE Transactions on},
+  Year                     = {1998},
+  Number                   = {3},
+  Pages                    = {725-730},
+  Volume                   = {13},
+
+  Abstract                 = {This paper presents a novel approach to short-term load forecasting by the application of nonparametric regression. The method is derived from a load model in the form of a probability density function of load and load affecting factors. A load forecast is a conditional expectation of load given the time, weather conditions and other explanatory variables. This forecast can be calculated directly from historical data as a local average of observed past loads with the size of the local neighborhood and the specific weights on the loads defined by a multivariate product kernel. The method accuracy relies on the adequate representation of possible future conditions by historical data, but a measure to detect any unreliable forecast can be easily constructed. The proposed procedure requires few parameters that can be easily calculated from historical data by applying the cross-validation technique.},
+  Doi                      = {10.1109/59.708572},
+  File                     = {Charytoniuk1998.pdf:Charytoniuk1998.pdf:PDF},
+  ISSN                     = {0885-8950},
+  Keywords                 = {load forecasting;probability;statistical analysis;cross-validation technique;historical data;load affecting factors;local neighborhood;multivariate product kernel;nonparametric regression;probability density function;short-term load forecasting;weather conditions;Artificial neural networks;Economic forecasting;Energy management;Load forecasting;Load modeling;Power system analysis computing;Power system economics;Power system management;Predictive models;Weather forecasting},
+  Owner                    = {pb},
+  Timestamp                = {2013.11.12}
+}
+
+@Article{Chen1995,
+  Title                    = {Analysis of an adaptive time-series autoregressive moving-average (ARMA) model for short-term load forecasting},
+  Author                   = {Chen, J.-F. and Wang, W.-M. and Huang, C.-M.},
+  Journal                  = {Electric Power Systems Research},
+  Year                     = {1995},
+  Number                   = {3},
+  Pages                    = {187-196},
+  Volume                   = {34},
+
+  Abstract                 = {In this paper, an adaptive ARMA (autoregressive moving-average) model is developed for short-term load forecasting of a power system. For short-term load forecasting, the Box-Jenkins transfer function approach has been regarded as one of the most accurate methods. However, the Box-Jenkins approach without adapting the forecasting errors available to update the forecast has limited accuracy. The adaptive approach first derives the error learning coefficients by virtue of minimum mean square error (MMSE) theory and then updates the forecasts based on the one-step-ahead forecast errors and the coefficients. Due to its adaptive capability, the algorithm can deal with any unusual system condition. The employed algorithm has been tested and compared with the Box-Jenkins approach. The results of 24-hours- and one-week-ahead forecasts show that the adaptive algorithm is more accurate than the conventional Box-Jenkins approach, especially for the 24-hour case. © 1995.},
+  Affiliation              = {Department of Electrical Engineering, National Cheng Kung University, Tainan, 701, Taiwan},
+  Author_keywords          = {Adaptive algorithms; Box-Jenkins time series; Load forecasting; Minimum mean square error theory},
+  Owner                    = {pb},
+  Source                   = {Scopus},
+  Timestamp                = {2013.11.12}
+}
+
+@Article{Chen2001,
+  author                     = {Chen, TY},
+  title                      = {Real-time predictive supervisory operation of building thermal systems with thermal mass},
+  journal                    = {Energy and Buildings},
+  year                       = {2001},
+  volume                     = {33},
+  number                     = {2},
+  pages                      = {141-150},
+  month                      = {JAN},
+  issn                       = {0378-7788},
+  abstract                   = {The real-time predictive dynamic operation of building thermal systems is explored in this paper. A systematic methodology is described for predictive supervisory operation. The focus of this study is on the development of an operation supervisor that can determine optimal operation strategies for building thermal processes, and its application to a floor heating system in a passive solar room with thermal mass. An optimal model associated with a number of practical operation constraints is formulated, and an approach that combines dynamic programming and on-line simulation is developed, to efficiently solve the problem in real-time. Simulations for the minimisation of energy consumption and operating cost, with different operation schedules under various weather conditions, are performed. Results show that the techniques are computationally efficient and that the predictive supervisory operation of floor heating systems may lead to significant savings in energy consumption and operating cost. (C) 2001 Elsevier Science B.V. All rights reserved.},
+  file                       = {Chen2001.pdf:Chen2001.pdf:PDF},
+  groups                     = {Buildings},
+  number-of-cited-references = {24},
+  owner                      = {pb},
+  times-cited                = {15},
+  timestamp                  = {2010.11.17},
+  unique-id                  = {ISI:000166247600007},
+}
+
+@Article{Chen2000,
+  author                     = {Chen, YM and Chen, ZK},
+  title                      = {A neural-network-based experimental technique for determining z-transfer function coefficients of a building envelope},
+  journal                    = {Building and Environment},
+  year                       = {2000},
+  volume                     = {35},
+  number                     = {3},
+  pages                      = {181-189},
+  month                      = {APR},
+  issn                       = {0360-1323},
+  abstract                   = {The z-transfer function technique is used in calculation for HVAC design and building energy consumption. Theoretical calculation methods to determine the z-transfer function coefficients, which characterize the dynamic thermal performance of building components, do exist, but these depend on a number of assumptions and one must supplement them with experimental techniques. This paper discusses a neural-network-based system identification technique to determine the z-transfer function of a building envelope from experimental data. A multi-layer neural network is trained by the samples constructed from the dynamically measured data of heat conduction process through a wall. The Markov parameters, which are produced from the weighting matrices of the network, are utilized to realize the minimal state space model of the wall by eigensystem realization algorithm. The z-transfer function coefficients are obtained by the algorithm transforming the state space model into z-transfer function. The results show that this technique has some advantages in programming and computational simplicity, very good properties of noise rejection and improved accuracy of the results. The training time of the network is greatly reduced by adopting the adaptive learning algorithm. (C) 2000 Elsevier Science Ltd. All rights reserved.},
+  file                       = {Chen2000.pdf:Chen2000.pdf:PDF},
+  groups                     = {Buildings},
+  number-of-cited-references = {12},
+  owner                      = {pb},
+  times-cited                = {20},
+  timestamp                  = {2010.11.16},
+  unique-id                  = {ISI:000084565900001},
+}
+
+@Article{Christian1978,
+  author    = {Christian, L. Edward and Everson, Dale O. and Davis, Steven L.},
+  title     = {A Statistical Method for Detection of Hormone Secretory Spikes},
+  journal   = {Journal of Animal Science},
+  year      = {1978},
+  volume    = {46},
+  number    = {3},
+  pages     = {699-706},
+  eprint    = {http://www.journalofanimalscience.org/content/46/3/699.full.pdf+html},
+  file      = {Christian1978.pdf:Christian1978.pdf:PDF},
+  groups    = {kernelpaper},
+  owner     = {pb},
+  timestamp = {2013.11.28},
+  url       = {http://www.journalofanimalscience.org/content/46/3/699.short},
+}
+
+@Article{Cleveland1976,
+  Title                    = {Decomposition of seasonal time series: A model for the Census X-11 program},
+  Author                   = {Cleveland, W.P. and Tiao, G.C.},
+  Journal                  = {Journal of the American Statistical Association},
+  Year                     = {1976},
+  Pages                    = {581-587},
+
+  File                     = {Cleveland1976.pdf:Cleveland1976.pdf:PDF},
+  Owner                    = {pb},
+  Publisher                = {JSTOR},
+  Timestamp                = {2012.03.23}
+}
+
+@Article{Cleveland1992,
+  Title                    = {Local regression models},
+  Author                   = {Cleveland, William S and Grosse, Eric and Shyu, William M},
+  Journal                  = {Statistical models in S},
+  Year                     = {1992},
+  Pages                    = {309--376},
+
+  File                     = {Cleveland1992.ps:Cleveland1992.ps:PostScript},
+  Owner                    = {pb},
+  Publisher                = {Pacific Grove},
+  Quality                  = {1},
+  Timestamp                = {2014.01.24}
+}
+
+@Book{Coles2001,
+  title     = {An introduction to statistical modeling of extreme values},
+  publisher = {Springer},
+  year      = {2001},
+  author    = {Coles, Stuart},
+  file      = {Coles2001.pdf:Coles2001.pdf:PDF},
+  groups    = {EVT},
+  owner     = {pb},
+  timestamp = {2013.11.27},
+}
+
+@Book{Conejo2010,
+  title     = {Decision making under uncertainty in electricity markets},
+  publisher = {Springer},
+  year      = {2010},
+  author    = {Conejo, Antonio J and Carri{\'o}n, Miguel and Morales, Juan M},
+  volume    = {153},
+  groups    = {Markets},
+  owner     = {pb},
+  timestamp = {2014.09.26},
+}
+
+@Article{Conejo2011,
+  author    = {Conejo, Antonio J and Morales, Juan M and Mart{\'\i}nez, Juan A},
+  title     = {Tools for the analysis and design of distributed resources—Part III: Market studies},
+  journal   = {IEEE Transactions on Power Delivery},
+  year      = {2011},
+  volume    = {26},
+  number    = {3},
+  pages     = {1663--1670},
+  groups    = {Markets},
+  owner     = {pb},
+  publisher = {IEEE},
+  timestamp = {2014.09.26},
+}
+
+@Article{Cook1982,
+  author    = {N.J. Cook},
+  title     = {Towards better estimation of extreme winds},
+  journal   = {Journal of Wind Engineering and Industrial Aerodynamics},
+  year      = {1982},
+  volume    = {9},
+  number    = {3},
+  pages     = {295 - 323},
+  issn      = {0167-6105},
+  doi       = {10.1016/0167-6105(82)90021-6},
+  file      = {Cook1982.pdf:Cook1982.pdf:PDF},
+  groups    = {EVT},
+  owner     = {pb},
+  timestamp = {2014.03.28},
+  url       = {http://www.sciencedirect.com/science/article/pii/0167610582900216},
+}
+
+@MastersThesis{Corradi2011,
+  author    = {Corradi, Olivier and Ochsenfeld, Henning},
+  title     = {Integration of fluctuating energy by electricity price control},
+  school    = {Technical University of Denmark},
+  year      = {2011},
+  file      = {Corradi2011.pdf:Corradi2011.pdf:PDF},
+  groups    = {Markets},
+  owner     = {pb},
+  timestamp = {2012.10.30},
+}
+
+@Article{Costa2008,
+  Title                    = {A review on the young history of the wind power short-term prediction},
+  Author                   = {Alexandre Costa and Antonio Crespo and Jorge Navarro and Gil Lizcano and Henrik Madsen and Everaldo Feitosa},
+  Journal                  = {Renewable and Sustainable Energy Reviews},
+  Year                     = {2008},
+  Number                   = {6},
+  Pages                    = {1725-1744},
+  Volume                   = {12},
+
+  Doi                      = {10.1016/j.rser.2007.01.015},
+  ISSN                     = {1364-0321},
+  Keywords                 = {Wind energy},
+  Owner                    = {pb},
+  Timestamp                = {2011.04.10}
+}
+
+@TechReport{COWI2011,
+  author      = {COWI},
+  title       = {Afdækning af potentiale for varmepumper til opvarmning af helårshuse i Danmark til erstatning for oliefyr},
+  institution = {Danish Energy Agency},
+  year        = {2011},
+  month       = {November},
+  file        = {COWI2011.pdf:COWI2011.pdf:PDF},
+  groups      = {single house forecasting},
+  owner       = {pb},
+  timestamp   = {2012.03.22},
+}
+
+@Article{Crabb1987,
+  author    = {Crabb, J A and Murdoch, N and Penman, J M},
+  title     = {A simplified thermal response model},
+  journal   = {Building Services Engineering Research and Technology},
+  year      = {1987},
+  volume    = {8},
+  number    = {1},
+  pages     = {13-19},
+  abstract  = {The observed dynamic thermal response of intermittently occupied buildings can usually be described by one or two time constants. This suggests that much of the complexity in detailed computer thermal response models may not always be necessary. Simplified thermal response models may be adequate for many purposes, and ought to have cost advantages, both in setting up and run time. The paper describes the basis of a simplified microcomputer model of building thermal response. Model predictions of internal temperature variation in a working school are shown to agree well with observations. The model is intended for professional use and user interface is being developed in consultation with an architectural practice.},
+  doi       = {10.1177/014362448700800104},
+  eprint    = {http://bse.sagepub.com/content/8/1/13.full.pdf+html},
+  groups    = {Buildings},
+  owner     = {pb},
+  timestamp = {2010.11.16},
+}
+
+@Booklet{ClimateChangePolicy2010,
+  title     = {Green energy – the road to a Danish energy system without fossil fuels},
+  author    = {{Danish Commission on Climate Change Policy}},
+  month     = {September},
+  year      = {2010},
+  file      = {ClimateChangePolicy2010.pdf:ClimateChangePolicy2010.pdf:PDF},
+  groups    = {single house forecasting, phdthesis},
+  owner     = {pb},
+  timestamp = {2012.03.22},
+}
+
+@TechReport{DanishCommissiononClimateChangePolicy2010,
+  author      = {{Danish Commission on Climate Change Policy}},
+  title       = {Grøn Energi - vejen mod et dansk energisystem uden fossile brændsler},
+  institution = {Danish Energy Agency},
+  year        = {2010},
+  month       = {September},
+  note        = {Dokumentationsdelen til Klimakommissionens samlede rapport},
+  file        = {DanishCommissiononClimateChangePolicy2010.pdf:DanishCommissiononClimateChangePolicy2010.pdf:PDF},
+  groups      = {single house forecasting},
+  owner       = {pb},
+  timestamp   = {2012.03.22},
+}
+
+@Article{Datta2000,
+  Title                    = {Application of Neural Networks for the Prediction of the Energy Consumption in a Supermarket},
+  Author                   = {Datta, D and Tassou, SA and Marriott, D},
+  Journal                  = {Proceedings of CLIMA 2000, p. 98},
+  Year                     = {2000},
+
+  Abstract                 = {It has been shown by previous researchers that Artificial Neural Networks (ANNs) can not only be used to predict energy more reliably than traditional simulation models and regression techniques but can also form the basis for a predictive controller of thermal systems such as HVAC equipment. This work is directed towards the identification of the important inputs (independent variables) to facilitate on-line prediction and thereby implement refrigeration and HVAC system diagnostics, process control, optimisation and energy management in retail food stores. This paper presents preliminary results on the prediction of electricity consumption with different independent input variables in a supermarket. The paper also compares the prediction performance of neural networks with the more traditional multiple regression techniques.},
+  Date-added               = {2013-02-26 10:18:20 +0000},
+  Date-modified            = {2013-10-31 10:12:03 +0000},
+  File                     = {Datta2000.pdf:Datta2000.pdf:PDF},
+  Keywords                 = {Found by Scholar google},
+  Owner                    = {pb},
+  Timestamp                = {2014.06.23}
+}
+
+@Article{Davies1982,
+  Title                    = {Estimating solar irradiance and components},
+  Author                   = {John A. Davies and Donald C. McKay},
+  Journal                  = {Solar Energy},
+  Year                     = {1982},
+  Number                   = {1},
+  Pages                    = {55-64},
+  Volume                   = {29},
+
+  Abstract                 = {The performance of models to estimate solar irradiance and its components is assessed using data for six Canadian stations for nine years (1968–1976). Greatest emphasis is placed on a model, the MAC model, which uses cloud information from different layers. Here, effects on model estimates of using observed cloud layer opacities instead of amounts is examined because after 1976 the latter quantity is no longer recorded. Effects of aerosol are also examined. Two other models are considered here; a version of the MAC model which uses total cloud information rather than layer information, and Rietveld's[1] sunshine-based model. In general, the layer model yielded the best results. This is in agreement with a previous Canadian study[2]. RMSE values are between 11 and 15 percent for global irradiance on a daily basis and decrease below 10 percent for averaging periods greater than 2–4 days. Daily RMSE values for the direct beam and diffuse components are 25 per cent. These decrease below 10 per cent for averaging periods larger than 10–15 days. Errors increase when layer opacities are used but results are still acceptable. Aerosol effects can be neglected for much of the country except in Montreal and Toronto. Rietveld's model gives results of lower accuracy than the MAC model but may be useful for quick, easy estimates. The MAC model using total cloud amount performed poorly.},
+  Doi                      = {10.1016/0038-092X(82)90280-8},
+  File                     = {Davies1982.pdf:Davies1982.pdf:PDF},
+  ISSN                     = {0038-092X},
+  Owner                    = {pb},
+  Timestamp                = {2012.09.12},
+  Url                      = {http://www.sciencedirect.com/science/article/pii/0038092X82902808}
+}
+
+@Article{Davies1997,
+  author    = {M. G. Davies},
+  title     = {Wall transient heat flow using time-domain analysis},
+  journal   = {Building and Environment},
+  year      = {1997},
+  volume    = {32},
+  number    = {5},
+  pages     = {427-446},
+  issn      = {0360-1323},
+  abstract  = {If values of ambient temperature and room temperature are known at hourly intervals, the heat flow to or from the room at some specified time can be computed in terms of the values of recent temperatures and heat flows, weighted by values of a series of N wall transfer coefficients, bk, or ck, and dk, (k = 0 to N). These coefficients have hitherto been found by frequency domain methods. It is shown here that they can be evaluated using elementary time domain solutions for wall heat flow. A multi-layer wall with surface films representing radiant and convective exchange is discussed. Using suitable transmission matrices, solutions for the steady-progressive and transient temperature profiles through the wall are derived. The value of N and the wall dk values follow immediately from the wall decay times of the transient solution, together with the choice of 1 hour as time interval. Using a development of Fourier analysis, the steady-progressive and transient solutions are combined so as to provide the time-domain solution for the response to an imposed ramp in ambient or room temperature. Combination of three such ramps forms a triangular temperature pulse of excitation and the infinite series of wall response factors -hourly values of heat flow at and after the temperature peak- serve to describe the response of the wall when excited by such a pulse. The values of bk and ck follow from the response factors. The procedure is illustrated numerically.},
+  doi       = {10.1016/S0360-1323(97)00007-3},
+  file      = {Davies1997.pdf:Davies1997.pdf:PDF},
+  groups    = {Buildings},
+  owner     = {pb},
+  review    = {Possibly interesting, but rather complicated physical models...hmm},
+  timestamp = {2010.11.17},
+}
+
+@Article{Davies1983,
+  author    = {M. G. Davies},
+  title     = {Optimum design of resistance and capacitance elements in modelling a sinusoidally excited building wall},
+  journal   = {Building and Environment},
+  year      = {1983},
+  volume    = {18},
+  number    = {1-2},
+  pages     = {19-37},
+  issn      = {0360-1323},
+  abstract  = {A procedure is described to calculate the optimum values of the lumped elements of a T section ladder network which models a multilayer one-dimensional building wall or roof when subjected to sinusoidal excitation. The transmission matrices of the real wall and a chain of simple T sections (1, 2, 3 or 4 units) are evaluated and the T section elements are systematically varied so as to minimise the sums of squares of the differences between the corresponding pairs of the 4 vector elements in the 2 matrices. For a thin wall this procedure is demonstrated analytically for up to 3 sections. The sum of squares is proportional to the fourth power of the slab thickness. Tables are provided of the optimum values for the elements of a homogeneous slab when represented by 1-4 T sections. The investigation is extended to a homogeneous slab flanked by equal and unequal surface films, and the possibility of a degenerate solution for a thin slab is demonstrated. Finally, the case of an entire wall construction is discussed; of a group of 29 walls and roofs, most can be satisfactorily modelled by a 3 T section network consisting of 6 lumped elements. Exact modelling may be possible.},
+  doi       = {10.1016/0360-1323(83)90015-X},
+  file      = {Davies1983.pdf:Davies1983.pdf:PDF},
+  groups    = {Buildings},
+  owner     = {pb},
+  timestamp = {2010.11.16},
+}
+
+@Book{DeBoor1993,
+  Title                    = {Box splines},
+  Author                   = {De Boor, Carl and H{\"o}llig, Klaus and Riemenschneider, Sherman D},
+  Publisher                = {Springer},
+  Year                     = {1993},
+  Volume                   = {98},
+
+  Owner                    = {pb},
+  Quality                  = {1},
+  Timestamp                = {2013.10.23}
+}
+
+@Article{DelBarrio2003,
+  author                     = {Del Barrio, EP and Guyon, G},
+  title                      = {Theoretical basis for empirical model validation using parameters space analysis tools},
+  journal                    = {Energy and Buildings},
+  year                       = {2003},
+  volume                     = {35},
+  number                     = {10},
+  pages                      = {985-996},
+  month                      = {NOV},
+  issn                       = {0378-7788},
+  abstract                   = {A new methodology for empirical model validation has been proposed in the framework of the Task 22 (Building Energy Analysis Tools) of the International Energy Agency. It involves two main steps: checking model validity and diagnosis. First step aims to test the model performances by identification of significant disagreements between measurements and simulations. It rests on both residuals analysis techniques and comparisons between model outputs uncertainty bands and measurements uncertainty intervals. Second step intends to explain the differences observed between model simulations and measurements. A new approach for models diagnosis has been proposed. It rests on the analysis of the model parameters space. The main objective is to identify the changes in parameters values that are required for a significant model behaviour improvement. Diagnosis is then provided by comparison of such results with the knowledge we have about both the actual system and the model itself. Main mathematical tools for diagnosis are sensitivity analysis and optimisation techniques. The methodology and the underlying methods we are proposing are presented in the first part of the paper. In the second part, they are applied for testing modelling hypothesis in the framework of the thermal analysis of an actual building. (C) 2003 Elsevier B.V. All rights reserved.},
+  doi                        = {10.1016/S0378-7788(03)00038-0},
+  file                       = {Del2003.pdf:Del2003.pdf:PDF},
+  groups                     = {Buildings},
+  number-of-cited-references = {24},
+  owner                      = {pb},
+  review                     = {Quite interesting about validation of models applied to heat dynamics in buildings.},
+  times-cited                = {7},
+  timestamp                  = {2010.11.16},
+  unique-id                  = {ISI:000185530800002},
+}
+
+@Article{Dent2011,
+  author    = {Dent, C.J. and Bialek, J.W. and Hobbs, B.F.},
+  title     = {Opportunity Cost Bidding by Wind Generators in Forward Markets: Analytical Results},
+  journal   = {IEEE Transactions on Power Systems},
+  year      = {2011},
+  volume    = {26},
+  number    = {3},
+  pages     = {1600-1608},
+  month     = {Aug},
+  issn      = {0885-8950},
+  abstract  = {Wind generation must trade in forward electricity markets based on imperfect forecasts of its output and real-time prices. When the real-time price differs for generators that are short and long, the optimal forward strategy must be based on the opportunity costs of charges and payments in real-time rather than a central estimate of wind output. We present analytical results for wind's optimal forward strategy. In the risk-neutral case, the optimal strategy is determined by the distribution of real-time available wind capacity, and the expected real-time prices conditioned on the forward price and wind out-turn; our approach is simpler and more computationally efficient than formulations requiring specification of full joint distributions or a large set of scenarios. Informative closed-form examples are derived for particular specifications of the wind-price dependence structure. In the usual case of uncertain forward prices, the optimal bidding strategy generally consists of a bid curve for wind power, rather than a fixed quantity bid. A discussion of the risk-averse problem is also provided. An analytical result is available for aversion to production volume risk; however, we doubt whether wind owners should be risk-averse with respect to the income from a single settlement period, given the large number of such periods in a year.},
+  doi       = {10.1109/TPWRS.2010.2100412},
+  file      = {Dent2011.pdf:Dent2011.pdf:PDF},
+  groups    = {OptimalBidding},
+  keywords  = {power generation economics;power markets;pricing;wind power plants;bid curve;forward markets;forward price;opportunity cost bidding;optimal forward strategy;production volume risk;real-time prices;risk-averse problem;wind capacity;wind generators;wind out-turn;wind-price dependence structure;Forward contracts;Generators;Real time systems;Wind forecasting;Wind power generation;Power generation economics;risk analysis;wind power generation},
+  owner     = {pb},
+  timestamp = {2015.12.18},
+}
+
+@Article{Deque2000,
+  author    = {Deque, F. and Ollivier, F. and Poblador, A.},
+  title     = {Grey boxes used to represent buildings with a minimum number of geometric and thermal parameters},
+  journal   = {Energy and Buildings},
+  year      = {2000},
+  volume    = {31},
+  number    = {1},
+  pages     = {29-35},
+  issn      = {03787788},
+  abstract  = {This article presents the `grey box' models used to represent building envelopes based on the entry of a limited number (ten or so) of parameters per thermal zone. These parameters correspond to the geometric data as well as the thermal characteristics of buildings (i.e., areas of walls, heat losses, etc.). These `grey boxes' are implemented in the CLIM 2000 environment for `multi thermal zones' models in the residential sector. The technical features, applications and limitations of these models are presented on the basis of software comparisons and experimental measurements.},
+  file      = {Deque2000.pdf:Deque2000.pdf:PDF},
+  groups    = {Buildings},
+  owner     = {pb},
+  review    = {Doesn't seem to be very relevant concerning SDE modelling.},
+  timestamp = {2010.11.17},
+}
+
+@Misc{DMI2012,
+  author       = {DMI},
+  title        = {{Danish Meteorological Institute, Borgervejr}},
+  howpublished = {\url{www.borgervejr.dk}},
+  month        = {Feb.},
+  year         = {2012},
+  groups       = {correction},
+  owner        = {pb},
+  timestamp    = {2012.02.15},
+}
+
+@Misc{DMI2011,
+  Title                    = {{Danish Meteorological Institute, DMI-HIRLAM-S05}},
+
+  Author                   = {DMI},
+  Year                     = {2011},
+
+  Owner                    = {pb},
+  Timestamp                = {2011.08.13},
+  Url                      = {http://www.dmi.dk/eng/index/research_and_development/dmi-hirlam-2009.htm}
+}
+
+@Article{Doherty2005,
+  Title                    = {A new approach to quantify reserve demand in systems with significant installed wind capacity},
+  Author                   = {Doherty, R. and O'Malley, M.},
+  Journal                  = {IEEE Transactions on Power Systems},
+  Year                     = {2005},
+
+  Month                    = {May},
+  Number                   = {2},
+  Pages                    = {587-595},
+  Volume                   = {20},
+
+  Abstract                 = {With wind power capacities increasing in many electricity systems across the world, operators are faced with new problems related to the uncertain nature of wind power. Foremost of these is the quantification and provision of system reserve. In this paper a new methodology is presented which quantifies the reserve needed on a system taking into account the uncertain nature of the wind power. Generator outage rates and load and wind power forecasts are taken into consideration when quantifying the amount of reserve needed. The reliability of the system is used as an objective measure to determine the effect of increasing wind power penetration. The methodology is applied to a model of the all Ireland electricity system, and results show that as wind power capacity increases, the system must increase the amount of reserve carried or face a measurable decrease in reliability.},
+  Doi                      = {10.1109/TPWRS.2005.846206},
+  ISSN                     = {0885-8950},
+  Keywords                 = {load forecasting;power generation faults;power generation reliability;power system security;wind power plants;Ireland electricity system;generator outage rates;power forecast;power generation faults;power generation reliability;power generation security;power penetration;wind power capacity;Load forecasting;Power generation;Power measurement;Power system modeling;Power system reliability;Wind energy;Wind energy generation;Wind farms;Wind forecasting;Wind power generation;Forecasting;power generation faults;power system security;wind power generation},
+  Owner                    = {pb},
+  Timestamp                = {2015.08.31}
+}
+
+@Article{Dotzauer2002,
+  author    = {Erik Dotzauer},
+  title     = {Simple model for prediction of loads in district-heating systems},
+  journal   = {Applied Energy},
+  year      = {2002},
+  volume    = {73},
+  number    = {3–4},
+  pages     = {277-284},
+  issn      = {0306-2619},
+  abstract  = {In order to improve the operation of district-heating systems, it is necessary for the energy companies to have reliable optimization routines, both computerized and manual, implemented in their organizations. However, before a production plan for the heat-producing units can be constructed, a prediction of the heat demand first needs to be determined. The outdoor temperature, together with the social behaviour of the consumers, have the greatest influence on the demand. This is also the core of the load prediction model developed in this paper. Several methodologies have been proposed for heat-load forecasting, but due to lack in measured data and due to the uncertainties that are present in the weather forecasts, many of them will fail in practice. In such situations, a more simple model may give as good predictions as an advanced one. This is also the experience from the applications analyzed in this paper.},
+  doi       = {10.1016/S0306-2619(02)00078-8},
+  groups    = {single house forecasting},
+  keywords  = {Heat-load forecasting},
+  owner     = {pb},
+  timestamp = {2012.03.22},
+}
+
+@Book{Duffie2006,
+  title     = {Solar Engineering of Thermal Processes, 3rd Edition},
+  publisher = {Wiley},
+  year      = {2006},
+  author    = {John A. Duffie and William A. Beckman},
+  groups    = {correction},
+  owner     = {pb},
+  timestamp = {2012.03.18},
+}
+
+@Book{Embrechts1997,
+  title     = {Modelling Extremal Events: For Insurance and Finance},
+  publisher = {Springer},
+  year      = {1997},
+  author    = {Embrechts, P. and Kl{\"u}ppelberg, C. and Mikosch, T.},
+  series    = {Applications of mathematics},
+  isbn      = {9783540609315},
+  file      = {Embrechts1997.pdf:Embrechts1997.pdf:PDF},
+  groups    = {EVT},
+  lccn      = {97012308},
+  owner     = {pb},
+  timestamp = {2014.01.06},
+  url       = {http://books.google.dk/books?id=BXOI2pICfJUC},
+}
+
+@TechReport{ENFOR2010,
+  author    = {ENFOR},
+  title     = {Detailed measurements of heat and electricity consumption in Sønderborg households during the heating season 08/09},
+  year      = {2010},
+  file      = {ENFOR2010.pdf:ENFOR2010.pdf:PDF},
+  groups    = {Buildings},
+  owner     = {pb},
+  timestamp = {2010.10.04},
+  url       = {http://www.enfor.dk/pub/03EKS0009A002-A.pdf},
+}
+
+@TechReport{ENFOR2010a,
+  author    = {ENFOR},
+  title     = {Analysis of energy consumption in single family houses},
+  year      = {2010},
+  note      = {Prepared by ENFOR A/S for The Danish Electricity Saving Trust. Document ID: 03EKS0009A002-A.},
+  file      = {:techreports/note_enerConsSdb_pub.pdf:PDF},
+  groups    = {Buildings, PerformanceKPI},
+  owner     = {pb},
+  timestamp = {2010.10.04},
+  url       = {http://www.enfor.dk/pub/03EKS0009A002-A.pdf},
+}
+
+@TechReport{ENFOR2008,
+  author    = {ENFOR},
+  title     = {Estimation of UA-values for single-family houses},
+  year      = {2008},
+  groups    = {Buildings, PerformanceKPI},
+  owner     = {pb},
+  timestamp = {2010.10.04},
+}
+
+@Article{Epanechnikov1969,
+  Title                    = {Non-parametric estimation of a multivariate probability density},
+  Author                   = {Epanechnikov, Vassiliy A},
+  Journal                  = {Theory of Probability \& Its Applications},
+  Year                     = {1969},
+  Number                   = {1},
+  Pages                    = {153-158},
+  Volume                   = {14},
+
+  Owner                    = {pb},
+  Publisher                = {SIAM},
+  Timestamp                = {2013.11.18}
+}
+
+@Article{Espinar2009,
+  Title                    = {Analysis of different comparison parameters applied to solar radiation data from satellite and German radiometric stations},
+  Author                   = {Bella Espinar and Lourdes Ramírez and Anja Drews and Hans Georg Beyer and Luis F. Zarzalejo and Jesús Polo and Luis Martín},
+  Journal                  = {Solar Energy},
+  Year                     = {2009},
+  Number                   = {1},
+  Pages                    = {118-125},
+  Volume                   = {83},
+
+  Abstract                 = {In this paper new comparison parameters are defined for assessing statistical similarity between two data sets. The new parameters are based on the commonly used Kolmogorov–Smirnov test. They allow quantifying differences between the cumulative distribution functions of each data series. These parameters are applied to global horizontal daily irradiation values from pyranometric measurements and satellite data. The test data from 38 stations distributed throughout Germany cover the time from 1995 until 2003. The results affirm that the new parameters contribute valuable information to the comparison of data sets complementing those that are found with the mean bias and root mean squared differences.},
+  Doi                      = {10.1016/j.solener.2008.07.009},
+  File                     = {Espinar2009.pdf:Espinar2009.pdf:PDF},
+  ISSN                     = {0038-092X},
+  Keywords                 = {Comparison parameters},
+  Owner                    = {pb},
+  Timestamp                = {2012.02.15}
+}
+
+@Article{Espinoza2006,
+  Title                    = {Fixed-size least squares support vector machines: A large scale application in electrical load forecasting},
+  Author                   = {Espinoza, Marcelo and Suykens, Johan AK and De Moor, Bart},
+  Journal                  = {Computational Management Science},
+  Year                     = {2006},
+  Number                   = {2},
+  Pages                    = {113--129},
+  Volume                   = {3},
+
+  File                     = {Espinoza2006.pdf:Espinoza2006.pdf:PDF},
+  Owner                    = {pb},
+  Publisher                = {Springer},
+  Quality                  = {1},
+  Timestamp                = {2014.06.24}
+}
+
+@Manual{Eudirective2010,
+  Title                    = {EU Directive 2010/31/EU on Energy Performance for Buildings from 19 May 2010},
+  Author                   = {EU},
+  Year                     = {2010},
+
+  Owner                    = {pb},
+  Timestamp                = {2011.04.11}
+}
+
+@Unpublished{Fan2009,
+  author    = {Fan, J. and Chen, Z. and Furbo, S. and Perers, B. and Karlsson, B.},
+  title     = {Efficiency and lifetime of solar collectors for solar heating plants},
+  note      = {Proceedings of the ISES Solar World Congress 2009: Renewable Energy Shaping Our Future},
+  year      = {2009},
+  file      = {Fan2009.pdf:Fan2009.pdf:PDF},
+  groups    = {Collector testing},
+  owner     = {pb},
+  timestamp = {2009.08.31},
+}
+
+@Article{Fan2012,
+  Title                    = {Short-Term Load Forecasting Based on a Semi-Parametric Additive Model},
+  Author                   = {Shu Fan and Hyndman, R.J.},
+  Journal                  = {IEEE Transactions on Power Systems},
+  Year                     = {2012},
+
+  Month                    = {Feb},
+  Number                   = {1},
+  Pages                    = {134-141},
+  Volume                   = {27},
+
+  Abstract                 = {Short-term load forecasting is an essential instrument in power system planning, operation, and control. Many operating decisions are based on load forecasts, such as dispatch scheduling of generating capacity, reliability analysis, and maintenance planning for the generators. Overestimation of electricity demand will cause a conservative operation, which leads to the start-up of too many units or excessive energy purchase, thereby supplying an unnecessary level of reserve. On the other hand, underestimation may result in a risky operation, with insufficient preparation of spinning reserve, causing the system to operate in a vulnerable region to the disturbance. In this paper, semi-parametric additive models are proposed to estimate the relationships between demand and the driver variables. Specifically, the inputs for these models are calendar variables, lagged actual demand observations, and historical and forecast temperature traces for one or more sites in the target power system. In addition to point forecasts, prediction intervals are also estimated using a modified bootstrap method suitable for the complex seasonality seen in electricity demand data. The proposed methodology has been used to forecast the half-hourly electricity demand for up to seven days ahead for power systems in the Australian National Electricity Market. The performance of the methodology is validated via out-of-sample experiments with real data from the power system, as well as through on-site implementation by the system operator.},
+  Doi                      = {10.1109/TPWRS.2011.2162082},
+  File                     = {Fan2012.pdf:Fan2012.pdf:PDF},
+  ISSN                     = {0885-8950},
+  Keywords                 = {load forecasting;power markets;power system control;power system economics;power system faults;power system planning;statistical analysis;Australian National Electricity Market;electricity demand data;electricity demand overestimation;generating capacity dispatch scheduling;generator maintenance planning;modified bootstrap method;power system control;power system disturbance;power system operation;power system planning;prediction interval estimation;reliability analysis;semiparametric additive model;short-term load forecasting;spinning reserve preparation;Computational modeling;Electricity;Forecasting;Input variables;Load forecasting;Load modeling;Predictive models;Additive model;forecast distribution;short-term load forecasting;time series},
+  Owner                    = {pb},
+  Timestamp                = {2014.05.05}
+}
+
+@Article{Fang2012,
+  Title                    = {Smart Grid - The New and Improved Power Grid: A Survey},
+  Author                   = {Fang, Xi and Misra, Satyajayant and Xue, Guoliang and Yang, Dejun},
+  Journal                  = {IEEE Communications Surveys \& Tutorials},
+  Year                     = {2012},
+
+  Month                    = {Fourth},
+  Number                   = {4},
+  Pages                    = {944-980},
+  Volume                   = {14},
+
+  Abstract                 = {The Smart Grid, regarded as the next generation power grid, uses two-way flows of electricity and information to create a widely distributed automated energy delivery network. In this article, we survey the literature till 2011 on the enabling technologies for the Smart Grid. We explore three major systems, namely the smart infrastructure system, the smart management system, and the smart protection system. We also propose possible future directions in each system. colorred{Specifically, for the smart infrastructure system, we explore the smart energy subsystem, the smart information subsystem, and the smart communication subsystem.} For the smart management system, we explore various management objectives, such as improving energy efficiency, profiling demand, maximizing utility, reducing cost, and controlling emission. We also explore various management methods to achieve these objectives. For the smart protection system, we explore various failure protection mechanisms which improve the reliability of the Smart Grid, and explore the security and privacy issues in the Smart Grid.},
+  Doi                      = {10.1109/SURV.2011.101911.00087},
+  File                     = {Fang2012.pdf:Fang2012.pdf:PDF},
+  ISSN                     = {1553-877X},
+  Keywords                 = {Energy management;NIST;Power grids;Privacy;Security;Smart grids;Smart grid;communications;energy;information;management;power grid;privacy;protection;security;survey},
+  Owner                    = {pb},
+  Quality                  = {1},
+  Timestamp                = {2014.03.06}
+}
+
+@Article{Fard2013,
+  Title                    = {A hybrid method based on wavelet, ANN and ARIMA model for short-term load forecasting},
+  Author                   = {Fard, Abdollah Kavousi and Akbari-Zadeh, Mohammad-Reza},
+  Journal                  = {Journal of Experimental \& Theoretical Artificial Intelligence},
+  Year                     = {2013},
+  Pages                    = {1-16},
+
+  Abstract                 = {In the new competitive electricity markets, the necessity of appropriate load forecasting tools for accurate scheduling is completely evident. The model which is utilised for the forecasting purposes determines how much the forecasted results would be dependable. In this regard, this paper proposes a new hybrid forecasting method based on the wavelet transform, autoregressive integrated moving average (ARIMA) and artificial neural network (ANN) for short-term load forecasting. In the proposed model, the autocorrelation function and the partial autocorrelation function are utilised to see the stationary or non-stationary behaviour of the load time series. Then, by the use of Akaike information criterion, the appropriate order of the ARIMA model is found. Now, the ARIMA model would capture the linear component of the load time series and the residuals would contain only the nonlinear components. The nonlinear part would be decomposed by the discrete wavelet transform into its sub-frequencies. Several ANNs are applied to the details and approximation components of the residuals signal to predict the future load sample. Finally, the outputs of the ARIMA and ANNs are summed. The empirical results show that the proposed hybrid method can improve the load forecasting accuracy suitably.},
+  Doi                      = {10.1080/0952813X.2013.813976},
+  Eprint                   = {http://www.tandfonline.com/doi/pdf/10.1080/0952813X.2013.813976},
+  File                     = {Fard2013.pdf:Fard2013.pdf:PDF},
+  Owner                    = {pb},
+  Timestamp                = {2013.11.12},
+  Url                      = {http://www.tandfonline.com/doi/abs/10.1080/0952813X.2013.813976}
+}
+
+@Article{Farinaccio1999,
+  author    = {Linda Farinaccio and Radu Zmeureanu},
+  title     = {Using a pattern recognition approach to disaggregate the total electricity consumption in a house into the major end-uses},
+  journal   = {Energy and Buildings},
+  year      = {1999},
+  volume    = {30},
+  number    = {3},
+  pages     = {245-259},
+  issn      = {0378-7788},
+  abstract  = {The method presented in this paper shows a promising potential for application in residential buildings. The results prove that the whole-house electricity consumption can be disaggregated into its major end-uses, using a pattern recognition approach and only one sensor installed on the main electric entrance of the house. It also required a one-time submetering of the target appliances during the training period, of about a week, to find the electric characteristics of appliances. The results are provided in terms of daily load profiles, energy consumption and energy contribution of selected appliances. The proposed method was tested with monitored data from 3 weeks: (i) the training period of 1 week in October, (ii) the near-to-date testing period of 1 week in November and (iii) the far-to-date testing period of 1 week in January. For instance, the difference between monitored and estimated contribution is, for the month of October 1996, as follows: (i) 13 kW h or \$0.85 for the DHW heater and (ii) 6 kW h or \$0.36 for the refrigerator. The overall difference for both appliances does not exceed \$1.25 for the month of October, for a total electricity bill of 912 kW h and \$60.60, which appears to be acceptable for every homeowner. The errors in evaluating the daily energy consumption are between −10.5% and 15.9% for both the DHW heater and the refrigerator.},
+  doi       = {10.1016/S0378-7788(99)00007-9},
+  groups    = {kernelpaper},
+  keywords  = {Residential buildings},
+  owner     = {pb},
+  timestamp = {2013.11.18},
+  url       = {http://www.sciencedirect.com/science/article/pii/S0378778899000079},
+}
+
+@Article{Fischer2004,
+  author    = {Fischer, S. and Heidemann, W. and Müller-Steinhagen, H. and Perers, B. and Bergquist, P. and Hellström, B.},
+  title     = {Collector test method under quasi-dynamic conditions according to the European Standard EN 12975-2},
+  journal   = {Solar Energy},
+  year      = {2004},
+  volume    = {76},
+  number    = {1-3},
+  pages     = {117-123},
+  issn      = {0038092x},
+  abstract  = {In April 2001 the new European Standard EN 12975:2000: Thermal solar systems and components Solar Collectors was established. With the publication of this European standard all national standards, related to the same topic, have to be withdrawn by the nations of the European Community. Now only one standard for testing solar collectors is valid throughout Europe. This European Standard specifies test methods for validating the durability, reliability and safety requirements for liquid heating collectors. The standard also includes two alternative test methods for the thermal performance characterization for liquid heating collectors. Apart from the well-known test method under steady-state conditions according to ISO 9806-1, ISO 9806-3 and ASHRAE 93-77 the EN 12975 permits a quasi-dynamic test method for the thermal performance characterization of solar thermal collectors. This paper presents the improved approach to outdoor performance testing of solar thermal collectors under quasi-dynamic test conditions. The test requirements and collector theory are closely connected to those long agreed on for steady-state testing, as described in the ISO and ASHRAE standards mentioned above. The most important effects for the all day performance of the collector are taken into account. The test method covers most collector designs on the market today (except ICS type). Only some correction terms are added to the basic collector models of the present steady-state test methods. Still this limited change will allow test data to be collected and used from whole days. An important fact is that the collector model used for the parameter identification is written so that the error in collector output power is minimized. Therefore an accurate long-term prediction of the collector performance can be an integral part of the test method, where the same collector model and parameters are used for both testing and prediction.},
+  doi       = {10.1016/j.solener.2003.07.021},
+  file      = {Fischer2004.pdf:Fischer2004.pdf:PDF},
+  groups    = {Collector testing},
+  owner     = {pb},
+  timestamp = {2011.02.24},
+}
+
+@InProceedings{FonsecaJunior2011,
+  author    = {Fonseca Júnior, J.G.S. and Oozeki, T. and Takashima, T. and Ogimoto, K.},
+  title     = {Analysis of the use of support vector regression and neural networks to forecast insolation for 25 locations in {Japan}},
+  booktitle = {Proceedings of ISES Solar World Conference 2011},
+  year      = {2011},
+  file      = {FonsecaJunior2011.pdf:FonsecaJunior2011.pdf:PDF},
+  groups    = {forecasting, phdthesis},
+  owner     = {pb},
+  timestamp = {2012.04.21},
+}
+
+@TechReport{Fredslund2013,
+  Title                    = {Load profiles for Supermarket refrigeration},
+  Author                   = {Kristian Fredslund},
+  Institution              = {IPU Refrigeration and Energy Technology},
+  Year                     = {2013},
+
+  Owner                    = {pb},
+  Timestamp                = {2013.03.21}
+}
+
+@Article{Friederichs2012,
+  author    = {Friederichs, Petra and Thorarinsdottir, Thordis L.},
+  title     = {Forecast verification for extreme value distributions with an application to probabilistic peak wind prediction},
+  journal   = {Environmetrics},
+  year      = {2012},
+  volume    = {23},
+  number    = {7},
+  pages     = {579-594},
+  issn      = {1099-095X},
+  abstract  = {Predictions of the uncertainty associated with extreme events are a vital component of any prediction system for such events. Consequently, the prediction system ought to be probabilistic in nature, with the predictions taking the form of probability distributions. This paper concerns probabilistic prediction systems where the data are assumed to follow either a generalized extreme value (GEV) distribution or a generalized Pareto distribution. In this setting, the properties of proper scoring rules that facilitate the assessment of the prediction uncertainty are investigated, and closed form expressions for the continuous ranked probability score (CRPS) are provided. In an application to peak wind prediction, the predictive performance of a GEV model under maximum likelihood estimation, optimum score estimation with the CRPS, and a Bayesian framework are compared. The Bayesian inference yields the highest overall prediction skill and is shown to be a valuable tool for covariate selection, while the predictions obtained under optimum CRPS estimation are the sharpest and give the best performance for high thresholds and quantiles. Copyright © 2012 John Wiley & Sons, Ltd.},
+  doi       = {10.1002/env.2176},
+  file      = {Friederichs2012.pdf:Friederichs2012.pdf:PDF},
+  groups    = {EVT},
+  keywords  = {Bayesian variable selection, continuous ranked probability score, extreme events, optimum score estimation, prediction uncertainty, wind gusts},
+  owner     = {pb},
+  timestamp = {2013.11.27},
+  url       = {http://dx.doi.org/10.1002/env.2176},
+}
+
+@Book{Friedman2001,
+  Title                    = {The elements of statistical learning},
+  Author                   = {Friedman, J. and Hastie, T. and Tibshirani, R.},
+  Publisher                = {Springer Series in Statistics},
+  Year                     = {2001},
+  Volume                   = {1},
+
+  Owner                    = {pb},
+  Timestamp                = {2012.03.26}
+}
+
+@Article{Fthenakis2009,
+  author    = {Vasilis Fthenakis and James E. Mason and Ken Zweibel},
+  title     = {The technical, geographical, and economic feasibility for solar energy to supply the energy needs of the US},
+  journal   = {Energy Policy},
+  year      = {2009},
+  volume    = {37},
+  number    = {2},
+  pages     = {387-399},
+  issn      = {0301-4215},
+  abstract  = {So far, solar energy has been viewed as only a minor contributor in the energy mixture of the US due to cost and intermittency constraints. However, recent drastic cost reductions in the production of photovoltaics (PV) pave the way for enabling this technology to become cost competitive with fossil fuel energy generation. We show that with the right incentives, cost competitiveness with grid prices in the US (e.g., 6–10 US¢/kWh) can be attained by 2020. The intermittency problem is solved by integrating PV with compressed air energy storage (CAES) and by extending the thermal storage capability in concentrated solar power (CSP). We used hourly load data for the entire US and 45-year solar irradiation data from the southwest region of the US, to simulate the CAES storage requirements, under worst weather conditions. Based on expected improvements of established, commercially available PV, CSP, and CAES technologies, we show that solar energy has the technical, geographical, and economic potential to supply 69% of the total electricity needs and 35% of the total (electricity and fuel) energy needs of the US by 2050. When we extend our scenario to 2100, solar energy supplies over 90%, and together with other renewables, 100% of the total US energy demand with a corresponding 92% reduction in energy-related carbon dioxide emissions compared to the 2005 levels.},
+  doi       = {10.1016/j.enpol.2008.08.011},
+  file      = {Fthenakis2009.pdf:Fthenakis2009.pdf:PDF},
+  groups    = {phdthesis},
+  keywords  = {Solar},
+  owner     = {pb},
+  timestamp = {2012.04.02},
+}
+
+@Book{Furbo2013,
+  Title                    = {Solar/electric heating systems for the future energy system},
+  Abstract                 = {The project “Solar/electric heating systems in the future energy system” was carried out in the period 2008‐2013. The project partners were DTU Byg, DTU Informatics (now DTU Compute), DMI, ENFOR A/S and COWI A/S. The companies Ajva ApS, Ohmatex ApS and Innogie ApS worked together with the project partners in two connected projects in order to develop solar/electric heating systems for laboratory tests. The project was financed by the Danish Agency for Science, Technology and Innovation under the Danish Council for Strategic Research in the program Sustainable Energy and Environment. The DSF number of the project is 2104‐07‐0021/09‐063201/DSF. This report is the final report of the project. The aim of the project is to elucidate how individual heating units for single family houses are best designed in order to fit into the future energy system. The units are based on solar energy, electrical heating elements/heat pump, advanced heat storage tanks and advanced control systems. Heat is produced by solar collectors in sunny periods and by electrical heating elements/heat pump. The electrical heating elements/heat pump will be in operation in periods where the heat demand cannot be covered by solar energy. The aim is to use the auxiliary heating units when the electricity price is low, e.g. due to large electricity production by wind turbines. The unit is equipped with an advanced control system where the control of the auxiliary heating is based on forecasts of the electricity price, the heat demand and the solar energy production. Consequently, the control is based on weather forecasts. Three differently designed heating units are tested in a laboratory test facility. The systems are compared on the basis of: - energy consumption for the auxiliary heating - energy cost for the auxiliary heating - net utilized solar energy},
+  Author                   = {Furbo, Simon and Dannemand, Mark and Perers, Bengt and Andersen, Elsa and Fan, Jianhua and Bacher, Peder and Madsen, Henrik and Halvgaard, Rasmus and Nielsen, Henrik Aalborg and Pagh Nielsen, Kristian and Lundholm, Sisse Camilla and Hansen Sass, Bent and Engberg Pedersen, Thomas and Nymann Rud, Jakob and Harley Hansen, Kristian},
+  ISBN                     = {9788778773739},
+  Language                 = {eng},
+  Institution              = {Technical University of Denmark, Department of Civil Engineering},
+  Year                     = {2013}
+}
+
+@Article{Geiger2002,
+  author    = {M. Geiger and L. Diabaté and L. Ménard and L. Wald},
+  title     = {A web service for controlling the quality of measurements of global solar irradiation},
+  journal   = {Solar Energy},
+  year      = {2002},
+  volume    = {73},
+  number    = {6},
+  pages     = {475-480},
+  issn      = {0038-092X},
+  abstract  = {The control of the quality of irradiation data is often a prerequisite to their further processing. Though data are usually controlled by meteorological offices, the sources are so numerous that the user often faces time-series of measurements containing questionable values. As customers of irradiation data, we established our own procedures to screen time-series of measurements. Since this problem of quality control is of concern to many researchers and engineers and since it is often a lengthy and tedious task, we decided to make this screening procedure available to everyone as a web service. This service is the purpose of this paper. The objective is not to perform a precise and fine control, an objective out of reach without details on the site and instruments, but to perform a likelihood control of the data and to check their plausibility. This is achieved by comparing observations with some expectations based upon the extraterrestrial irradiation and a simulation of the irradiation for clear skies. This service is available to everyone on the Web site www.helioclim.net. It offers a very convenient means to check time-series of irradiation: data are input in a HTML page by a copy and paste procedure and the return is also a HTML page that can be analyzed in detail for the data flagged as suspicious.},
+  doi       = {10.1016/S0038-092X(02)00121-4},
+  file      = {Geiger2002.pdf:Geiger2002.pdf:PDF},
+  groups    = {correction},
+  owner     = {pb},
+  timestamp = {2012.02.14},
+}
+
+@Article{Ghiaus2006,
+  Title                    = {Experimental estimation of building energy performance by robust regression},
+  Author                   = {Cristian Ghiaus},
+  Journal                  = {Energy and Buildings },
+  Year                     = {2006},
+  Number                   = {6},
+  Pages                    = {582 - 587},
+  Volume                   = {38},
+
+  Abstract                 = {Estimation of energy performance indexes, like the heating curve or the energy signature, requires robust regression of the heating losses on the outdoor temperature. The solution proposed in this paper is to use the range between the 1st and the 3rd quartile of the quantile–quantile (q–q) plot to check if the heating losses and the outdoor temperature have the same distribution and, if yes, to perform the regression in this range of the q–q plot. The result is a model that conserves its prediction performance for data sets of the outdoor temperature different of those used for parameter identification. The robust model gives the overall heat transfer coefficient and the base temperature, and it may be used to estimate the energy consumption for data sets of the outdoor temperature coming from different time—space locations. },
+  Doi                      = {10.1016/j.enbuild.2005.08.014},
+  File                     = {Ghiaus2006.pdf:Ghiaus2006.pdf:PDF},
+  ISSN                     = {0378-7788},
+  Keywords                 = {Parameter identification},
+  Owner                    = {pbac},
+  Timestamp                = {2017.03.27},
+  Url                      = {http://www.sciencedirect.com/science/article/pii/S0378778805001799}
+}
+
+@Article{Giabardo2010,
+  author    = {Giabardo, Paolo and Zugno, Marco and Pinson, Pierre and Madsen, Henrik},
+  title     = {Feedback, competition and stochasticity in a day ahead electricity market},
+  journal   = {Energy Economics},
+  year      = {2010},
+  volume    = {32},
+  number    = {2},
+  pages     = {292--301},
+  file      = {Giabardo2010.pdf:Giabardo2010.pdf:PDF},
+  groups    = {Markets},
+  owner     = {pb},
+  timestamp = {2014.09.25},
+}
+
+@Article{Gilleland2011,
+  author    = {Eric Gilleland and Richard W. Katz},
+  title     = {New software to analyze how extremes change over time},
+  journal   = {Eos},
+  year      = {2011},
+  volume    = {92},
+  number    = {2},
+  pages     = {13--14},
+  file      = {Gilleland2011.pdf:Gilleland2011.pdf:PDF},
+  groups    = {EVT},
+  owner     = {pb},
+  quality   = {1},
+  timestamp = {2014.03.18},
+}
+
+@Article{Gneiting2011,
+  author    = {Tilmann Gneiting},
+  title     = {Quantiles as optimal point forecasts},
+  journal   = {International Journal of Forecasting},
+  year      = {2011},
+  volume    = {27},
+  number    = {2},
+  pages     = {197 - 207},
+  issn      = {0169-2070},
+  abstract  = {Loss functions play a central role in the theory and practice of forecasting. If the loss function is quadratic, the mean of the predictive distribution is the unique optimal point predictor. If the loss is symmetric piecewise linear, any median is an optimal point forecast. Quantiles arise as optimal point forecasts under a general class of economically relevant loss functions, which nests the asymmetric piecewise linear loss, and which we refer to as generalized piecewise linear (GPL). The level of the quantile depends on a generic asymmetry parameter which reflects the possibly distinct costs of underprediction and overprediction. Conversely, a loss function for which quantiles are optimal point forecasts is necessarily GPL. We review characterizations of this type in the work of Thomson, Saerens and Komunjer, and relate to proper scoring rules, incentive-compatible compensation schemes and quantile regression. In the empirical part of the paper, the relevance of decision theoretic guidance in the transition from a predictive distribution to a point forecast is illustrated using the Bank of England’s density forecasts of United Kingdom inflation rates, and probabilistic predictions of wind energy resources in the Pacific Northwest. },
+  doi       = {10.1016/j.ijforecast.2009.12.015},
+  file      = {Gneiting2011.pdf:Gneiting2011.pdf:PDF},
+  groups    = {OptimalBidding},
+  keywords  = {Decision making},
+  owner     = {pb},
+  timestamp = {2015.12.18},
+  url       = {http://www.sciencedirect.com/science/article/pii/S0169207010000063},
+}
+
+@Article{Gneiting2006,
+  Title                    = {Calibrated probabilistic forecasting at the stateline wind energy center: The regime-switching space--time method},
+  Author                   = {Gneiting, Tilmann and Larson, Kristin and Westrick, Kenneth and Genton, Marc G and Aldrich, Eric},
+  Journal                  = {Journal of the American Statistical Association},
+  Year                     = {2006},
+  Number                   = {475},
+  Pages                    = {968--979},
+  Volume                   = {101},
+
+  File                     = {Gneiting2006.pdf:Gneiting2006.pdf:PDF},
+  Owner                    = {pb},
+  Timestamp                = {2014.06.24}
+}
+
+@Article{Godfrey1980,
+  Title                    = {Correlation methods},
+  Author                   = {Godfrey, K.R.},
+  Journal                  = {Automatica},
+  Year                     = {1980},
+  Number                   = {5},
+  Pages                    = {527-534},
+  Volume                   = {16},
+
+  Abstract                 = {The paper discusses the theory of correlation methods, emphasising the use of crosscorrelation to determine weighting functions of linear systems. The corresponding frequency domain expressions are derived. Correlation methods have been applied widely in engineering and several applications, and the problems associated with them, are discussed.},
+  Copyright                = {International Federation of Automatic Control IFAC},
+  File                     = {Godfrey1980.pdf:Godfrey1980.pdf:PDF},
+  ISSN                     = {00051098},
+  Language                 = {English},
+  Owner                    = {pb},
+  Timestamp                = {2009.01.29}
+}
+
+@Article{Grewal1976,
+  author    = {Grewal, M.S. and Glover, K.},
+  title     = {Identifiability of linear and nonlinear dynamical systems},
+  journal   = {IEEE Transactions on Automatic Control},
+  year      = {1976},
+  volume    = {21},
+  number    = {6},
+  pages     = {833-837},
+  month     = {Dec},
+  issn      = {0018-9286},
+  abstract  = {This short paper considers the identification of dynamical systems from input-output data. The problem of parameter identifiability for such systems is approached by considering whether system outputs obtained with different parameter values can be distinguished one from another. The results are stated formally by defining the notion of "output distinguishability." Parameter identifiability is then defined precisely in terms of output distinguishability. Relationships have been developed with the other definitions such as least square identifiability and identifiability from the transfer function. Several results for linear and nonlinear systems are presented with examples.},
+  doi       = {10.1109/TAC.1976.1101375},
+  file      = {Grewal1976.pdf:Grewal1976.pdf:PDF},
+  groups    = {Identifiability},
+  keywords  = {Linear systems, time-invariant continuous-time;Nonlinear systems, continuous-time;Parameter identification;Automatic control;Data mining;Differential equations;Least squares methods;Nonlinear dynamical systems;Nonlinear systems;Parameter estimation;Stability;Time invariant systems;Transfer functions},
+  owner     = {pb},
+  timestamp = {2014.09.19},
+}
+
+@Article{Gutschker2008,
+  author    = {Gutschker, Olaf},
+  title     = {Parameter identification with the software package LORD},
+  journal   = {Building and Environment},
+  year      = {2008},
+  volume    = {43},
+  number    = {2},
+  pages     = {163-169},
+  issn      = {03601323},
+  abstract  = {This paper describes some basic concepts of the software package LORD, which was developed during the PASLINK projects. LORD allows the modelling and identification of thermal systems, in particular building components. A certain degree of experience is necessary for the correct application of the software. Although the technique used is the well-known lumped parameter modelling, which describes the thermal system as an electrical analogue RC network, several improvements are possible. One of them is the stochastic treatment of the data. Such developments of the package have produced improved results and expanded its range of application.},
+  copyright = {Elsevier Ltd},
+  editor    = {P.A. Strachan and P.H. Baker},
+  file      = {Gutschker2008.pdf:Gutschker2008.pdf:PDF},
+  groups    = {Buildings},
+  language  = {English},
+  owner     = {pb},
+  timestamp = {2010.11.20},
+}
+
+@Article{Haan1998,
+  author    = {de Haan, Laurens and de Ronde, John},
+  title     = {Sea and Wind: Multivariate Extremes at Work},
+  journal   = {Extremes},
+  year      = {1998},
+  volume    = {1},
+  number    = {1},
+  pages     = {7-45},
+  issn      = {1386-1999},
+  doi       = {10.1023/A:1009909800311},
+  file      = {Haan1998.pdf:Haan1998.pdf:PDF},
+  groups    = {EVT},
+  keywords  = {failure probability; multidimensional extremes in hydrology},
+  language  = {English},
+  owner     = {pb},
+  publisher = {Kluwer Academic Publishers},
+  timestamp = {2013.11.27},
+  url       = {http://dx.doi.org/10.1023/A%3A1009909800311},
+}
+
+@Book{Haan2006,
+  Title                    = {Extreme value theory},
+  Author                   = {de Haan, Laurens and Ferreira, Ana},
+  Publisher                = {Springer},
+  Year                     = {2006},
+
+  File                     = {Haan2006.pdf:Haan2006.pdf:PDF},
+  Owner                    = {pb},
+  Timestamp                = {2014.01.15}
+}
+
+@InProceedings{Halvgaard2015,
+  Title                    = {Stochastic Greybox Modeling of an Alternating Activated Sludge Process},
+  Author                   = {Halvgaard, {Rasmus Fogtmann} and T. Munk-Nielsen and P. Tychsen and M. Grum and Henrik Madsen},
+  Booktitle                = {IWA Specialist Conference, Watermatex},
+  Year                     = {2015},
+  Note                     = {Extended abstract to be presented as poster at Watermatex},
+
+  Keywords                 = {WWTP, Greybox, System Identification},
+  Owner                    = {pb},
+  Timestamp                = {2015.06.15},
+  Type                     = {ConferencePaper}
+}
+
+@Article{Halvgaard2012a,
+  author    = {Halvgaard, Rasmus and Bacher, Peder and Perers, Bengt and Andersen, Elsa and Furbo, Simon and Jørgensen, John B. and Poulsen, Niels K. and Madsen, Henrik},
+  title     = {Model Predictive Control for a Smart Solar Tank Based on Weather and Consumption Forecasts},
+  journal   = {Energy Procedia},
+  year      = {2012},
+  volume    = {30},
+  pages     = {270-278},
+  issn      = {18766102},
+  abstract  = {In this work the heat dynamics of a storage tank were modelled on the basis of data and maximum likelihood methods. The resulting grey-box model was used for Economic Model Predictive Control (MPC) of the energy in the tank. The control objective was to balance the energy from a solar collector and the heat consumption in a residential house. The storage tank provides heat in periods where there is low solar radiation and stores heat when there is surplus solar heat. The forecasts of consumption patterns were based on data obtained from meters in a group of single-family houses in Denmark. The tank can also be heated by electric heating elements if necessary, but the electricity costs of operating these heating elements should be minimized. Consequently, the heating elements should be used in periods with cheap electricity. It is proposed to integrate a price-sensitive control to enable the storage tank to serve a smart energy system in which flexible consumers are expected to help balance fluctuating renewable energy sources like wind and solar. Through simulations, the impact of applying Economic MPC shows annual electricity cost savings up to 25-30\%.},
+  doi       = {10.1016/j.egypro.2012.11.032},
+  file      = {Halvgaard2012a.pdf:Halvgaard2012a.pdf:PDF},
+  groups    = {MPC},
+  owner     = {pb},
+  timestamp = {2013.01.17},
+}
+
+@InProceedings{Halvgaard2012,
+  author    = {Rasmus Halvgaard and Niels Kjølstad Poulsen and Henrik Madsen and John Bagterp Jørgensen},
+  title     = {{E}conomic {M}odel {P}redictive {C}ontrol for {B}uilding {C}limate {C}ontrol in a {S}mart {G}rid},
+  booktitle = {2012 IEEE PES Innovative Smart Grid Technologies (ISGT)},
+  year      = {2012},
+  isbn      = {9781457721588},
+  pages     = {2012ISGT0195},
+  publisher = {IEEE},
+  file      = {Halvgaard2012.pdf:Halvgaard2012.pdf:PDF},
+  groups    = {MPC},
+  owner     = {pb},
+  timestamp = {2012.04.20},
+}
+
+@Article{Hammarsten1987,
+  author                     = {Hammarsten, S},
+  title                      = {A critical-appraisal of energy-signature models},
+  journal                    = {Applied Energy},
+  year                       = {1987},
+  volume                     = {26},
+  number                     = {2},
+  pages                      = {97-110},
+  issn                       = {0306-2619},
+  abstract                   = {The use of different energy-signature (ES) models for energy consumption predictions and building parameter estimations is reviewed. For predictions using time-steps of one day or longer, static ES models are found to be useful. Recommendations for the choice of model are given. The use of ES models for the estimation of building parameters, e.g. for an energy audit, should only be done with great caution, as there can be considerable errors. The development of more sophisticated dynamic models may solve some of the problems encountered with the static models discussed here.},
+  file                       = {Hammarsten1987.pdf:Hammarsten1987.pdf:PDF},
+  groups                     = {Buildings},
+  number-of-cited-references = {13},
+  owner                      = {pb},
+  review                     = {Gives an overview of how to apply linear models.},
+  times-cited                = {7},
+  timestamp                  = {2010.11.16},
+  unique-id                  = {ISI:A1987G368800002},
+}
+
+@Book{Sass2002,
+  Title                    = {The operational DMI-HIRLAM system 2002-version},
+  Author                   = {Hansen Sass, Bent and Woetmann Nielsen, Niels and U. Jørgensen, Jess and Amstrup, Bjarne and Kmit, Maryanne and S. Mogensen, Kristian},
+  Publisher                = {DMI},
+  Year                     = {2002},
+
+  File                     = {Sass2002.pdf:Sass2002.pdf:PDF},
+  Owner                    = {pb},
+  Timestamp                = {2012.09.06}
+}
+
+@Article{Hart1992,
+  author    = {Hart, G.W.},
+  title     = {Nonintrusive appliance load monitoring},
+  journal   = {Proceedings of the IEEE},
+  year      = {1992},
+  volume    = {80},
+  number    = {12},
+  pages     = {1870-1891},
+  issn      = {0018-9219},
+  abstract  = {A nonintrusive appliance load monitor that determines the energy consumption of individual appliances turning on and off in an electric load, based on detailed analysis of the current and voltage of the total load, as measured at the interface to the power source is described. The theory and current practice of nonintrusive appliance load monitoring are discussed, including goals, applications, load models, appliance signatures, algorithms, prototypes field-test results, current research directions, and the advantages and disadvantages of this approach relative to intrusive monitoring},
+  doi       = {10.1109/5.192069},
+  file      = {Hart1992.pdf:Hart1992.pdf:PDF},
+  groups    = {kernelpaper},
+  keywords  = {load (electric);monitoring;power consumption;power system measurement;appliance load monitoring;appliance signatures;current;electric load;energy consumption;load models;nonintrusive monitor;voltage;Current measurement;Electric variables measurement;Energy consumption;Energy measurement;Home appliances;Load modeling;Monitoring;Power measurement;Turning;Voltage},
+  owner     = {pb},
+  timestamp = {2013.11.18},
+}
+
+@Book{Hastie2009,
+  Title                    = {The elements of statistical learning},
+  Author                   = {Hastie, Trevor and Tibshirani, Robert and Friedman, Jerome},
+  Publisher                = {Springer},
+  Year                     = {2009},
+  Number                   = {1},
+  Volume                   = {2},
+
+  File                     = {Hastie2009.pdf:Hastie2009.pdf:PDF},
+  Owner                    = {pb},
+  Quality                  = {1},
+  Timestamp                = {2014.06.03}
+}
+
+@InProceedings{Hay1980,
+  Title                    = {Calculation of the solar radiation incident on an inclined surface},
+  Author                   = {Hay, J.E. and Davies, J.A.},
+  Booktitle                = {Proc. 1st Canadian Solar Radiation Data Workshop},
+  Year                     = {1980},
+  Pages                    = {{}},
+  Volume                   = {59},
+
+  File                     = {Hay1980.pdf:Hay1980.pdf:PDF},
+  Owner                    = {pb},
+  Timestamp                = {2012.03.28}
+}
+
+@Article{Heffernan2004,
+  author    = {Heffernan, Janet E. and Tawn, Jonathan A.},
+  title     = {A conditional approach for multivariate extreme values (with discussion)},
+  journal   = {Journal of the Royal Statistical Society: Series B (Statistical Methodology)},
+  year      = {2004},
+  volume    = {66},
+  number    = {3},
+  pages     = {497-546},
+  issn      = {1467-9868},
+  abstract  = {Multivariate extreme value theory and methods concern the characterization, estimation and extrapolation of the joint tail of the distribution of a d-dimensional random variable. Existing approaches are based on limiting arguments in which all components of the variable become large at the same rate. This limit approach is inappropriate when the extreme values of all the variables are unlikely to occur together or when interest is in regions of the support of the joint distribution where only a subset of components is extreme. In practice this restricts existing methods to applications where d is typically 2 or 3. Under an assumption about the asymptotic form of the joint distribution of a d-dimensional random variable conditional on its having an extreme component, we develop an entirely new semiparametric approach which overcomes these existing restrictions and can be applied to problems of any dimension. We demonstrate the performance of our approach and its advantages over existing methods by using theoretical examples and simulation studies. The approach is used to analyse air pollution data and reveals complex extremal dependence behaviour that is consistent with scientific understanding of the process. We find that the dependence structure exhibits marked seasonality, with extremal dependence between some pollutants being significantly greater than the dependence at non-extreme levels.},
+  doi       = {10.1111/j.1467-9868.2004.02050.x},
+  file      = {Heffernan2004.pdf:Heffernan2004.pdf:PDF},
+  groups    = {EVT},
+  keywords  = {Air pollution, Asymptotic independence, Bootstrap, Conditional distribution, Gaussian estimation, Multivariate extreme value theory, Semiparametric modelling},
+  owner     = {pb},
+  publisher = {Blackwell Publishing},
+  timestamp = {2013.11.27},
+  url       = {http://dx.doi.org/10.1111/j.1467-9868.2004.02050.x},
+}
+
+@Article{Heide2010,
+  author    = {Dominik Heide and Lueder von Bremen and Martin Greiner and Clemens Hoffmann and Markus Speckmann and Stefan Bofinger},
+  title     = {Seasonal optimal mix of wind and solar power in a future, highly renewable Europe},
+  journal   = {Renewable Energy},
+  year      = {2010},
+  volume    = {35},
+  number    = {11},
+  pages     = {2483-2489},
+  issn      = {0960-1481},
+  abstract  = {The renewable power generation aggregated across Europe exhibits strong seasonal behaviors. Wind power generation is much stronger in winter than in summer. The opposite is true for solar power generation. In a future Europe with a very high share of renewable power generation those two opposite behaviors are able to counterbalance each other to a certain extent to follow the seasonal load curve. The best point of counterbalancing represents the seasonal optimal mix between wind and solar power generation. It leads to a pronounced minimum in required stored energy. For a 100% renewable Europe the seasonal optimal mix becomes 55% wind and 45% solar power generation. For less than 100% renewable scenarios the fraction of wind power generation increases and that of solar power generation decreases.},
+  doi       = {10.1016/j.renene.2010.03.012},
+  file      = {Heide2010.pdf:Heide2010.pdf:PDF},
+  groups    = {phdthesis},
+  keywords  = {Wind power generation},
+  owner     = {pb},
+  timestamp = {2012.04.03},
+}
+
+@Article{Heide2011,
+  author    = {Dominik Heide and Martin Greiner and Lüder von Bremen and Clemens Hoffmann},
+  title     = {Reduced storage and balancing needs in a fully renewable European power system with excess wind and solar power generation},
+  journal   = {Renewable Energy},
+  year      = {2011},
+  volume    = {36},
+  number    = {9},
+  pages     = {2515-2523},
+  issn      = {0960-1481},
+  abstract  = {The storage and balancing needs of a simplified European power system, which is based on wind and solar power generation only, are derived from an extensive weather-driven modeling of hourly power mismatches between generation and load. The storage energy capacity, the annual balancing energy and the balancing power are found to depend significantly on the mixing ratio between wind and solar power generation. They decrease strongly with the overall excess generation. At 50% excess generation the required long-term storage energy capacity and annual balancing energy amount to 1% of the annual consumption. The required balancing power turns out to be 25% of the average hourly load. These numbers are in agreement with current hydro storage lakes in Scandinavia and the Alps, as well as with potential hydrogen storage in mostly North-German salt caverns.},
+  doi       = {10.1016/j.renene.2011.02.009},
+  file      = {Heide2011.pdf:Heide2011.pdf:PDF},
+  groups    = {phdthesis},
+  keywords  = {Energy system design},
+  owner     = {pb},
+  timestamp = {2012.04.11},
+  url       = {http://www.sciencedirect.com/science/article/pii/S0960148111000851},
+}
+
+@InProceedings{Heinemann2006,
+  author    = {Heinemann, D. and Lorenz, E. and Girodo, M.},
+  title     = {Forecasting of Solar Radiation},
+  booktitle = {Solar Resource Management for Electricity Generation from Local Level to Global Scale},
+  year      = {2006},
+  editor    = {Dunlop, E.D. and Wald, L. and Suri, M.},
+  pages     = {83-94},
+  address   = {New York},
+  publisher = {Nova Science Publishers},
+  file      = {Heinemann2006.pdf:Heinemann2006.pdf:PDF},
+  groups    = {forecasting},
+  owner     = {pb},
+  timestamp = {2009.03.05},
+}
+
+@Article{Henze2004,
+  author    = {Gregor P. Henze and Clemens Felsmann and Gottfried Knabe},
+  title     = {Evaluation of optimal control for active and passive building thermal storage},
+  journal   = {International Journal of Thermal Sciences},
+  year      = {2004},
+  volume    = {43},
+  number    = {2},
+  pages     = {173-183},
+  issn      = {1290-0729},
+  abstract  = {Cooling of commercial buildings contributes significantly to the peak demand placed on an electrical utility grid. Time-of-use electricity rates encourage shifting of electrical loads to off-peak periods at night and weekends. Buildings can respond to these pricing signals by shifting cooling-related thermal loads either by precooling the building's massive structure or by using active thermal energy storage systems such as ice storage. While these two thermal batteries have been engaged separately in the past, this paper investigates the merits of harnessing both storage media concurrently in the context of optimal control. The objective function is the total utility bill including the cost of heating and a time-of-use electricity rate without demand charges. The evaluation of the combined optimal control assumes perfect weather prediction and plant modeling, which justifies the application of a consecutive time block optimization that optimizes 24 hour horizons sequentially. The analysis shows that the combined utilization leads to cost savings that is significantly greater than either storage but less than the sum of the individual savings. The findings reveal that the cooling-related on-peak electrical demand of commercial buildings can be drastically reduced and justify the development of a predictive optimal controller that accounts for uncertainty in predicted variables and modeling mismatch in real time.},
+  doi       = {10.1016/j.ijthermalsci.2003.06.001},
+  file      = {Henze2004.pdf:Henze2004.pdf:PDF},
+  groups    = {single house forecasting},
+  owner     = {pb},
+  timestamp = {2012.03.27},
+}
+
+@Book{Holter1979,
+  Title                    = {Fysik og energi ressurser},
+  Author                   = {{\O}ivin Holter and Finn Ingebretsen and Hugo Parr},
+  Publisher                = {Universitetsforlaget},
+  Year                     = {1979},
+
+  Address                  = {Oslo, Norway},
+
+  Owner                    = {pb},
+  Timestamp                = {2010.10.04}
+}
+
+@InProceedings{Horvat2013,
+  author    = {Horvat, A. and Plavsic, T. and Kuzle, I.},
+  title     = {Application of extreme value theory for measuring risk of safe operation of power system in Adriatic wind conditions},
+  booktitle = {EUROCON, 2013 IEEE},
+  year      = {2013},
+  pages     = {874-881},
+  month     = {July},
+  abstract  = {Wind power production in Adriatic wind conditions, especially in Croatian Control Area, is extremely dependent on variable wind conditions and therefore very unpredictable. Accurate wind power forecasting is important to ensure safe power system operation and safe electricity supply. Special attention has been paid to application of extreme value theory in defining, evaluating and measuring risk of wind power forecasting and therefore risk of safe power system planning, operation and control.},
+  doi       = {10.1109/EUROCON.2013.6625086},
+  file      = {Horvat2013.pdf:Horvat2013.pdf:PDF},
+  groups    = {EVT},
+  keywords  = {load forecasting;power system control;power system planning;wind power;Adriatic wind conditions;extreme value theory;safe electricity supply;safe power system control;safe power system operation;safe power system planning;variable wind conditions;wind power forecasting;wind power production;Forecasting;Production;Reactive power;Wind forecasting;Wind power generation;control;extreme value theory;forecasting;operation;planning;power system;risk;wind power},
+  owner     = {pb},
+  timestamp = {2014.03.27},
+}
+
+@Book{Huber2003,
+  Title                    = {Robust Statistics},
+  Author                   = {Huber, Peter J.},
+  Publisher                = {John Wiley \& Sons},
+  Year                     = {2003},
+
+  Owner                    = {pb},
+  Quality                  = {1},
+  Timestamp                = {2013.11.08}
+}
+
+@Manual{ICT-REEB2009,
+  Title                    = {ICT for a low carbon society - Smart Buildings},
+  Author                   = {ICT-REEB},
+  Month                    = {July},
+  Year                     = {2009},
+
+  Owner                    = {pb},
+  Timestamp                = {2011.04.11},
+  Url                      = {http://ec.europa.eu/ictforsg},
+  ISBN                     = {978-92-79-12977-3}
+}
+
+@Booklet{Heating2012,
+  title       = {Solar Heat Worldwide},
+  author      = {{IEA - The Solar Heating and Cooling Programme}},
+  year        = {2010},
+  groups      = {phdthesis},
+  lastchecked = {2012-04-03},
+  owner       = {pb},
+  timestamp   = {2012.04.03},
+  url         = {http://www.iea-shc.org},
+}
+
+@Book{IEA2011,
+  title     = {Solar Energy Perspectives - Executive Summary},
+  publisher = {OECD Publishing},
+  year      = {2011},
+  author    = {{IEA, International Energy Agency}},
+  doi       = {10.1787/9789264124585-en},
+  file      = {IEA2011.pdf:IEA2011.pdf:PDF},
+  groups    = {phdthesis},
+  owner     = {pb},
+  timestamp = {2012.03.31},
+}
+
+@TechReport{IEA2011a,
+  author      = {{IEA, International Energy Agency}},
+  title       = {World Energy Statistics},
+  institution = {ESDS International, University of Manchester},
+  year        = {2011},
+  doi         = {10.5257/iea/world.energy.stats/2011},
+  groups      = {phdthesis},
+  owner       = {pb},
+  timestamp   = {2012.04.02},
+}
+
+@Article{Pierre2006,
+  author    = {Pierre Ineichen},
+  title     = {Comparison of eight clear sky broadband models against 16 independent data banks},
+  journal   = {Solar Energy},
+  year      = {2006},
+  volume    = {80},
+  number    = {4},
+  pages     = {468-478},
+  issn      = {0038-092X},
+  abstract  = {A selection of eight high performance clear sky solar irradiance models is evaluated against a set of 16 independent data banks covering 20 years/stations, altitudes from sea level to 1600 m and a large range of different climates. Their performance evaluated on very clear condition measurements are within 4% in term of standard deviation. The conclusions are that the accuracy of the input parameters such as the turbidity is crucial in the validity of the obtained radiation components, and that the choice of a specific model is secondary. The model selection criteria should be based upon either implementation simplicity, input parameter availability (Linke turbidity or aerosol optical depth) or the capacity of the model to produce spectral radiation.},
+  doi       = {10.1016/j.solener.2005.04.018},
+  file      = {Pierre2006.pdf:Pierre2006.pdf:PDF},
+  groups    = {correction},
+  keywords  = {Clear sky irradiance},
+  owner     = {pb},
+  timestamp = {2012.03.17},
+}
+
+@Article{Isaac2009,
+  author    = {Isaac and Moradi},
+  title     = {Quality control of global solar radiation using sunshine duration hours},
+  journal   = {Energy},
+  year      = {2009},
+  volume    = {34},
+  number    = {1},
+  pages     = {1-6},
+  issn      = {0360-5442},
+  abstract  = {The aim of this study was to develop a new and automatic method for controlling the quality of daily global solar radiation, Gd, using sunshine duration hours. The new method has three levels of tests: first, Gd is compared against daily extraterrestrial radiation that is received on a horizontal surface; second, Gd should only exceed by a small amount of the daily clear sky irradiation that is observed under highly transparent clear skies (Gd < 1.1Gcd); and third, the method uses a series of persistence checks that utilize the relation between daily global solar radiation and relative sunshine duration hours. The method is capable of identifying systematic and non-systematic errors and its ability has been shown in three different climates including semi-arid, coastal humid and very arid climates.},
+  doi       = {10.1016/j.energy.2008.09.006},
+  file      = {Isaac2009.pdf:Isaac2009.pdf:PDF},
+  groups    = {correction},
+  keywords  = {Quality control},
+  owner     = {pb},
+  timestamp = {2012.02.14},
+}
+
+@Article{Iversen2014,
+  author    = {Iversen, {Jan Emil Banning} and {Morales González}, {Juan Miguel} and Henrik Madsen},
+  title     = {Optimal charging of an electric vehicle using a Markov decision process},
+  journal   = {Applied Energy},
+  year      = {2014},
+  volume    = {123},
+  pages     = {1--12},
+  issn      = {0306-2619},
+  doi       = {10.1016/j.apenergy.2014.02.003},
+  file      = {Iversen2014.pdf:Iversen2014.pdf:PDF},
+  groups    = {Other models},
+  keywords  = {Electric vehicles, Driving patterns, Optimal charging, Markov processes, Stochastic dynamic programming},
+  owner     = {pb},
+  publisher = {Pergamon},
+  timestamp = {2014.09.04},
+}
+
+@Book{Izenman2008,
+  Title                    = {Modern multivariate statistical techniques: regression, classification, and manifold learning},
+  Author                   = {Izenman, Alan J},
+  Publisher                = {Springer},
+  Year                     = {2008},
+
+  File                     = {Izenman2008.pdf:Izenman2008.pdf:PDF},
+  Owner                    = {pb},
+  Timestamp                = {2013.11.20}
+}
+
+@Article{Jacobson2011,
+  author    = {Mark Z. Jacobson and Mark A. Delucchi},
+  title     = {Providing all global energy with wind, water, and solar power, Part I: Technologies, energy resources, quantities and areas of infrastructure, and materials},
+  journal   = {Energy Policy},
+  year      = {2011},
+  volume    = {39},
+  number    = {3},
+  pages     = {1154-1169},
+  issn      = {0301-4215},
+  abstract  = {Climate change, pollution, and energy insecurity are among the greatest problems of our time. Addressing them requires major changes in our energy infrastructure. Here, we analyze the feasibility of providing worldwide energy for all purposes (electric power, transportation, heating/cooling, etc.) from wind, water, and sunlight (WWS). In Part I, we discuss WWS energy system characteristics, current and future energy demand, availability of WWS resources, numbers of WWS devices, and area and material requirements. In Part II, we address variability, economics, and policy of WWS energy. We estimate that ∼3,800,000 5 MW wind turbines, ∼49,000 300 MW concentrated solar plants, ∼40,000 300 MW solar PV power plants, ∼1.7 billion 3 kW rooftop PV systems, ∼5350 100 MW geothermal power plants, ∼270 new 1300 MW hydroelectric power plants, ∼720,000 0.75 MW wave devices, and ∼490,000 1 MW tidal turbines can power a 2030 WWS world that uses electricity and electrolytic hydrogen for all purposes. Such a WWS infrastructure reduces world power demand by 30% and requires only ∼0.41% and ∼0.59% more of the world's land for footprint and spacing, respectively. We suggest producing all new energy with WWS by 2030 and replacing the pre-existing energy by 2050. Barriers to the plan are primarily social and political, not technological or economic. The energy cost in a WWS world should be similar to that today.},
+  doi       = {10.1016/j.enpol.2010.11.040},
+  file      = {Jacobson2011.pdf:Jacobson2011.pdf:PDF},
+  groups    = {phdthesis},
+  keywords  = {Wind power},
+  owner     = {pb},
+  timestamp = {2012.04.02},
+}
+
+@Article{Jain2014,
+  Title                    = {Forecasting energy consumption of multi-family residential buildings using support vector regression: Investigating the impact of temporal and spatial monitoring granularity on performance accuracy },
+  Author                   = {Rishee K. Jain and Kevin M. Smith and Patricia J. Culligan and John E. Taylor},
+  Journal                  = {Applied Energy },
+  Year                     = {2014},
+  Number                   = {0},
+  Pages                    = {168 - 178},
+  Volume                   = {123},
+
+  Abstract                 = {Buildings are the dominant source of energy consumption and environmental emissions in urban areas. Therefore, the ability to forecast and characterize building energy consumption is vital to implementing urban energy management and efficiency initiatives required to curb emissions. Advances in smart metering technology have enabled researchers to develop “sensor based” approaches to forecast building energy consumption that necessitate less input data than traditional methods. Sensor-based forecasting utilizes machine learning techniques to infer the complex relationships between consumption and influencing variables (e.g., weather, time of day, previous consumption). While sensor-based forecasting has been studied extensively for commercial buildings, there is a paucity of research applying this data-driven approach to the multi-family residential sector. In this paper, we build a sensor-based forecasting model using Support Vector Regression (SVR), a commonly used machine learning technique, and apply it to an empirical data-set from a multi-family residential building in New York City. We expand our study to examine the impact of temporal (i.e., daily, hourly, 10 min intervals) and spatial (i.e., whole building, by floor, by unit) granularity have on the predictive power of our single-step model. Results indicate that sensor based forecasting models can be extended to multi-family residential buildings and that the optimal monitoring granularity occurs at the by floor level in hourly intervals. In addition to implications for the development of residential energy forecasting models, our results have practical significance for the deployment and installation of advanced smart metering devices. Ultimately, accurate and cost effective wide-scale energy prediction is a vital step towards next-generation energy efficiency initiatives, which will require not only consideration of the methods, but the scales for which data can be distilled into meaningful information. },
+  Doi                      = {10.1016/j.apenergy.2014.02.057},
+  File                     = {Jain2014.pdf:Jain2014.pdf:PDF},
+  ISSN                     = {0306-2619},
+  Keywords                 = {Forecasting},
+  Owner                    = {pb},
+  Timestamp                = {2014.06.23},
+  Url                      = {http://www.sciencedirect.com/science/article/pii/S0306261914002013}
+}
+
+@InProceedings{Jansen2012,
+  Title                    = {Impact of control reserve provision of wind farms on regulating power costs and balancing energy prices},
+  Author                   = {Jansen, Malte and Speckmann, Markus and Schwinn, Rainer},
+  Booktitle                = {Proceedings Wind Integration Workshop},
+  Year                     = {2012},
+
+  File                     = {Jansen2012.pdf:Jansen2012.pdf:PDF},
+  Owner                    = {pb},
+  Timestamp                = {2015.08.31}
+}
+
+@Book{Jazwinski1970,
+  Title                    = {Stochastic Processes and Filtering Theory},
+  Author                   = {Jazwinski, A. H.},
+  Publisher                = {Academic Press, New York},
+  Year                     = {1970},
+
+  Owner                    = {pb},
+  Timestamp                = {2013.07.01}
+}
+
+@Article{Jensen1995,
+  author                     = {Jensen, S.O.},
+  title                      = {Validation of building energy simulation programs - a methodology},
+  journal                    = {Energy and Buildings},
+  year                       = {1995},
+  volume                     = {22},
+  number                     = {2},
+  pages                      = {133-144},
+  issn                       = {0378-7788},
+  abstract                   = {The PASSYS project was formed in 1986 by the Commission of the European Communities with the aim of increasing confidence in passive solar heating systems. One of the ways chosen to do this was the approval/development of a European validation methodology for building energy simulation programs. The article summarizes the findings from the work of the Model Validation and Development Subgroup within PASSYS. A detailed description of the work may be found in the following two references: S.O. Jensen (ed.), The PASSYS project phase 1, Subgroup model validation and development, Final report, 1986-1989, EUR 13034 EN, Commission of the European Communities, Directorate General XII for Science, Research and Development, Brussels, Belgium, 1990; and S.O. Jensen (ed.), Validation of building energy simulation programs, Vols. 1 and 2, Research report PASSYS subgroup model validation and development, EUR 15115 EN, Commission of the European Communities, Directorate General XII for Science, Research and Development, Brussels, Belgium, 1994.},
+  file                       = {Jensen1995.pdf:Jensen1995.pdf:PDF},
+  groups                     = {Buildings},
+  number-of-cited-references = {0},
+  owner                      = {pb},
+  times-cited                = {15},
+  timestamp                  = {2010.11.16},
+  unique-id                  = {ISI:A1995RG86800005},
+}
+
+@Article{Ji2011,
+  author    = {Ji, Wu and Chee, Keong Chan},
+  title     = {Prediction of hourly solar radiation using a novel hybrid model of ARMA and TDNN},
+  journal   = {Solar Energy},
+  year      = {2011},
+  volume    = {85},
+  number    = {5},
+  pages     = {808-817},
+  issn      = {0038092x},
+  abstract  = {In this work, a new approach that contains two phases is used to predict the hourly solar radiation series. In the detrending phase, several models are applied to remove the non-stationary trend lying in the solar radiation series. To judge the goodness of different detrending models, the Augmented Dickey-Fuller method is applied to test the stationarity of the residual. The optimal model is used to detrend the solar radiation series. In the prediction phase, the Autoregressive and Moving Average (ARMA) model is used to predict the stationary residual series. Furthermore, the controversial Time Delay Neural Network (TDNN) is applied to do the prediction. Because ARMA and TDNN have their own strength respectively, a novel hybrid model that combines both the ARMA and TDNN, is applied to produce better prediction. The simulation result shows that this hybrid model can take the advantages of both ARMA and TDNN and give excellent result.},
+  doi       = {10.1016/j.solener.2011.01.013},
+  file      = {Ji2011.pdf:Ji2011.pdf:PDF},
+  groups    = {forecasting},
+  owner     = {pb},
+  timestamp = {2011.09.16},
+}
+
+@Article{Jimenez2005,
+  author                     = {Jimenez, MJ and Heras, MR},
+  title                      = {Application of multi-output ARX models for estimation of the U and g values of building components in outdoor testing},
+  journal                    = {Solar Energy},
+  year                       = {2005},
+  volume                     = {79},
+  number                     = {3},
+  pages                      = {302-310},
+  issn                       = {0038-092X},
+  abstract                   = {This paper presents the application of multi-output ARX (Auto-Regression with eXtra inputs) models to estimation of the U values of building components tested outdoors. Multi-output models make it possible to take interaction of the physical quantities involved in the process analysed into account. This approach has been used to analyse two quite different building components. The tests were carried out in one of the PASSYS test cells at the CIEMAT's ``Plataforma Solar de Almeria (PSA){''} in Tabernas (Almeria, Spain). Several data sets were studied for each of these components using multi-output and single-output models, as well as theoretical values, and the results were compared. Very good performance of the multi-output models was observed for the samples analysed. (c) 2004 Elsevier Ltd. All rights reserved.},
+  doi                        = {10.1016/j.solener.2004.10.008},
+  file                       = {Jimenez2005.pdf:Jimenez2005.pdf:PDF},
+  groups                     = {Buildings},
+  number-of-cited-references = {12},
+  owner                      = {pb},
+  times-cited                = {2},
+  timestamp                  = {2010.11.16},
+  unique-id                  = {ISI:000232464300008},
+}
+
+@Article{Jimenez2008a,
+  author    = {Jiménez, M.J. and Madsen, H.},
+  title     = {Models for describing the thermal characteristics of building components},
+  journal   = {Building and Environment},
+  year      = {2008},
+  volume    = {43},
+  number    = {2},
+  pages     = {152-162},
+  issn      = {03601323, 1873684x},
+  abstract  = {Outdoor testing of buildings and building components under real weather conditions provides useful information about their dynamic performance. Such knowledge is needed to properly characterize the heat transfer dynamics and provides useful information for implementing energy saving strategies, for example. For the analysis of these tests, dynamic analysis models and methods are required. However, a wide variety of models and methods exists, and the problem of choosing the most appropriate approach for each particular case is a non-trivial and interdisciplinary task. Knowledge of a large family of these approaches may therefore be very useful for selecting a suitable approach for each particular case. This paper presents an overview of models that can be applied for modelling the thermal characteristics of buildings and building components using data from outdoor testing. The choice of approach depends on the purpose of the modelling, existence of prior physical knowledge, the data and the available statistical tools. In this paper, a variety of models are outlined and compared, and a strong relationship among a large number of widely used linear and stationary stochastic models is mathematically demonstrated. The characteristics of each type of model are highlighted. Some available software tools for each of the methods described will be mentioned. A case study also demonstrating the difference between linear and nonlinear models is considered. © 2006 Elsevier Ltd. All rights reserved.},
+  doi       = {10.1016/j.buildenv.2006.10.029},
+  file      = {Jimenez2008a.pdf:Jimenez2008a.pdf:PDF},
+  groups    = {PerformanceKPI},
+  owner     = {pb},
+  timestamp = {2011.12.13},
+}
+
+@Article{Jimenez2008b,
+  author    = {Jiménez, M.J. and Madsen, H. and Andersen, K.K.},
+  title     = {Identification of the main thermal characteristics of building components using {MATLAB}},
+  journal   = {Building and Environment},
+  year      = {2008},
+  volume    = {43},
+  number    = {2},
+  pages     = {170-180},
+  issn      = {03601323, 1873684x},
+  abstract  = {This paper presents the application of the IDENT Graphical User Interface of MATLAB to estimate the thermal properties of building components from outdoor dynamic testing, imposing appropriate physical constraints and assuming linear and time invariant parametric models. The theory is briefly described to provide the background for a first understanding of the models used. The relationship between commonly used RC-network models and the parametric models proposed is presented. The analysis is generalised for different possibilities in the assignment of inputs and outputs and even multiple output. Step by step guidance illustrated by an example is included. Results obtained using the different possibilities in selecting inputs and outputs are reported. © 2006 Elsevier Ltd. All rights reserved.},
+  doi       = {10.1016/j.buildenv.2006.10.030},
+  file      = {Jimenez2008b.pdf:Jimenez2008b.pdf:PDF},
+  groups    = {single house forecasting, PerformanceKPI},
+  owner     = {pb},
+  timestamp = {2011.12.13},
+}
+
+@Article{Jonsson2012,
+  author    = {Jónsson, T. and Pinson, P. and Nielsen, H. Aa. and Madsen, H. and Nielsen, T.S.},
+  title     = {Forecasting Electricity Spot Prices Accounting for Wind Power Predictions},
+  journal   = {IEEE Transactions on Sustainable Energy},
+  year      = {2012},
+  volume    = {Submitted},
+  pages     = {{}},
+  file      = {Jonsson2012.pdf:Jonsson2012.pdf:PDF},
+  groups    = {Markets},
+  owner     = {pb},
+  timestamp = {2012.04.20},
+}
+
+@Article{Journee2011,
+  author    = {Michel Journée and Cédric Bertrand},
+  title     = {{Quality control of solar radiation data within the RMIB solar measurements network}},
+  journal   = {Solar Energy},
+  year      = {2011},
+  volume    = {85},
+  number    = {1},
+  pages     = {72-86},
+  issn      = {0038-092X},
+  abstract  = {Assessment of the solar resource is based upon measured data, where available. However, with any measurement there exist errors. Consequently, solar radiation data do not exhibit necessarily the same reliability and it often happens that users face time series of measurements containing questionable values though preliminary technical control has been done before the data release. To overcome such a situation, a major effort has been undertaken at the Royal Meteorological Institute of Belgium (RMIB) to develop procedures and software for performing post-measurement quality control of solar data from the radiometric stations of our in situ solar monitoring network. Moreover, because solar energy applications usually need continuous time series of solar radiation data, additional procedures have also been established to fill missing values (data initially lacking or removed via quality checks).},
+  doi       = {10.1016/j.solener.2010.10.021},
+  file      = {Journee2011.pdf:Journee2011.pdf:PDF},
+  groups    = {correction},
+  keywords  = {Global, direct and diffused solar radiation},
+  owner     = {pb},
+  timestamp = {2012.02.14},
+}
+
+@Manual{Juhl2016,
+  Title                    = {Continuous Time Stochastic Modeling in R},
+  Author                   = {Juhl, R. and Kristensen, N. R. and Madsen, H.},
+  Organization             = {DTU Compute},
+  Year                     = {2016},
+
+  Owner                    = {pb},
+  Timestamp                = {2016.07.29},
+  Url                      = {ctsm.info}
+}
+
+@InProceedings{Joergensen2011,
+  author    = {Jørgensen, J.M. and Sørensen, S.H. and Behnke, K. and Eriksen, P.B.},
+  title     = {EcoGrid EU - 2014; A prototype for European Smart Grids},
+  booktitle = {Power and Energy Society General Meeting, 2011 IEEE},
+  year      = {2011},
+  pages     = {1-7},
+  month     = {july},
+  abstract  = {The EcoGrid EU is a large-scale demonstration on the Danish island Bornholm. The aim is to demonstrate a Smart Grids solution to operate a power system with more than 50 % renewable energy, including a mix of variable distributed energy resources (i.e. wind, solar, biomass, biogas, and CHP) and energy storage technologies such as heat pumps, district heating and batteries from EVs. Out of the 28 000 electricity customers on Bornholm, 2000 residential consumers will participate with flexible demand. A major part of the participants will be equipped with residential demand response devices with intelligent controllers, enabling customers to respond to real-time prices and allow users to pre-program their automatic demand-response preferences. This paper presents the concept of a real-time market and information architecture, which gives numerous small end-users and local producers of distributed energy new options for offering TSO additional balancing and ancillary services.},
+  doi       = {10.1109/PES.2011.6038981},
+  file      = {Joergensen2011.pdf:Joergensen2011.pdf:PDF},
+  groups    = {phdthesis},
+  issn      = {1944-9925},
+  keywords  = {CHP generation;EcoGrid EU;European smart grids;automatic demand response preference;biogas energy;biomass energy;district heating;electric vehicle;energy storage technology;heat pumps;information architecture;intelligent controller;power system operation;real time market;real time price;renewable energy;solar energy;variable distributed energy resource;wind energy;cogeneration;distributed power generation;district heating;electric vehicles;energy storage;heat pumps;power markets;renewable energy sources;smart power grids;},
+  owner     = {pb},
+  timestamp = {2012.04.01},
+}
+
+@Article{Karlsson2008,
+  Title                    = {Optimal investment paths for future renewable based energy systems—Using the optimisation model Balmorel},
+  Author                   = {Kenneth Karlsson and Peter Meibom},
+  Journal                  = {International Journal of Hydrogen Energy},
+  Year                     = {2008},
+  Number                   = {7},
+  Pages                    = {1777-1787},
+  Volume                   = {33},
+
+  Abstract                 = {This paper investigates a possible long term investment path for the Nordic energy system focussing on renewable energy in the supply sector and on hydrogen as the main fuel for transportation, covering up to 70% of all transport in 2050. The optimisation model Balmorel [Ravn H, et al. Balmorel: A model for analyses of the electricity and CHP markets in the Baltic Sea Region. 〈 www.Balmorel.com 〉 ; 2001. [1]] covering the Nordic energy system is used. The model has been expanded to include the modelling of hydrogen production technologies, storage and hydrogen power plants. The simulation shows that with an oil price at 100 $/barrel, a CO2 price at 40 €/ton and the assumed penetration of hydrogen in the transport sector, it is economically optimal to cover more than 95% of the primary energy consumption for electricity and district heat by renewables in 2050. When the transport sector is converted as assumed 65% of the transportation relies on renewable energy.},
+  Doi                      = {10.1016/j.ijhydene.2008.01.031},
+  File                     = {Karlsson2008.pdf:Karlsson2008.pdf:PDF},
+  ISSN                     = {0360-3199},
+  Keywords                 = {Renewable energy systems},
+  Owner                    = {pb},
+  Timestamp                = {2012.04.13},
+  Url                      = {http://www.sciencedirect.com/science/article/pii/S0360319908000888}
+}
+
+@Article{Kloeden1994,
+  Title                    = {Numerical solution of SDE through computer experiments},
+  Author                   = {Kloeden, Peter E and Platen, Eckhard and Schurz, Henri},
+  Year                     = {1994},
+
+  Owner                    = {pb},
+  Publisher                = {Springer Berlin},
+  Timestamp                = {2013.07.04}
+}
+
+@Manual{Koenker2011,
+  Title                    = {quantreg: Quantile Regression},
+  Author                   = {Roger Koenker},
+  Note                     = {R package version 4.76},
+  Year                     = {2011},
+
+  Owner                    = {pb},
+  Timestamp                = {2012.01.30},
+  Url                      = {http://CRAN.R-project.org/package=quantreg}
+}
+
+@Book{Koenker2005,
+  Title                    = {Quantile Regression},
+  Author                   = {Roger Koenker},
+  Publisher                = {Cambridge University Press},
+  Year                     = {2005},
+
+  Abstract                 = {Quantile regression is gradually emerging as a unified statistical methodology for estimating models of conditional quantile functions. By complementing the exclusive focus of classical least squares regression on the conditional mean, quantile regression offers a systematic strategy for examining how covariates influence the location, scale and shape of the entire response distribution. This monograph is the first comprehensive treatment of the subject, encompassing models that are linear and nonlinear, parametric and nonparametric. The author has devoted more than 25 years of research to this topic. The methods in the analysis are illustrated with a variety of applications from economics, biology, ecology and finance. The treatment will find its core audiences in econometrics, statistics, and applied mathematics in addition to the disciplines cited above.},
+  Owner                    = {pb},
+  Timestamp                = {2009.02.25}
+}
+
+@Article{Koeppel2006,
+  Title                    = {Using storage devices for compensating uncertainties caused by non-dispatchable generators},
+  Author                   = {Koeppel, Gaudenz and Korpas, Magnus},
+  Journal                  = {2006 International Conference on Probabilistic Methods Applied to Power Systems},
+  Year                     = {2006},
+  Pages                    = {1-8},
+
+  Abstract                 = {This paper presents a study on combining a grid-connected stochastic generator with an energy storage device. The storage device is used to balance the power fluctuations of the non-dispatchable generator in order to feed power into the network according to an hourly pre-determined constant profile. The negative effects of inaccurate forecasts are thus reduced by turning the stochastic generation into a deterministic network infeed. Important parameters for achieving this goal are the energy capacity of the energy storage and the error magnitude of the forecast used to define the infeed profile. A general method for simulations based on a measurement series is presented, together with a method for the simulation of forecasts with different forecast errors. The methodology is then applied in a case study to investigate the feasibility of using a storage device for hourly balancing. The emphasis is on the relation between infeed accuracy, forecast error and energy capacity of the storage},
+  File                     = {:/home/pb/literature/articles/using_storage_devices_for_compensating_uncertainties_caused_by_non_dispatchable_generators.pdf:PDF},
+  ISBN                     = {9789171785855},
+  Owner                    = {pb},
+  Timestamp                = {2009.02.25}
+}
+
+@Article{Kong2012,
+  Title                    = {An improved dynamic test method for solar collectors},
+  Author                   = {Kong, Weiqiang and Wang, Zhifeng and Fan, Jianhua and Bacher, Peder and Perers, Bengt and Chen, Ziqian and Furbo, Simon},
+  Journal                  = {Solar Energy},
+  Year                     = {2012},
+  Number                   = {6},
+  Pages                    = {1838-1848},
+  Volume                   = {86},
+
+  Abstract                 = {A comprehensive improvement of the mathematical model for the so called transfer function method is presented in this study. This improved transfer function method can estimate the traditional solar collector parameters such as zero loss coefficient and heat loss coefficient. Two new collector parameters t and mfCf are obtained. t is a time scale parameter which can indicate the heat transfer ability of the solar collector. mfCf can be used to calculate the fluid volume content in the solar collector or to validate the regression process by comparing it to the physical fluid volume content if known. Experiments were carried out under dynamic test conditions and then test data were processed using multi-linear regression method to get collector parameters with statistic analysis. A comparison of the collector parameters obtained from the improved transfer function (ITF) method and the quasi-dynamic test (QDT) method is carried out. The results show that the improved transfer function method can accurately obtain reasonable collector parameters. The influence of different averaging time intervals is investigated. Based on the investigation it is recommended to use on line calculation if applicable for the second-order differential term with 6–9 min as the best averaging time interval. The measured and predicted collector power output of the solar collector are compared during a test of 13 days continuously both for the ITF method and the QDT method. The maximum and averaging error is 53.87 W/m2 and 5.22 W/m2 respectively of the ITF method while 64.13 W/m2 and 6.22 W/m2 of the QDT method. Scatter and relative error distribution of the measured power output versus the predicted power output is also plotted for the two methods. No matter in either error analysis or scatter distribution, the ITF method is more accurate than the QDT method in predicting the power output of a solar collector. In conclusion, all the results show that the improved transfer function method can accurately and robustly estimate solar collector parameters and predict solar collector thermal performance under dynamic test conditions.},
+  Doi                      = {10.1016/j.solener.2012.03.002},
+  File                     = {Kong2012.pdf:Kong2012.pdf:PDF},
+  ISSN                     = {0038092x},
+  Owner                    = {pb},
+  Timestamp                = {2013.01.17}
+}
+
+@Article{Kosny2002,
+  author                     = {Kosny, J and Kossecka, E},
+  title                      = {Multi-dimensional heat transfer through complex building envelope assemblies in hourly energy simulation programs},
+  journal                    = {ENERGY AND BUILDINGS},
+  year                       = {2002},
+  volume                     = {34},
+  number                     = {5},
+  pages                      = {445-454},
+  month                      = {JUN},
+  issn                       = {0378-7788},
+  abstract                   = {In most whole building thermal modeling computer programs like DOE-2, BLAST, or ENERGY PLUS simplified, one-dimensional, parallel path, descriptions of building envelope are used. For several structural and material configurations of building envelope components containing high thermal mass and/or two- and three-dimensional thermal bridges, one-dimensional analysis may generate serious errors in building loads estimation. The method of coupling three-dimensional heat transfer modeling and dynamic hot-box tests for complex wall systems with the whole building thermal simulations is presented in this paper. This procedure can increase the accuracy of the whole building thermal modeling. Current thermal modeling and calculation procedures tend to overestimate the actual field thermal performance of today's popular building envelope designs, which utilize modern building technologies (sometimes highly conductive structural materials) and feature large fenestration areas and floor plans with many exterior wall corners. Some widely used computer codes were calibrated using field data obtained from light weight wood frame buildings. The same codes are used now for thermal modeling of high mass buildings with significant heat accumulation effects. Also, the effects of extensive thermal shorts on the whole building thermal performance are not accurately reflected by the commonly used one-dimensional energy simulations that are the current bases for building envelopes and systems designing. (C) 2002 Elsevier Science B.V. All rights reserved.},
+  file                       = {Kosny2002.pdf:Kosny2002.pdf:PDF},
+  groups                     = {Buildings},
+  number-of-cited-references = {15},
+  owner                      = {pb},
+  times-cited                = {20},
+  timestamp                  = {2010.11.17},
+  unique-id                  = {ISI:000175636100003},
+}
+
+@Article{Kossecka2002,
+  author                     = {Kossecka, E and Kosny, J},
+  title                      = {Influence of insulation configuration on heating and cooling loads in a continuously used building},
+  journal                    = {ENERGY AND BUILDINGS},
+  year                       = {2002},
+  volume                     = {34},
+  number                     = {4},
+  pages                      = {321-331},
+  month                      = {MAY},
+  issn                       = {0378-7788},
+  abstract                   = {This paper is focused on the energy performance of buildings containing massive exterior building envelope components. The effect of mass and insulation location on heating and cooling loads is analyzed for six characteristic wall configurations. Correlations between structural and dynamic thermal characteristics of walls are discussed. A simple one-room model of a building exposed to periodic temperature changes is analyzed to illustrate the effect of material configuration on the ability of a wall to dampen interior temperature swings. Whole-building dynamic modeling using DOE-2.1E is employed for the energy analysis of a one-story residential building with various exterior wall configurations for six different US climates. The best thermal performance is obtained when massive material layers are located at the inner side and directly exposed to the interior space. (C) 2002 Elsevier Science B.V. All rights reserved.},
+  file                       = {Kossecka2002.pdf:Kossecka2002.pdf:PDF},
+  groups                     = {Buildings},
+  number-of-cited-references = {24},
+  owner                      = {pb},
+  times-cited                = {18},
+  timestamp                  = {2010.11.16},
+  unique-id                  = {ISI:000173945600002},
+}
+
+@Book{Kotz1988,
+  title     = {Multivariate Extreme-Value Theory},
+  publisher = {Wiley Online Library},
+  year      = {1988},
+  author    = {Kotz, Samuel and Nadarajah, Saraless},
+  groups    = {EVT},
+  owner     = {pb},
+  timestamp = {2014.04.10},
+}
+
+@Book{Larsen2015,
+  Title                    = {DTU International Energy Report 2015: Energy systems integration for the transition to non-fossil energy systems},
+  Author                   = {Larsen, {Hans Hvidtfeldt} and {Sønderberg Petersen}, Leif},
+  Publisher                = {Technical University of Denmark},
+  Year                     = {2015},
+
+  ISBN                     = {978-87-550-3970-4},
+  Owner                    = {pbac},
+  Timestamp                = {2016.11.08}
+}
+
+@InProceedings{Lethe2014,
+  Title                    = {An adapted co-heating test and experimental infrastructure for thermal dynamic response and performance identification of residential buildings},
+  Author                   = {Lethé, Guillaume and Steskens, Paul and Flamant, Gilles and Meurisse, Brieuc},
+  Booktitle                = {9th SSB Conference, 10-12 December 2014, Liège},
+  Year                     = {2014},
+
+  File                     = {Lethe2014.pdf:Lethe2014.pdf:PDF},
+  Owner                    = {pb},
+  Timestamp                = {2015.03.05}
+}
+
+@Article{Lew2010,
+  Title                    = {Impact of high solar penetration in the western interconnection},
+  Author                   = {Lew, D. and Miller, N. and Clark, K. and Jordan, G. and Gao, Z.},
+  Journal                  = {Contract},
+  Year                     = {2010},
+  Pages                    = {275-3000},
+  Volume                   = {303},
+
+  File                     = {Lew2010.pdf:Lew2010.pdf:PDF},
+  Owner                    = {pb},
+  Timestamp                = {2012.03.31}
+}
+
+@Article{Lewis2006,
+  author    = {Lewis, Nathan S. and Nocera, Daniel G.},
+  title     = {Powering the planet: Chemical challenges in solar energy utilization},
+  journal   = {Proceedings of the National Academy of Sciences},
+  year      = {2006},
+  volume    = {103},
+  number    = {43},
+  pages     = {15729-15735},
+  abstract  = {Global energy consumption is projected to increase, even in the face of substantial declines in energy intensity, at least 2-fold by midcentury relative to the present because of population and economic growth. This demand could be met, in principle, from fossil energy resources, particularly coal. However, the cumulative nature of CO2 emissions in the atmosphere demands that holding atmospheric CO2 levels to even twice their preanthropogenic values by midcentury will require invention, development, and deployment of schemes for carbon-neutral energy production on a scale commensurate with, or larger than, the entire present-day energy supply from all sources combined. Among renewable energy resources, solar energy is by far the largest exploitable resource, providing more energy in 1 hour to the earth than all of the energy consumed by humans in an entire year. In view of the intermittency of insolation, if solar energy is to be a major primary energy source, it must be stored and dispatched on demand to the end user. An especially attractive approach is to store solar-converted energy in the form of chemical bonds, i.e., in a photosynthetic process at a year-round average efficiency significantly higher than current plants or algae, to reduce land-area requirements. Scientific challenges involved with this process include schemes to capture and convert solar energy and then store the energy in the form of chemical bonds, producing oxygen from water and a reduced fuel such as hydrogen, methane, methanol, or other hydrocarbon species.},
+  doi       = {10.1073/pnas.0603395103},
+  eprint    = {http://www.pnas.org/content/103/43/15729.full.pdf+html},
+  file      = {Lewis2006.pdf:Lewis2006.pdf:PDF},
+  groups    = {phdthesis},
+  owner     = {pb},
+  timestamp = {2012.04.02},
+}
+
+@Article{Liisberg2016,
+  Title                    = {Hidden Markov Models for indirect classification of occupant behaviour},
+  Author                   = {Liisberg, J. and Møller, J.K. and Bloem, H. and Cipriano, J. and Mor, G. and Madsen, H.},
+  Journal                  = {Sustainable Cities and Society},
+  Year                     = {2016},
+  Pages                    = {83-98},
+  Volume                   = {27},
+
+  Abstract                 = {Even for similar residential buildings, a huge variability in the energy consumption can be observed. This variability is mainly due to the different behaviours of the occupants and this impacts the thermal (temperature setting, window opening, etc.) as well as the electrical (appliances, TV, computer, etc.) consumption. It is very seldom to find direct observations of occupant presence and behaviour in residential buildings. However, given the increasing use of smart metering, the opportunity and potential for indirect observation and classification of occupants’ behaviour is possible. This paper focuses on the use of Hidden Markov Models (HMMs) to create methods for indirect observations and characterisation of occupant behaviour. By applying homogeneous HMMs on the electricity consumption of fourteen apartments, three states describing the data were found suitable. The most likely sequence of states was determined (global decoding). From reconstruction of the states, dependencies like ambient air temperature were investigated. Combined with an occupant survey, this was used to classify/interpret the states as (1) absent or asleep, (2) home, medium consumption and (3) home, high consumption. From the global decoding, the average probability profiles with respect to time of day were investigated, and four distinct patterns of occupant behaviour were observed. Based on the initial results of the homogeneous HMMs and with the observed dependencies, time dependent HMMs (inhomogeneous HMMs) were developed, which improved forecasting. For both the homogeneous and inhomogeneous HMMs, indications of common parameters were observed, which suggests further development of the HMMs as population models.},
+  Doi                      = {10.1016/j.scs.2016.07.001},
+  File                     = {Liisberg2016.pdf:Liisberg2016.pdf:PDF},
+  ISSN                     = {22106715, 22106707},
+  Language                 = {eng},
+  Owner                    = {pbac},
+  Timestamp                = {2016.10.10}
+}
+
+@MastersThesis{Linnet2005,
+  author    = {Linnet, Ulfar},
+  title     = {Tools supporting wind energy trade in deregulated markets},
+  school    = {Technical University of Denmark, DTU, DK-2800 Kgs. Lyngby, Denmark},
+  year      = {2005},
+  file      = {Linnet2005.pdf:Linnet2005.pdf:PDF},
+  groups    = {OptimalBidding},
+  owner     = {pb},
+  timestamp = {2015.12.18},
+}
+
+@Article{Lodi2012,
+  author    = {Lodi, C. and Bacher, P. and Cipriano, J. and Madsen, H.},
+  title     = {Modelling the heat dynamics of a monitored Test Reference Environment for Building Integrated Photovoltaic systems using stochastic differential equations},
+  journal   = {Energy \& Buildings},
+  year      = {2012},
+  volume    = {50},
+  pages     = {273-281},
+  issn      = {03787788},
+  abstract  = {This paper deals with grey-box modelling of the energy transfer of a double skin Building Integrated Photovoltaic (BIPV) system. Grey-box models are based on a combination of prior physical knowledge and statistics, which enable identification of the unknown parameters in the system and accurate prediction of the most influential variables. The experimental data originates from tests carried out with an air-based BIPV system installed in a Test Reference Environment. BIPV systems represent an interesting application for achieving the requirements of the EU EPBD Directive. Indeed, these systems could reduce the ventilation thermal losses of the building by pre-heating the fresh air. Furthermore, by decreasing PV module temperature, the ventilation air heat extraction can simultaneously increase electrical and thermal energy production of the building. A correct prediction of the PV module temperature and heat transfer coefficients is fundamental in order to improve the thermo-electrical production. The considered grey-box models are composed of a set of continuous time stochastic differential equations, holding the physical description of the system, combined with a set of discrete time measurement equations, which represent the data driven part. In the present work, both one-state and two-state non-linear grey-box models are considered. In order to validate the results, the residuals are analysed for white-noise properties.},
+  doi       = {10.1016/j.enbuild.2012.03.046},
+  file      = {Lodi2012.pdf:Lodi2012.pdf:PDF},
+  groups    = {Buildings},
+  owner     = {pb},
+  timestamp = {2013.01.17},
+}
+
+@Article{Lomas1997,
+  author                     = {Lomas, KJ and Eppel, H and Martin, CJ and Bloomfield, DP},
+  title                      = {Empirical validation of building energy simulation programs},
+  journal                    = {ENERGY AND BUILDINGS},
+  year                       = {1997},
+  volume                     = {26},
+  number                     = {3},
+  pages                      = {253-275},
+  issn                       = {0378-7788},
+  abstract                   = {The largest-ever exercise to validate dynamic thermal simulation programs (DSPs) of buildings has recently been completed. It involved 25 program/user combinations from Europe, the USA and Australia, and included both commercial and public domain programs. Predictions were produced for three single zone test rooms in the UK. These had either a single-glazed or double-glazed south-facing window, or no window at all. In one 10-day period the rooms were intermittently heated and in another 10-day period they were unheated. The predictions of heating energy demands and air temperatures were compared. The observed interprogram variability was highly likely to be due to inherent differences between the DSPs, rather than the way they were used. Predictions of the difference in performance of two rooms were no more consistent than predictions of the absolute performance of a single room. By comparing the predictions with the measurements and taking due account of experimental uncertainty, the DSPs that are likely to contain significant internal errors are distinguished from those which, in these tests, performed much better. The likely sources of internal error are discussed. It is recommended that empirical validation exercises should consist of an initial blind phase in which program users are unaware of the actual measured performance of the building, and then an open phase in which the measurements are made available. The work has produced five empirical validation benchmarks, which have significant practical benefits for program users, vendors and potential purchasers. There is considerable scope for improving the predictive ability of DSPs and so suggestions for further work are made. (C) 1997 Building Research Establishment. Published by Elsevier Science S.A.},
+  file                       = {Lomas1997.pdf:Lomas1997.pdf:PDF},
+  groups                     = {Buildings},
+  number-of-cited-references = {35},
+  owner                      = {pb},
+  times-cited                = {30},
+  timestamp                  = {2010.11.16},
+  unique-id                  = {ISI:A1997YE99400003},
+}
+
+@InProceedings{Lorenz2007,
+  author    = {Lorenz, E. and Heinemann, D. and Wickramarathne, H. and Beyer, H.G. and Bofinger, S.},
+  title     = {Forecast of Ensemble Power Production by Grid-connected PV Systems},
+  booktitle = {Proc. 20th European PV Conference, September 3-7, 2007, Milano},
+  year      = {2007},
+  abstract  = {The contribution of power production by PV systems to the electricity supply is constantly increasing. An efficient use of the fluctuating solar power production will highly benefit from forecast information on the expected power production. This forecast information is necessary for the management of the electricity grids and for solar energy trading. This paper will present and evaluate an approach to forecast regional PV power production. The forecast quality was investigated for single systems and for ensembles of distributed PV systems. Due to spatial averaging effects the forecast for an ensemble of distributed systems shows higher quality than the forecast for single systems. Forecast errors are reduced to an RMSE of 0.05 Wh/Wp for an ensemble of the size of Germany compared to a RMSE of 0.13 Wh/Wp for single PV systems. Besides the forecast accuracy, also the specification of the forecast uncertainty is an important issue for an effective application. An approach to derive weather specific confidence intervals is presented that describe the maximum expected uncertainty of the forecast.},
+  file      = {Lorenz2007.pdf:Lorenz2007.pdf:PDF},
+  groups    = {forecasting},
+  owner     = {pb},
+  timestamp = {2009.02.25},
+}
+
+@Article{Lorenz2009,
+  author    = {Lorenz, E. and Hurka, J. and Heinemann, D. and Beyer, H.G.},
+  title     = {Irradiance Forecasting for the Power Prediction of Grid-Connected Photovoltaic Systems},
+  journal   = {IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing},
+  year      = {2009},
+  volume    = {2},
+  number    = {1},
+  pages     = {2-10},
+  issn      = {19391404, 21511535},
+  abstract  = {The contribution of power production by photovoltaic (PV) systems to the electricity supply is constantly increasing. An efficient use of the fluctuating solar power production will highly benefit from forecast information on the expected power production. This forecast information is necessary for the management of the electricity grids and for solar energy trading. This paper presents an approach to predict regional PV power output based on forecasts up to three days ahead provided by the European Centre for Medium-Range Weather Forecasts (ECMWF). Focus of the paper is the description and evaluation of the approach of irradiance forecasting, which is the basis for PV power prediction. One day-ahead irradiance forecasts for single stations in Germany show a rRMSE of 36\%. For regional forecasts, forecast accuracy is increasing in dependency on the size of the region. For the complete area of Germany, the rRMSE amounts to 13\%. Besides the forecast accuracy, also the specification of the forecast uncertainty is an important issue for an effective application. We present and evaluate an approach to derive weather specific prediction intervals for irradiance forecasts. The accuracy of PV power prediction is investigated in a case study.},
+  file      = {Lorenz2009.pdf:Lorenz2009.pdf:PDF},
+  groups    = {forecasting},
+  owner     = {pb},
+  timestamp = {2011.09.16},
+}
+
+@Article{Lorenz2011,
+  author    = {Lorenz, Elke and Scheidsteger, Thomas and Hurka, Johannes and Heinemann, Detlev and Kurz, Christian},
+  title     = {Regional PV power prediction for improved grid integration},
+  journal   = {Progress in Photovoltaics: Research and Applications},
+  year      = {2011},
+  volume    = {19},
+  number    = {7},
+  pages     = {757-771},
+  issn      = {1099-159X},
+  abstract  = {The contribution of power production from PV systems to the electricity supply is constantly increasing. An efficient use of the fluctuating solar power production will highly benefit from forecast information on the expected power production, as a basis for management of the electricity grids and trading on the energy market. We present and evaluate the regional PV power prediction system of University of Oldenburg and Meteocontrol GmbH providing forecasts of up to 2 days ahead with hourly resolution. The proposed approach is based on forecasts of the global model of the European Centre for Medium-Range Forecasts (ECMWF). It includes a post-processing procedure to derive optimised, site-specific irradiance forecasts and explicit physical modelling steps to convert the predicted irradiances to PV power. Finally, regional power forecasts are derived by up-scaling from a representative set of PV systems. The investigation of proper up-scaling is a special focus of this paper. We introduce a modified up-scaling approach, modelling the spatial distribution of the nominal power with a resolution of 1° × 1°. The operational PV power prediction system is evaluated in comparison to the modified up-scaling approach for the control areas of the two German transmission system operators ‘transpower’ and ‘50 Hertz’ for the period 2.7.2009–30.4.2010. rmse values of the operational forecasts are in the range of 4–5% with respect to the nominal power for intra-day and day-ahead forecast horizons. Further improvement is achieved with the modified up-scaling approach. Copyright © 2010 John Wiley & Sons, Ltd.},
+  file      = {Lorenz2011.pdf:Lorenz2011.pdf:PDF},
+  groups    = {forecasting, phdthesis},
+  keywords  = {PV power prediction, grid integration, irradiance prediction, PV simulation},
+  owner     = {pb},
+  publisher = {John Wiley \& Sons, Ltd.},
+  timestamp = {2012.04.12},
+}
+
+@PhdThesis{Lundholm2013,
+  Title                    = {Verification of Global Radiation Forecasts from the Ensemble Prediction System at DMI},
+  Author                   = {Sisse Camilla Lundholm},
+  School                   = {The PhD School of Science, Faculty of Science, University of Copenhagen},
+  Year                     = {2013},
+
+  Owner                    = {pb},
+  Timestamp                = {2013.05.05}
+}
+
+@Book{Madsen2015,
+  Title                    = {Thermal Performance Characterization using Time Series Data - IEA EBC Annex 58 Guidelines},
+  Author                   = {Henrik Madsen and Peder Bacher and Geert Bauwens and An-Heleen Deconinck and Glenn Reynders and Staf Roels and Eline Himpe and Guillaume Lethe},
+  Publisher                = {Technical University of Denmark (DTU)},
+  Year                     = {2015},
+
+  Owner                    = {pb},
+  Timestamp                = {2016.07.29}
+}
+
+@Article{Madsen2005,
+  author    = {Madsen, Henrik and Pinson, Pierre and Kariniotakis, George and Nielsen, Henrik Aa and Nielsen, Torben S},
+  title     = {Standardizing the Performance Evaluation of Short-Term Wind Power Prediction Models},
+  journal   = {Wind Engineering},
+  year      = {2005},
+  volume    = {29},
+  number    = {6},
+  pages     = {475},
+  issn      = {0309524x},
+  file      = {Madsen2005.pdf:Madsen2005.pdf:PDF},
+  groups    = {Forecasting, wind},
+  owner     = {pb},
+  timestamp = {2009.02.25},
+}
+
+@Book{Madsen2010,
+  title     = {Introduction to General and Generalized Linear Models},
+  publisher = {CRC Press},
+  year      = {2010},
+  author    = {Madsen, H. and Thyregod, P.},
+  groups    = {TheoreticalModeling},
+  owner     = {pb},
+  timestamp = {2010.11.22},
+}
+
+@Article{Marchio1991,
+  author                     = {Marchio, D and Rabl, A},
+  title                      = {Energy-efficient gas-heated housing in France - predicted and observed performance},
+  journal                    = {Energy and Buildings},
+  year                       = {1991},
+  volume                     = {17},
+  number                     = {2},
+  pages                      = {131-139},
+  issn                       = {0378-7788},
+  abstract                   = {This report presents selected results of an analysis of data collected by GdF, the gas utility of France, during a project to monitor the performance of energy-efficient gas-heated houses and apartments in France. The data base includes the performance predicted at the design stage, as well as supplementary information obtained by questionnaire from the residents. 220 residences yielded sufficiently complete and reliable data to permit weather correction to be carried out by PRISM software. The results show, on average, fairly close agreement between the observed consumption C(tot,obs) and the consumption C(tot,the) (predicted according to the French procedure), although with some tendency towards overconsumption (or underprediction), C(tot,obs) being 6\% higher than C(tot,the). Among individual consumers there is much scatter, especially for apartments. Characterized in terms of the r.m.s. variation of the ratio C(tot,obs)/C(tot,the), the scatter is 0.3 for single-family residences and 0.5 for apartments. The scatter appears to be mostly due to behavioural effects, as suggested by the large variation (almost 2 to 1) between highest and lowest consumption in a group of six identical row houses. A detailed comparison between predicted and observed PRISM parameters (heating slope, base level and balance temperature) is presented for a subset of 82 single-family houses where the uncertainties are sufficiently small. Most of the overconsumption arises from differences between predicted and observed values of the base level (hot water and cooking), whereas for space heating prediction and observation agree within one percent. An attempt to correlate the ratio C(tot,obs)/C(tot,the) with data from the questionnaire has remained inconclusive for the most part.},
+  file                       = {Marchio1991.pdf:Marchio1991.pdf:PDF},
+  groups                     = {Buildings},
+  number-of-cited-references = {0},
+  owner                      = {pb},
+  review                     = {Doesn't seem to be interesting concerning modelling techniques.},
+  times-cited                = {2},
+  timestamp                  = {2010.11.16},
+  unique-id                  = {ISI:A1991GC59300005},
+}
+
+@Article{Marquez2011,
+  author    = {Ricardo Marquez and Carlos F.M. Coimbra},
+  title     = {Forecasting of global and direct solar irradiance using stochastic learning methods, ground experiments and the NWS database},
+  journal   = {Solar Energy},
+  year      = {2011},
+  volume    = {85},
+  number    = {5},
+  pages     = {746-756},
+  issn      = {0038-092X},
+  doi       = {10.1016/j.solener.2011.01.007},
+  file      = {Marquez2011.pdf:Marquez2011.pdf:PDF},
+  groups    = {NotReadYet, forecasting},
+  keywords  = {Solar irradiance forecasting},
+  owner     = {pb},
+  timestamp = {2011.05.19},
+}
+
+@Article{Mathiesen2011,
+  author    = {Brian Vad Mathiesen and Henrik Lund and Kenneth Karlsson},
+  title     = {100% Renewable energy systems, climate mitigation and economic growth},
+  journal   = {Applied Energy},
+  year      = {2011},
+  volume    = {88},
+  number    = {2},
+  pages     = {488-501},
+  issn      = {0306-2619},
+  abstract  = {Greenhouse gas mitigation strategies are generally considered costly with world leaders often engaging in debate concerning the costs of mitigation and the distribution of these costs between different countries. In this paper, the analyses and results of the design of a 100% renewable energy system by the year 2050 are presented for a complete energy system including transport. Two short-term transition target years in the process towards this goal are analysed for 2015 and 2030. The energy systems are analysed and designed with hour-by-hour energy system analyses. The analyses reveal that implementing energy savings, renewable energy and more efficient conversion technologies can have positive socio-economic effects, create employment and potentially lead to large earnings on exports. If externalities such as health effects are included, even more benefits can be expected. 100% Renewable energy systems will be technically possible in the future, and may even be economically beneficial compared to the business-as-usual energy system. Hence, the current debate between leaders should reflect a combination of these two main challenges.},
+  doi       = {10.1016/j.apenergy.2010.03.001},
+  file      = {Mathiesen2011.pdf:Mathiesen2011.pdf:PDF},
+  groups    = {phdthesis},
+  keywords  = {100% Renewable energy systems},
+  owner     = {pb},
+  timestamp = {2012.04.02},
+}
+
+@Article{Matics2008,
+  author    = {Jens Matics and Gerhard Krost},
+  title     = {Micro combined heat and power home supply: Prospective and adaptive management achieved by computational intelligence techniques},
+  journal   = {Applied Thermal Engineering},
+  year      = {2008},
+  volume    = {28},
+  number    = {16},
+  pages     = {2055-2061},
+  issn      = {1359-4311},
+  abstract  = {Micro combined heat and power (CHP) systems for single residential buildings are seen as advantageous to combine both decentralized power supply and rather high overall efficiency. The latter presupposes flexible and adaptive plant management which has to mediate between energy cost minimization and user comfort aspects. This is achieved by use of computational intelligence (CI) techniques; structure and performance of the management system are shown.},
+  doi       = {10.1016/j.applthermaleng.2008.05.002},
+  file      = {Matics2008.pdf:Matics2008.pdf:PDF},
+  groups    = {single house forecasting},
+  keywords  = {Distributed generation},
+  owner     = {pb},
+  timestamp = {2012.02.24},
+}
+
+@Article{Mei2003,
+  author                     = {Mei, L and Infield, D and Eicker, U and Fux, V},
+  title                      = {Thermal modelling of a building with an integrated ventilated PV facade},
+  journal                    = {ENERGY AND BUILDINGS},
+  year                       = {2003},
+  volume                     = {35},
+  number                     = {6},
+  pages                      = {605-617},
+  month                      = {JUL},
+  issn                       = {0378-7788},
+  abstract                   = {This paper presents a dynamic thermal model based on TRNSYS, for a building with an integrated ventilated PV facade/solar air collector system. The building model developed has been validated against experimental data from a 6.5 m high PV facade on the Mataro Library near Barcelona. Preheating of the ventilation air within the facade is through incident solar radiation heating of the PV elements and subsequent heat transmission to the air within the ventilation gap. The warmed air can be used for building heating in winter. Modelled and measured air temperatures are found to be in good agreement. The heating and cooling loads for the building with and without such a ventilated facade have been calculated and the impact of climatic variations on the performance of such buildings has also been investigated. It was found that the cooling loads are marginally higher with the PV facade for all locations considered, whereas the impact of the facade on the heating load depends critically on location. (C) 2002 Elsevier Science B.V. All rights reserved.},
+  file                       = {Mei2003.pdf:Mei2003.pdf:PDF},
+  groups                     = {Buildings},
+  number-of-cited-references = {11},
+  owner                      = {pb},
+  review                     = {Quite complex dynamical model, but no statistical techniques applied, only transfer functions Trnsys.},
+  times-cited                = {33},
+  timestamp                  = {2010.11.17},
+  unique-id                  = {ISI:000182951200007},
+}
+
+@Article{Meibom2013,
+  author    = {Meibom, P. and Hilger, K.B. and Madsen, H. and Vinther, D.},
+  title     = {Energy Comes Together in Denmark: The Key to a Future Fossil-Free Danish Power System},
+  journal   = {Power and Energy Magazine, IEEE},
+  year      = {2013},
+  volume    = {11},
+  number    = {5},
+  pages     = {46-55},
+  issn      = {1540-7977},
+  abstract  = {The transition of the Danish energy system to a system based only on renewable energy in 2050 carries many challenges. For Denmark to become independent of fossil energy sources, wind power and biomass are expected to become the main sources of energy. Onshore and offshore wind farms are expected to provide the majority of electricity, and biomass and electricity are expected to become the major sources of heating. On the way toward the 100% renewable goal in 2050, the Danish government has proposed a 2035 midterm goal to cover the energy consumption for power and heat with renewables.},
+  doi       = {10.1109/MPE.2013.2268751},
+  file      = {Meibom2013.pdf:Meibom2013.pdf:PDF},
+  groups    = {OtherEnergyRelated},
+  keywords  = {bioenergy conversion;biofuel;electric heating;energy consumption;fossil fuels;offshore installations;wind power;wind power plants;Danish energy system;Danish government;Denmark;biomass;energy consumption;fossil energy source;fossil-free Danish power system;heating;offshore wind farm;onshore wind farm;renewable energy;wind power;Biomass;Electricity supply industry;Energy efficiency;Energy management;Heat pumps;Renewable energy resources;Resistance heating;Water heating;Wind power generation},
+  owner     = {pb},
+  timestamp = {2013.12.09},
+}
+
+@Article{Meibom2007,
+  author    = {Meibom, Peter and Kiviluoma, Juha and Barth, Rüdiger and Brand, Heike and Weber, Christoph and Larsen, Helge V.},
+  title     = {Value of electric heat boilers and heat pumps for wind power integration},
+  journal   = {Wind Energy},
+  year      = {2007},
+  volume    = {10},
+  number    = {4},
+  pages     = {321-337},
+  issn      = {1099-1824},
+  abstract  = {The paper analyses the economic value of using electric heat boilers and heat pumps as wind power integration measures relieving the link between the heat and power production in combined heat and power plants. Both measures have different technical and economic characteristics, making a comparison of the value of these measures relevant. A stochastic, fundamental bottom-up model, taking the stochastic nature of wind power production explicitly into account when making dispatch decisions, is used to analyse the technical and economical performance of these measures in a North European power system covering Denmark, Finland, Germany, Norway and Sweden. Introduction of heat pumps or electric boilers is beneficial for the integration of wind power, because the curtailment of wind power production is reduced, the price of regulating power is reduced and the number of hours with very low power prices is reduced, making the wind power production more valuable. The system benefits of heat pumps and electric boilers are connected to replacing heat production on fuel oil heat boilers and combined heat and power (CHP) plants using various fuels with heat production using electricity and thereby saving fuel. The benefits of the measures depend highly on the underlying structure of heat production. The integration measures are economical, especially in systems where the marginal heat production costs before the introduction of the heat measures are high, e.g. heat production on heat boilers using fuel oil. Copyright © 2007 John Wiley & Sons, Ltd.},
+  doi       = {10.1002/we.224},
+  file      = {Meibom2007.pdf:Meibom2007.pdf:PDF},
+  groups    = {phdthesis},
+  keywords  = {wind power, system integration, heat pumps, electric heat boilers, stochastic optimisation},
+  owner     = {pb},
+  publisher = {John Wiley \& Sons, Ltd.},
+  timestamp = {2012.04.02},
+}
+
+@PhdThesis{Mestekemper2011,
+  author    = {Mestekemper, T.},
+  title     = {Energy demand forecasting and dynamic water temperature management},
+  school    = {Bielefeld University},
+  year      = {2011},
+  file      = {Mestekemper2011.pdf:Mestekemper2011.pdf:PDF},
+  groups    = {single house forecasting},
+  owner     = {pb},
+  timestamp = {2012.03.22},
+}
+
+@Book{MoralesGonzalez2014,
+  title     = {Integrating Renewables in Electricity Markets: Operational Problems},
+  publisher = {Springer},
+  year      = {2014},
+  author    = {Morales Gonz{\'a}lez, Juan Miguel and Conejo, Antonio J and Madsen, Henrik and Pinson, Pierre and Zugno, Marco},
+  groups    = {Markets},
+  owner     = {pb},
+  timestamp = {2014.09.26},
+}
+
+@Article{Morales2012,
+  author    = {Morales, Juan M and Pinson, Pierre and Madsen, Henrik},
+  title     = {A transmission-cost-based model to estimate the amount of market-integrable wind resources},
+  journal   = {Power Systems, IEEE Transactions on},
+  year      = {2012},
+  volume    = {27},
+  number    = {2},
+  pages     = {1060--1069},
+  groups    = {Planning},
+  owner     = {pb},
+  publisher = {IEEE},
+  timestamp = {2014.09.26},
+}
+
+@Article{Morales2014,
+  author    = {Morales, Juan M and Zugno, Marco and Pineda, Salvador and Pinson, Pierre},
+  title     = {Electricity market clearing with improved scheduling of stochastic production},
+  journal   = {European Journal of Operational Research},
+  year      = {2014},
+  volume    = {235},
+  number    = {3},
+  pages     = {765--774},
+  groups    = {Markets},
+  owner     = {pb},
+  publisher = {North-Holland},
+  timestamp = {2014.09.26},
+}
+
+@Article{Morales2014a,
+  author    = {Morales, Juan M and Zugno, Marco and Pineda, Salvador and Pinson, Pierre},
+  title     = {Redefining the Merit Order of Stochastic Generation in Forward Markets},
+  year      = {2014},
+  groups    = {Markets},
+  owner     = {pb},
+  publisher = {IEEE},
+  timestamp = {2014.09.26},
+}
+
+@InProceedings{Mortensen2011,
+  Title                    = {Analysis of energy consumption in single family houses},
+  Author                   = {Mortensen, Stig B. and Nielsen, Henrik Aa.},
+  Booktitle                = {Proceedings of DYNASTEE International Workshop on Whole Building Testing, Evaluation and Modelling for Energy Assessment, 18-19 May 2011, Lyngby, Denmark},
+  Year                     = {2011},
+
+  Owner                    = {pb},
+  Timestamp                = {2013.06.09},
+  Url                      = {http://re.jrc.ec.europa.eu/energyefficiency/events/DYNASTEE_CPH11e/Paper_PDF_CPH/4_1_Nielsen_Enfor-dynastee-2011-05-paper.pdf}
+}
+
+@Article{Mount2006,
+  Title                    = {Predicting price spikes in electricity markets using a regime-switching model with time-varying parameters },
+  Author                   = {Timothy D. Mount and Yumei Ning and Xiaobin Cai},
+  Journal                  = {Energy Economics },
+  Year                     = {2006},
+  Number                   = {1},
+  Pages                    = {62 - 80},
+  Volume                   = {28},
+
+  Abstract                 = {This paper shows that a stochastic regime-switching model can represent the volatile behavior of wholesale electricity prices associated with price spikes effectively. The structure of the model is very flexible because the mean prices in the two regimes and the two transition probabilities are functions of the load and/or the implicit reserve margin. Using price data from the single settlement market in PJM (May 1999 to May 2000), the results show that the estimated switching probability from the low to the high regime predicts price spikes well if the reserve margin is measured accurately.},
+  Doi                      = {10.1016/j.eneco.2005.09.008},
+  File                     = {Mount2006.pdf:Mount2006.pdf:PDF},
+  ISSN                     = {0140-9883},
+  Keywords                 = {Price spikes},
+  Owner                    = {pb},
+  Timestamp                = {2014.06.24},
+  Url                      = {http://www.sciencedirect.com/science/article/pii/S0140988305000897}
+}
+
+@Article{Mueller2004,
+  author    = {R.W. Mueller and K.F. Dagestad and P. Ineichen and M. Schroedter-Homscheidt and S. Cros and D. Dumortier and R. Kuhlemann and J.A. Olseth and G. Piernavieja and C. Reise and L. Wald and D. Heinemann},
+  title     = {Rethinking satellite-based solar irradiance modelling: The SOLIS clear-sky module},
+  journal   = {Remote Sensing of Environment},
+  year      = {2004},
+  volume    = {91},
+  number    = {2},
+  pages     = {160-174},
+  issn      = {0034-4257},
+  abstract  = {Accurate solar irradiance data are not only of particular importance for the assessment of the radiative forcing of the climate system, but also absolutely necessary for efficient planning and operation of solar energy systems. Within the European project Heliosat-3, a new type of solar irradiance scheme is developed. This new type will be based on radiative transfer models (RTM) using atmospheric parameter information retrieved from the Meteosat Second Generation (MSG) satellite (clouds, ozone, water vapour) and the ERS-2/ENVISAT satellites (aerosols, ozone). This paper focuses on the description of the clear-sky module of the new scheme, especially on the integrated use of a radiative transfer model. The linkage of the clear-sky module with the cloud module is also briefly described in order to point out the benefits of the integrated RTM use for the all-sky situations. The integrated use of an RTM within the new Solar Irradiance Scheme SOLIS is applied by introducing a new fitting function called the modified Lambert–Beer (MLB) relation. Consequently, the modified Lambert–Beer relation and its role for an integrated RTM use are discussed. Comparisons of the calculated clear-sky irradiances with ground-based measurements and the current clear-sky module demonstrate the advantages and benefits of SOLIS. Since SOLIS can provide spectrally resolved irradiance data, it can be used for different applications. Beside improved information for the planning of solar energy systems, the calculation of photosynthetic active radiation, UV index, and illuminance is possible.},
+  doi       = {10.1016/j.rse.2004.02.009},
+  groups    = {correction},
+  keywords  = {Solar irradiance modelling},
+  owner     = {pb},
+  timestamp = {2012.03.17},
+}
+
+@Article{Naveros2014,
+  author    = {I. Naveros and P. Bacher and D.P. Ruiz and M.J. Jiménez and H. Madsen},
+  title     = {Setting up and validating a complex model for a simple homogeneous wall},
+  journal   = {Energy and Buildings},
+  year      = {2014},
+  volume    = {70},
+  number    = {0},
+  pages     = {303 - 317},
+  issn      = {0378-7788},
+  doi       = {10.1016/j.enbuild.2013.11.076},
+  file      = {Naveros2014.pdf:Naveros2014.pdf:PDF},
+  groups    = {Buildings},
+  keywords  = {Building energy},
+  owner     = {pb},
+  timestamp = {2014.03.30},
+  url       = {http://www.sciencedirect.com/science/article/pii/S0378778813007937},
+}
+
+@Article{Neeper2000,
+  author                     = {Neeper, DA},
+  title                      = {Thermal dynamics of wallboard with latent heat storage},
+  journal                    = {SOLAR ENERGY},
+  year                       = {2000},
+  volume                     = {68},
+  number                     = {5},
+  pages                      = {393-403},
+  issn                       = {0038-092X},
+  abstract                   = {Wallboard impregnated with phase change material (PCM) will provide thermal storage that is distributed throughout a building, enabling passive solar design and off-peak cooling with frame construction. This paper examines the thermal dynamics of PCM wallboard that is subjected to the diurnal variation of room temperature, but is not directly illuminated by the sun. The purpose of this work is to provide guidelines useful in selecting an optimal PCM and in estimating the benefits of PCM architectural products. The energy stored during a daily cycle depends upon a) the melt temperature of the PCM; b) the temperature range over which melt occurs; and c) the latent capacity per unit area of wallboard. Situations with the wallboard on an interior partition or on the inside of the building envelope are investigated separately. The following findings are presented. The maximum diurnal energy storage occurs at a value of the PCM melt temperature that is close to the average room temperature in most circumstances. Diurnal energy storage decreases if the phase change transition occurs over a range of temperatures. The diurnal storage achieved in practice may be limited to the range 300-400 kJ/m2, even if the wallboard has a greater latent capacity. The implications of these findings for test room experiments are discussed. (C) 2000 Elsevier Science Ltd. All rights reserved.},
+  file                       = {Neeper2000.pdf:Neeper2000.pdf:PDF},
+  groups                     = {Buildings},
+  number-of-cited-references = {27},
+  owner                      = {pb},
+  times-cited                = {73},
+  timestamp                  = {2010.11.16},
+  unique-id                  = {ISI:000087435100002},
+}
+
+@Article{Nelder1965,
+  Title                    = {A simplex method for function minimization},
+  Author                   = {Nelder, John A and Mead, Roger},
+  Journal                  = {The Computer Journal},
+  Year                     = {1965},
+  Number                   = {4},
+  Pages                    = {308--313},
+  Volume                   = {7},
+
+  File                     = {Nelder1965.pdf:Nelder1965.pdf:PDF},
+  Owner                    = {pb},
+  Publisher                = {Br Computer Soc},
+  Timestamp                = {2015.10.29}
+}
+
+@Article{Nguyen2013,
+  author    = {K.A. Nguyen and R.A. Stewart and H. Zhang},
+  title     = {An intelligent pattern recognition model to automate the categorisation of residential water end-use events},
+  journal   = {Environmental Modelling \& Software},
+  year      = {2013},
+  volume    = {47},
+  number    = {0},
+  pages     = {108-127},
+  issn      = {1364-8152},
+  abstract  = {The rapid dissemination of residential water end-use (e.g. shower, clothes washer, etc.) consumption data to the customer via a web-enabled portal interface is becoming feasible through the advent of high resolution smart metering technologies. However, in order to achieve this paradigm shift in residential customer water use feedback, an automated approach for disaggregating complex water flow trace signatures into a registry of end-use event categories needs to be developed. This outcome is achieved by applying a hybrid combination of gradient vector filtering, Hidden Markov Model (HMM) and Dynamic Time Warping Algorithm (DTW) techniques on an existing residential water end-use database of 252 households located in South-east Queensland, Australia having high resolution water meters (0.0139 L/pulse), remote data transfer loggers (5 s logging) and completed household water appliance audits. The approach enables both single independent events (e.g. shower event) and combined events (i.e. several overlapping single events) to be disaggregated from flow data into a comprehensive end-use event registry. Complex blind source separation of concurrently occurring water end use events (e.g. shower and toilet flush occurring in same time period) is the primary focus of this present study. Validation of the developed model is achieved through an examination of 50 independent combined events.},
+  doi       = {10.1016/j.envsoft.2013.05.002},
+  file      = {Nguyen2013.pdf:Nguyen2013.pdf:PDF},
+  groups    = {kernelpaper},
+  keywords  = {Water end-use event},
+  owner     = {pb},
+  timestamp = {2013.11.18},
+  url       = {http://www.sciencedirect.com/science/article/pii/S1364815213001084},
+}
+
+@InProceedings{nicolosi2011,
+  author    = {Nicolosi, M. and Nabe, C.},
+  title     = {The Long-Term Effects of High Shares of PV in the Power System--An Analysis of the German Power Market},
+  booktitle = {1st International Workshop on the Integration of Solar Power into Power Systems},
+  year      = {2011},
+  file      = {nicolosi2011.pdf:nicolosi2011.pdf:PDF},
+  groups    = {phdthesis},
+  owner     = {pb},
+  timestamp = {2012.04.12},
+}
+
+@TechReport{Nielsen1997,
+  Title                    = {{LFLM version 1.0, an S-PLUS / R library for locally weighted fitting of linear models}},
+  Author                   = {Henrik Aalborg Nielsen},
+  Institution              = {Department of Mathematical Modelling, Technical University of Denmark},
+  Year                     = {1997},
+
+  Address                  = {Lyngby, Denmark},
+  Number                   = {22},
+
+  Owner                    = {pb},
+  Timestamp                = {2010.10.04}
+}
+
+@Article{Nielsen2006,
+  author    = {Nielsen, Henrik Aalborg and Madsen, Henrik},
+  title     = {Modelling the heat consumption in district heating systems using a grey-box approach},
+  journal   = {Energy \& Buildings},
+  year      = {2006},
+  volume    = {38},
+  number    = {1},
+  pages     = {63-71},
+  issn      = {03787788},
+  abstract  = {The heat consumption in a large geographical area is considered together with climate measurements on a single location in the area. The purpose is to identify a model linking the heat consumption to climate and calendar information. The process of building a model is split into a theoretical based identification of an overall model structure followed by data-based modelling, whereby the details of the model are identified. This approach is sometimes called grey-box modelling, but the specific approach used here does not require states to be specified. Overall, the paper demonstrates the power of the grey-box approach.},
+  doi       = {10.1016/j.enbuild.2005.05.002},
+  file      = {Nielsen2006.pdf:Nielsen2006.pdf:PDF},
+  groups    = {single house forecasting, Forecasting, load, OptimalBidding},
+  owner     = {pb},
+  timestamp = {2012.02.24},
+}
+
+@Article{Nielsen2001,
+  author    = {Nielsen, Henrik Aa. and Madsen, Henrik},
+  title     = {A generalization of some classical time series tools},
+  journal   = {Computational Statistics and Data Analysis},
+  year      = {2001},
+  volume    = {37},
+  number    = {1},
+  pages     = {13-31},
+  issn      = {01679473},
+  abstract  = {In classical time series analysis the sample autocorrelation function (SACF) and the sample partial autocorrelation function (SPACF) has gained wide application for structural identification of linear time series models. We suggest generalizations, founded on smoothing techniques, applicable for structural identification of non-linear time series models. A similar generalization of the sample cross correlation function is discussed. Furthermore, a measure of the departure from linearity is suggested. It is shown how bootstrapping can be applied to construct confidence intervals under independence or linearity. The generalizations do not prescribe a particular smoothing technique. In fact, when the smoother is replaced by a linear regression the generalizations reduce to close approximations of SACF and SPACF. For this reason a smooth transition from the linear to the non-linear case can be obtained by varying the bandwidth of a local linear smoother. By adjusting the flexibility of the smoother, the power of the tests for independence and linearity against specific alternatives can be adjusted. The generalizations allow for graphical presentations, very similar to those used for SACF and SPACF. In this paper the generalizations are applied to some simulated data sets and to the Canadian lynx data. The generalizations seem to perform well and the measure of the departure from linearity proves to be an important additional tool.},
+  copyright = {Elsevier Science B.V.},
+  file      = {Nielsen2001.pdf:Nielsen2001.pdf:PDF},
+  groups    = {Adaptive Autocorrelation, TheoreticalModeling},
+  language  = {English},
+  owner     = {pb},
+  timestamp = {2011.01.07},
+}
+
+@Article{Nielsen2006a,
+  author    = {Nielsen, Henrik Aalborg and Madsen, Henrik and Nielsen, Torben Skov},
+  title     = {Using quantile regression to extend an existing wind power forecasting system with probabilistic forecasts},
+  journal   = {Wind Energy},
+  year      = {2006},
+  volume    = {9},
+  number    = {1-2},
+  pages     = {95-108},
+  issn      = {1099-1824},
+  abstract  = {For operational planning it is important to provide information about the situation-dependent uncertainty of a wind power forecast. Factors which influence the uncertainty of a wind power forecast include the predictability of the actual meteorological situation, the level of the predicted wind speed (due to the non-linearity of the power curve) and the forecast horizon. With respect to the predictability of the actual meteorological situation a number of explanatory variables are considered, some inspired by the literature. The article contains an overview of related work within the field. An existing wind power forecasting system (Zephyr/WPPT) is considered and it is shown how analysis of the forecast error can be used to build a model of the quantiles of the forecast error. Only explanatory variables or indices which are predictable are considered, whereby the model obtained can be used for providing situation-dependent information regarding the uncertainty. Finally, the article contains directions enabling the reader to replicate the methods and thereby extend other forecast systems with situation-dependent information on uncertainty. Copyright © 2005 John Wiley & Sons, Ltd.},
+  doi       = {10.1002/we.180},
+  file      = {Nielsen2006a.pdf:Nielsen2006a.pdf:PDF},
+  groups    = {Forecasting, wind, OptimalBidding},
+  keywords  = {wind power forecasting, uncertainty, quantile regression, additive model},
+  owner     = {pb},
+  publisher = {John Wiley \& Sons, Ltd.},
+  timestamp = {2012.04.04},
+}
+
+@Conference{Nielsen2010,
+  Title                    = {Analysis of energy consumption in single family houses},
+  Author                   = {Nielsen, H. A. and Mortensen, S. B. and Bacher, P. and Madsen, H.},
+  Booktitle                = {DYNASTEE, 11-12 October 2010, Brussels},
+  Year                     = {2010},
+
+  Owner                    = {pb},
+  Timestamp                = {2011.04.10}
+}
+
+@InProceedings{Nielsen2002,
+  Title                    = {On on-line systems for short-term forecasting for energy systems},
+  Author                   = {Nielsen, H. A. and Nielsen, T.S. and Madsen, H.},
+  Booktitle                = {Proceedings of the OR 2002 Conference},
+  Pages                    = {265-271},
+  Address                  = {Klagenfurt, Austria},
+  Publisher                = {Springer},
+  Year                     = {2002},
+
+  Owner                    = {pb},
+  Timestamp                = {2011.04.10}
+}
+
+@Article{Nielsen2007,
+  Title                    = {Optimal combination of wind power forecasts},
+  Author                   = {Nielsen, Henrik Aa. and Nielsen, Torben S. and Madsen, Henrik and Pindado, Maria J. San Isidro and Marti, Ignacio},
+  Journal                  = {Wind Energy},
+  Year                     = {2007},
+  Number                   = {5},
+  Pages                    = {471--482},
+  Volume                   = {10},
+
+  Abstract                 = {We consider wind power forecasts based on a number of different meteorological forecasts originating from three different global meteorological models. Wind power forecasts based on these meteorological forecasts have fairly similar performance. However, in the paper, we show that the wind power forecast errors are relatively uncorrelated. For this reason, we can combine the forecasts and obtain a final forecast which performs better than any of the individual forecasts. Optimal weights are found based on the bias of the individual forecasts and the variance–covariance matrix of the individual forecast errors. In the paper, we show that quite significant improvements can be obtained using only a few different meteorological forecasts. Copyright © 2007 John Wiley & Sons, Ltd.},
+  Doi                      = {10.1002/we.237},
+  File                     = {Nielsen2007.pdf:Nielsen2007.pdf:PDF},
+  ISSN                     = {1099-1824},
+  Keywords                 = {wind power forecasting, combination of forecasts, combined forecasting, correlation of forecast errors},
+  Owner                    = {pb},
+  Publisher                = {John Wiley \& Sons, Ltd.},
+  Timestamp                = {2016.01.04},
+  Url                      = {http://dx.doi.org/10.1002/we.237}
+}
+
+@TechReport{Nielsen2000,
+  author      = {Nielsen, H.A. and Madsen, H. and Danmark. Energiministeriets Forskningsprogram. Produktion og Fordeling af El og Varme},
+  title       = {Predicting the heat consumption in district heating systems using meteorological forecasts},
+  institution = {DTU IMM},
+  year        = {2000},
+  file        = {Nielsen2000.pdf:Nielsen2000.pdf:PDF},
+  groups      = {single house forecasting, Forecasting, load},
+  owner       = {pb},
+  timestamp   = {2012.03.22},
+}
+
+@Article{Nielsen2000a,
+  author    = {Nielsen, H.A. and Nielsen, T.S. and Joensen, A.K. and Madsen, H. and Holst, J.},
+  title     = {Tracking time-varying-coefficient functions},
+  journal   = {International Journal of Adaptive Control and Signal Processing},
+  year      = {2000},
+  volume    = {14},
+  number    = {8},
+  pages     = {813-828},
+  file      = {Nielsen2000a.pdf:Nielsen2000a.pdf:PDF},
+  groups    = {TheoreticalModeling},
+  owner     = {pb},
+  publisher = {Citeseer},
+  timestamp = {2012.04.14},
+}
+
+@InProceedings{Nielsen2011,
+  Title                    = {An overview of wind power forecasts types and their use in large-scale integration of wind power},
+  Author                   = {Nielsen, H and Nielsen, T and Madsen, Henrik},
+  Booktitle                = {Proceedings of the 10th International Workshop on Large-Scale Integration of Wind Power into Power Systems},
+  Year                     = {2011},
+
+  File                     = {Nielsen2011.pdf:Nielsen2011.pdf:PDF},
+  Owner                    = {pb},
+  Timestamp                = {2016.01.06}
+}
+
+@InProceedings{Nielsen2006b,
+  Title                    = {From wind ensembles to probabilistic information about future wind power production - results from an actual application},
+  Author                   = {Nielsen, H.A. and Nielsen, T.S. and Madsen, H. and Giebel, G. and Badger, J. and Landberg, L. and Sattler, K. and Voulund, L. and Tofting, J.},
+  Booktitle                = {Probabilistic Methods Applied to Power Systems, 2006. PMAPS 2006. International Conference on},
+  Year                     = {2006},
+  Month                    = {june},
+  Pages                    = {1-8},
+
+  Doi                      = {10.1109/PMAPS.2006.360289},
+  File                     = {Nielsen2006b.pdf:Nielsen2006b.pdf:PDF},
+  Keywords                 = {meteorological ensemble wind forecast;offshore wind farm;probabilistic forecasting;statistical modelling;wind power production;statistical analysis;weather forecasting;wind power;wind power plants;},
+  Owner                    = {pb},
+  Timestamp                = {2012.04.15}
+}
+
+@Article{Nielsen2000b,
+  author    = {Jan Nygaard Nielsen and Henrik Madsen and Peter C. Young},
+  title     = {Parameter estimation in stochastic differential equations: An overview},
+  journal   = {Annual Reviews in Control},
+  year      = {2000},
+  volume    = {24},
+  number    = {0},
+  pages     = {83-94},
+  issn      = {1367-5788},
+  abstract  = {This paper presents an overview of the progress of research on parameter estimation methods for stochastic differential equations (mostly in the sense of Itô calculus) over the period 1981–1999. These are considered both without measurement noise and with measurement noise, where the discretely observed stochastic differential equations are embedded in a continuous-discrete time state space model. Every attempt has been made to include results from other scientific disciplines. Maximum likelihood estimation of parameters in nonlinear stochastic differential equations is in general not possible due to the unavailability of closed form expressions for the transition and stationary probability density functions of the states. However, major developments are classified according to their approximation to the “true” maximum likelihood solution as opposed to a historical order of presentation.},
+  doi       = {10.1016/S1367-5788(00)90017-8},
+  file      = {Nielsen2000b.pdf:Nielsen2000b.pdf:PDF},
+  groups    = {TheoreticalModeling},
+  keywords  = {Brownian motion},
+  owner     = {pb},
+  timestamp = {2013.07.01},
+  url       = {http://www.sciencedirect.com/science/article/pii/S1367578800900178},
+}
+
+@InProceedings{nielsen2006short,
+  Title                    = {Short-term wind power forecasting using advanced statistical methods},
+  Author                   = {Nielsen, TS and Madsen, H and Nielsen, H Aa and Pinson, Pierre and Kariniotakis, Georges and Siebert, Nils and Marti, Ignacio and Lange, Matthias and Focken, Ulrich and Bremen, Lueder V and others},
+  Booktitle                = {Proceedings of The European Wind Energy Conference, EWEC 2006},
+  Year                     = {2006},
+
+  Owner                    = {pb},
+  Quality                  = {1},
+  Timestamp                = {2013.08.08}
+}
+
+@InProceedings{Nielsen2002a,
+  author    = {Nielsen, T. S. and Madsen, H.},
+  title     = {Control of Supply Temperature in District Heating Systems},
+  booktitle = {Proceedings of the 8th International Symposium on District heating and Cooling},
+  year      = {2002},
+  address   = {Trondheim, Norway},
+  groups    = {Optimization, MPC},
+  owner     = {pb},
+  timestamp = {2011.04.28},
+}
+
+@Article{Nielsen2002b,
+  author    = {Nielsen, Torben Skov and Madsen, Henrik and Nielsen, Henrik Aalborg},
+  title     = {Prediction of Wind Power Using Time-varying Coefficient-functions},
+  journal   = {Proceedings of the 15th IFAC World Congress on Automatic Control},
+  year      = {2002},
+  groups    = {wind},
+  owner     = {pb},
+  timestamp = {2011.09.16},
+}
+
+@Article{Ohtake2015,
+  Title                    = {Regional and seasonal characteristics of global horizontal irradiance forecasts obtained from the Japan Meteorological Agency mesoscale model },
+  Author                   = {Hideaki Ohtake and Joao Gari da Silva Fonseca Jr. and Takumi Takashima and Takashi Oozeki and Ken-ichi Shimose and Yoshinori Yamada},
+  Journal                  = {Solar Energy },
+  Year                     = {2015},
+  Number                   = {0},
+  Pages                    = {83 - 99},
+  Volume                   = {116},
+
+  Abstract                 = {To obtain accurate forecasts of photovoltaic power generation, the use of forecast datasets of meteorological elements from numerical prediction models, specifically global horizontal irradiance (GHI), is necessary. This study seeks to validate, and therefore improve GHI forecasts. Ground-based data from Japan Meteorological Agency (JMA) stations are used in a JMA mesoscale model (MSM) during the time period from 2008 to 2012 and temporal and spatial characteristics of forecast errors are analyzed. Statistical monthly evaluations show that associated errors vary between seasons, with monthly GHI mean bias error values ranging from −60 to +45 W/m2 and root mean square errors (RMSEs) ranging from 95 to 170 W/m2. Mapping of forecast errors show that underestimation of GHI forecast values and large RMSE values are significant in the southern part of Japan (a subtropical region located along the Pacific Ocean), particularly during summers. In winter, overestimation of GHI forecasts is found throughout the entire Japanese archipelago. The frequency of different cloud type occurrences over the Japanese islands indicate that regional and seasonal variations in cloud types are related to relatively large GHI forecast errors. High-level cirrus clouds, mid-level altocumulus, and low-level stratus are often observed during summer, when forecasted values are underestimated, and during winter, when values are overestimated.},
+  Doi                      = {10.1016/j.solener.2015.03.020},
+  ISSN                     = {0038-092X},
+  Keywords                 = {Mesoscale model},
+  Owner                    = {pb},
+  Timestamp                = {2015.05.04},
+  Url                      = {http://www.sciencedirect.com/science/article/pii/S0038092X15001383}
+}
+
+@Article{Orgill1977,
+  Title                    = {{Correlation equation for hourly diffuse radiation on a horizontal surface}},
+  Author                   = {Orgill, J. F. and Hollands, K. G. T.},
+  Journal                  = {Solar Energy},
+  Year                     = {1977},
+  Number                   = {4},
+  Pages                    = {357-359},
+  Volume                   = {19},
+
+  Address                  = {THE BOULEVARD, LANGFORD LANE, KIDLINGTON, OXFORD, ENGLAND OX5 1GB},
+  Affiliation              = {UNIV WATERLOO,DEPT MECH ENGN,WATERLOO N2L 3G1,ONTARIO,CANADA.},
+  Doc-delivery-number      = {DQ517},
+  Doi                      = {10.1016/0038-092X(77)90006-8},
+  File                     = {Orgill1977.pdf:Orgill1977.pdf:PDF},
+  ISSN                     = {0038-092X},
+  Journal-iso              = {Sol. Energy},
+  Language                 = {English},
+  Owner                    = {pb},
+  Publisher                = {PERGAMON-ELSEVIER SCIENCE LTD},
+  Timestamp                = {2012.03.18},
+  Type                     = {Article},
+  Unique-id                = {ISI:A1977DQ51700006}
+}
+
+@Article{Palensky2011,
+  Title                    = {Demand Side Management: Demand Response, Intelligent Energy Systems, and Smart Loads},
+  Author                   = {Palensky, P. and Dietrich, D.},
+  Journal                  = {Industrial Informatics, IEEE Transactions on},
+  Year                     = {2011},
+
+  Month                    = {aug. },
+  Number                   = {3},
+  Pages                    = {381-388},
+  Volume                   = {7},
+
+  Abstract                 = {Energy management means to optimize one of the most complex and important technical creations that we know: the energy system. While there is plenty of experience in optimizing energy generation and distribution, it is the demand side that receives increasing attention by research and industry. Demand Side Management (DSM) is a portfolio of measures to improve the energy system at the side of consumption. It ranges from improving energy efficiency by using better materials, over smart energy tariffs with incentives for certain consumption patterns, up to sophisticated real-time control of distributed energy resources. This paper gives an overview and a taxonomy for DSM, analyzes the various types of DSM, and gives an outlook on the latest demonstration projects in this domain.},
+  Doi                      = {10.1109/TII.2011.2158841},
+  ISSN                     = {1551-3203},
+  Keywords                 = {demand response;demand side management;distributed energy resources;energy distribution;energy generation;energy management;intelligent energy systems;smart loads;sophisticated real-time control;demand side management;electric power generation;energy management systems;power distribution;},
+  Owner                    = {pb},
+  Timestamp                = {2012.04.01}
+}
+
+@Article{Pandzic2013,
+  author    = {Pand{\v{z}}i{\'c}, Hrvoje and Morales, Juan M and Conejo, Antonio J and Kuzle, Igor},
+  title     = {Offering model for a virtual power plant based on stochastic programming},
+  journal   = {Applied Energy},
+  year      = {2013},
+  volume    = {105},
+  pages     = {282--292},
+  groups    = {Markets},
+  owner     = {pb},
+  publisher = {Elsevier},
+  timestamp = {2014.09.26},
+}
+
+@Article{Papalexopoulos1990,
+  Title                    = {A regression-based approach to short-term system load forecasting},
+  Author                   = {Papalexopoulos, A.D. and Hesterberg, T.C.},
+  Journal                  = {Power Systems, IEEE Transactions on},
+  Year                     = {1990},
+  Number                   = {4},
+  Pages                    = {1535-1547},
+  Volume                   = {5},
+
+  Abstract                 = {A linear regression-based model for the calculation of short-term system load forecasts is described. The model's most significant aspects fall into the following areas: innovative model building, including accurate holiday modeling by using binary variables and temperature modeling by using heating and cooling degree functions; robust parameter estimation and parameter estimation under heteroskedasticity by using weighted least-squares linear regression techniques; use of `reverse errors-in-variables' techniques to mitigate the effects on load forecasts of potential errors in the explanatory variables; and distinction between time-independent daily peak load forecasts and the maximum of the hourly load forecasts in order to prevent peak forecasts from being negatively biased. The model was tested under a wide variety of conditioning and is shown to produce excellent results},
+  Doi                      = {10.1109/59.99410},
+  File                     = {Papalexopoulos1990.pdf:Papalexopoulos1990.pdf:PDF},
+  ISSN                     = {0885-8950},
+  Keywords                 = {load forecasting;cooling degree functions;heating degree functions;heteroskedasticity;holiday modeling;hourly load forecasts;model building;parameter estimation;regression-based approach;reverse errors-in-variables;short-term system load forecasting;temperature modeling;time-independent daily peak load forecasts;weighted least-squares linear regression;Economic forecasting;Load flow;Load forecasting;Load modeling;Parameter estimation;Power system control;Power system modeling;Power system security;Power systems;Predictive models},
+  Owner                    = {pb},
+  Timestamp                = {2013.11.12}
+}
+
+@Article{Park2004,
+  author    = {Cheol-Soo Park and Godfried Augenbroe and Tahar Messadi and Mate Thitisawat and Nader Sadegh},
+  title     = {Calibration of a lumped simulation model for double-skin facade systems},
+  journal   = {Energy and Buildings},
+  year      = {2004},
+  volume    = {36},
+  number    = {11},
+  pages     = {1117-1130},
+  issn      = {0378-7788},
+  abstract  = {The paper describes the calibration of a simulation model of double-skin facade systems with controlled rotating louvers and ventilation openings. The approach is based on a parameter estimation technique and in situ monitoring of a full-scale element mounted on the south facing facade of an existing building. Contrary to similar work that attempts to derive the behavior from detailed modeling of the physical transport phenomena, the new approach is based on a postulated “minimalistic” lumped model, which is calibrated on in-situ measurements. It is found that the calibrated model is surprisingly accurate and ideally suited for use in the ensuing optimal control and performance studies.},
+  doi       = {10.1016/j.enbuild.2004.04.003},
+  file      = {Park2004.pdf:Park2004.pdf:PDF},
+  groups    = {Buildings},
+  keywords  = {Lumped model},
+  owner     = {pb},
+  review    = {Lumped non-linear modelling.},
+  timestamp = {2010.11.17},
+}
+
+@Book{Pawitan2001,
+  Title                    = {In All Likelihood: Statistical Modelling and Inference Using Likelihood},
+  Author                   = {Pawitan, Y},
+  Publisher                = {Oxford University Press},
+  Year                     = {2001},
+
+  File                     = {:books/Yudi Pawitan-In All Likelihood_ Statistical Modelling and Inference Using Likelihood-Oxford University Press, USA (2001).pdf:PDF},
+  Owner                    = {pb},
+  Timestamp                = {2010.11.22}
+}
+
+@InProceedings{Pedersen2011,
+  author    = {Pedersen, T.S. and Andersen, P. and Nielsen, K.M. and Starmose, H.L. and Pedersen, P.D.},
+  title     = {Using heat pump energy storages in the power grid},
+  booktitle = {Control Applications (CCA), 2011 IEEE International Conference on},
+  year      = {2011},
+  pages     = {1106-1111},
+  month     = {sept.},
+  abstract  = {The extensive growth of installed wind energy plants lead to increasing balancing problems in the power grid due to the nature of wind fields and diurnal variations in consumption. One way to overcome these problems is to move consumption to times where wind power otherwise cause overproduction and large fluctuations in prices. The paper presents a method which takes advantage of heat capacity in single-family houses using heat pumps which are anticipated to be installed in large numbers in Denmark in next decade. This type of heating gives a large time constant and it is shown possible to move consumption without compromising the comfort of house residents. In the paper an optimization exploiting forecasts of weather and energy prices combined with prediction models of house dynamics is presented. The results show that with the presented method it will be possible to move a substantial amount of energy from one time to another.},
+  doi       = {10.1109/CCA.2011.6044504},
+  file      = {Pedersen2011.pdf:Pedersen2011.pdf:PDF},
+  groups    = {single house forecasting},
+  issn      = {1085-1992},
+  keywords  = {Denmark;balancing problems;diurnal variations;energy prices;heat capacity;heat pump energy storages;house residents;installed wind energy plants;optimization;power grid;prediction models;weather forecasts;wind fields;energy storage;heat pumps;optimisation;power grids;wind power plants;},
+  owner     = {pb},
+  timestamp = {2012.02.27},
+}
+
+@Article{Pelland2011,
+  author    = {Pelland, Sophie and Galanis, George and Kallos, George},
+  title     = {Solar and photovoltaic forecasting through post-processing of the Global Environmental Multiscale numerical weather prediction model},
+  journal   = {Progress in Photovoltaics: Research and Applications},
+  year      = {2011},
+  pages     = {n/a-n/a},
+  issn      = {1099-159X},
+  abstract  = {Hourly solar and photovoltaic (PV) forecasts for horizons between 0 and 48 h ahead were developed using Environment Canada's Global Environmental Multiscale model. The motivation for this research was to explore PV forecasting in Ontario, Canada, where feed-in tariffs are driving rapid growth in installed PV capacity. The solar and PV forecasts were compared with irradiance data from 10 North-American ground stations and with alternating current power data from three Canadian PV systems. A 1-year period was used to train the forecasts, and the following year was used for testing. Two post-processing methods were applied to the solar forecasts: spatial averaging and bias removal using a Kalman filter. On average, these two methods lead to a 43% reduction in root mean square error (RMSE) over a persistence forecast (skill score = 0.67) and to a 15% reduction in RMSE over the Global Environmental Multiscale forecasts without post-processing (skill score = 0.28). Bias removal was primarily useful when considering a “regional” forecast for the average irradiance of the 10 ground stations because bias was a more significant fraction of RMSE in this case. PV forecast accuracy was influenced mainly by the underlying (horizontal) solar forecast accuracy, with RMSE ranging from 6.4% to 9.2% of rated power for the individual PV systems. About 76% of the PV forecast errors were within ±5% of the rated power for the individual systems, but the largest errors reached up to 44% to 57% of rated power. © Her Majesty the Queen in Right of Canada 2011. Reproduced with the permission of the Minister of Natural Resources Canada.},
+  doi       = {10.1002/pip.1180},
+  file      = {Pelland2011.pdf:Pelland2011.pdf:PDF},
+  groups    = {forecasting, phdthesis},
+  keywords  = {solar forecasting, photovoltaic forecasting, numerical weather prediction, post-processing, Kalman filter, spatial averaging},
+  owner     = {pb},
+  publisher = {John Wiley \& Sons, Ltd},
+  timestamp = {2012.04.03},
+}
+
+@InProceedings{Penya2011,
+  Title                    = {Short-term load forecasting in air-conditioned non-residential Buildings},
+  Author                   = {Penya, Y.K. and Borges, C.E. and Agote, D. and Fernandez, I.},
+  Booktitle                = {Industrial Electronics (ISIE), 2011 IEEE International Symposium on},
+  Year                     = {2011},
+  Pages                    = {1359-1364},
+
+  Abstract                 = {Short-term load forecasting (STLF) has become an essential tool in the electricity sector. It has been classically object of vast research since energy load prediction is known to be non-linear. In a previous work, we focused on non-residential building STLF, a special case of STLF where weather has negligible influence on the load. Now we tackle more modern buildings in which the temperature does alter its energy consumption. That is, we address here fully-HVAC (Heating, Ventilating, and Air Conditioning) ones. Still, in this problem domain, the forecasting method selected must be simple, without tedious trial-and-error configuring or parametrising procedures, work with scarce (or any) training data and be able to predict an evolving demand curve. Following our preceding research, we have avoided the inherent non-linearity by using the work day schedule as day-type classifier. We have evaluated the most popular STLF systems in the literature, namely ARIMA (autoregressive integrated moving average) time series and Neural networks (NN), together with an Autoregressive Model (AR) time series and a Bayesian network (BN), concluding that the autoregressive time series outperforms its counterparts and suffices to fulfil the addressed requirements, even in a 6 day-ahead horizon.},
+  Doi                      = {10.1109/ISIE.2011.5984356},
+  File                     = {Penya2011.pdf:Penya2011.pdf:PDF},
+  ISSN                     = {Pending},
+  Keywords                 = {Bayes methods;HVAC;autoregressive moving average processes;building management systems;energy consumption;load forecasting;neural nets;time series;ARIMA;Bayesian network;STLF systems;air conditioning;air-conditioned nonresidential buildings;autoregressive integrated moving average time series;autoregressive model time series;day-type classifier;demand curve;electricity sector;energy consumption;energy load prediction;fully-HVAC;heating;modern buildings;neural networks;short-term load forecasting;training data;ventilating;work day schedule;Artificial neural networks;Bayesian methods;Buildings;Data models;Forecasting;Load modeling;Meteorology},
+  Owner                    = {pb},
+  Quality                  = {1},
+  Timestamp                = {2013.11.12}
+}
+
+@TechReport{Perers2006,
+  author      = {Perers, B.},
+  title       = {Thermal solar systems and components - Solar collectors - Part 2: Test methods},
+  institution = {Swedish Standards Institute},
+  year        = {2006},
+  file        = {:/home/pb/literature/techreports/SS-EN_12975-2_2006.pdf:PDF},
+  groups      = {Collector testing},
+  owner       = {pb},
+  review      = {Bengt's own private copy.},
+  timestamp   = {2009.08.14},
+}
+
+@Article{Perers1997,
+  author    = {Perers, Bengt},
+  title     = {An improved dynamic solar collector test method for determination of non-linear optical and thermal characteristics with multiple regression},
+  journal   = {Solar Energy},
+  year      = {1997},
+  volume    = {59},
+  number    = {4-6},
+  pages     = {163-178},
+  issn      = {0038092x},
+  abstract  = {The objective is to characterise the solar collector during a relatively short testing period with no requirement for steady state climatic conditions. This information is then used for predicting annual performance of the collector. A standard collector model that is compatible with the ISO 9806-1 test standard is used with correction terms for beam and diffuse incidence angle modifiers, thermal capacitance, wind speed and sky temperature. This results in a more complete characterisation of the collector. The collector parameters are identified by multiple linear regression, MLR. The method has been tested for characterisation of unglazed collectors, glazed flat plate collectors, evacuated tubular collectors, CPC collectors and concentrating collectors with satisfying results. Typically the correlation coefficient R2 is better than 0.99 and the standard deviation of the difference between model and measurement is in the range 3-10 W/m2. In the original method the angular dependence of the optical efficiency and the temperature dependence of the heat losses are supposed to be adjusted to a predetermined function. The most recent development is a routine that makes it possible to accurately identify non-linear optical and thermal performance. This extended MLR method can identify the zero loss efficiency for every angle of incidence interval and the temperature dependent heat losses for every temperature interval. This opens the application of the method to collectors with special incidence angle and heat loss effects that cannot be described easily with a combination of elementary functions. Instead a table of parameter values is determined, which is used directly in standard simulation programmes. This method will further increase the accuracy when comparing different collector designs. It has been used for comparing different glazings and for comparison with spectrophotometric measurements. It has also been used for analysing the heat loss factors for Teflon and honeycomb glazings. Since the total power output of the collector is less dependent on the heat loss coefficient than on the optical efficiency the scattering in this data is larger than for the incidence angle curves. The reflectance of booster mirrors cannot be derived with the MLR-method with acceptable accuracy. The correlation between direct irradiance and irradiance from the reflector exhibit a very strong correlation. Instead the effective reflectance of the mirror can be estimated by comparison of the measured output with calculation by the complete collector and reflector model. This effective reflectance is not compatible with the specular reflectance obtained from spectrophotometric measurements caused by large differences in acceptance angles. Standard multiple linear regression available in most spread sheet and statistical programs can be used for the parameter identification in the extended MLR-procedure. The identification takes only a few seconds. At the Älvkarleby Laboratory the test method is now used as a routine tool for the evaluation of new collector materials and designs. The Swedish National testing institute has evaluated the methods with the conclusion that they have a potential for being used in standardised collector testing.},
+  file      = {Perers1997.pdf:Perers1997.pdf:PDF},
+  groups    = {Collector testing},
+  owner     = {pb},
+  timestamp = {2011.02.24},
+}
+
+@InProceedings{Perers2011,
+  author      = {Perers, Bengt and Furbo, Simon and Fan, Jianhua and Andersen, Elsa and Chen, Ziqian},
+  title       = {Solar combisystems with forecast control to increase the solar fraction and lower the auxiliary energy cost},
+  booktitle   = {ISES Solar World Congress 2011 Proceedings},
+  year        = {2011},
+  note        = {Presented at: ISES Solar World Congress, SWC ; 30 : Kassel, Germany, 2011},
+  abstract    = {Solar Combi systems still need quite a lot of auxiliary energy especially in small systems without seasonal storage possibilities. The control of the auxiliary energy input both in time and power is important to utilize as much as possible of the solar energy available from the collectors and also to use low backup energy prices during the day if electricity is used. The storage function and both stratified charging and extraction of heat, are very important, to separate different temperature zones in the storage. This paper describes a step towards forecast control for electricity based auxiliary energy sources. It can be either direct electric heating elements or a heat pump upgrading ambient energy in the air, ground, solar collector or waste heat from the house. The paper describes system modeling and simulation results. Advanced laboratory experiments are also starting now with three different combisystems, operating in parallel. These systems will be briefly described too.},
+  affiliation = {Technical University of Denmark, Department of Civil Engineering, Section for Building Physics and Services},
+  file        = {Perers2011.pdf:Perers2011.pdf:PDF},
+  groups      = {single house forecasting},
+  isbn        = {978-3-9814659-0-7},
+  language    = {English},
+  location    = {Kassel, Germany},
+  owner       = {pb},
+  timestamp   = {2012.03.27},
+}
+
+@Article{Perez-Lombard2011,
+  Title                    = {A review of {HVAC} systems requirements in building energy regulations},
+  Author                   = {Luis Pérez-Lombard and José Ortiz and Juan F. Coronel and Ismael R. Maestre},
+  Journal                  = {Energy and Buildings},
+  Year                     = {2011},
+  Number                   = {2–3},
+  Pages                    = {255-268},
+  Volume                   = {43},
+
+  Abstract                 = {Building energy regulations, also referred to as building energy codes, emerged in the 1970s as an essential tool for improving energy efficiency and minimising energy consumption in buildings. Basically they aim at setting minimum energy efficiency requirements to achieve energy efficient design in new buildings. This paper analyses the development of building energy codes concerning Heating, Ventilation and Air-Conditioning (HVAC) energy efficiency, along with their scope and compliance paths. The paper focuses on the synthesis of energy efficiency requirements on HVAC systems of non-residential buildings in different regulations. Critical issues for the development of prescriptive and performance regulatory paths for this type of systems in non-residential buildings are discussed in order to improve the understanding of HVAC energy efficiency topics and to provide policy makers with a menu of options to strengthen the HVAC section of building energy codes.},
+  Doi                      = {10.1016/j.enbuild.2010.10.025},
+  File                     = {Perez-Lombard2011.pdf:Perez-Lombard2011.pdf:PDF},
+  ISSN                     = {0378-7788},
+  Keywords                 = {Building energy regulations},
+  Owner                    = {pbac},
+  Timestamp                = {2017.03.27},
+  Url                      = {http://www.sciencedirect.com/science/article/pii/S0378778810003774}
+}
+
+@Article{Perez-Lombard2009,
+  Title                    = {A review of benchmarking, rating and labelling concepts within the framework of building energy certification schemes },
+  Author                   = {Luis Pérez-Lombard and José Ortiz and Rocío González and Ismael R. Maestre},
+  Journal                  = {Energy and Buildings },
+  Year                     = {2009},
+  Number                   = {3},
+  Pages                    = {272 - 278},
+  Volume                   = {41},
+
+  Abstract                 = {Energy certification schemes for buildings emerged in the early 1990s as an essential method for improving energy efficiency, minimising energy consumption and enabling greater transparency with regards to the use of energy in buildings. However, from the beginning their definition and implementation process were diffuse and, occasionally, have confused building sector stakeholders. A multiplicity of terms and concepts such as energy performance, energy efficiency, energy ratings, benchmarking, labelling, etc., have emerged with sometimes overlapping meanings. This has frequently led to misleading interpretations by regulatory bodies, energy agencies and final consumers. This paper analyses the origin and the historic development of energy certification schemes in buildings along with the definition and scope of a building energy certificate and critical aspects of its implementation. Concepts such as benchmarking tools, energy ratings and energy labelling are clarified within the wider topic of certification schemes. Finally, a seven steps process is proposed as a guide for implementing building energy certification. },
+  Doi                      = {10.1016/j.enbuild.2008.10.004},
+  File                     = {Perez-Lombard2009.pdf:Perez-Lombard2009.pdf:PDF},
+  ISSN                     = {0378-7788},
+  Keywords                 = {Energy certification},
+  Owner                    = {pbac},
+  Timestamp                = {2017.03.27},
+  Url                      = {http://www.sciencedirect.com/science/article/pii/S037877880800220X}
+}
+
+@Article{Pineda2014,
+  author    = {Pineda, Salvador and Morales, Juan Miguel},
+  title     = {Modeling the Impact of Imbalance Costs on Generating Expansion of Stochastic Units},
+  journal   = {arXiv preprint arXiv:1402.4593},
+  year      = {2014},
+  groups    = {Markets},
+  owner     = {pb},
+  timestamp = {2014.09.26},
+}
+
+@Article{Pineda2014a,
+  author    = {Pineda, Salvador and Morales, Juan Miguel and Boomsma, Trine Krogh},
+  title     = {Impact of Forecast Errors on Expansion Planning of Power Systems with a Renewables Target},
+  journal   = {arXiv preprint arXiv:1402.7163},
+  year      = {2014},
+  groups    = {Planning},
+  owner     = {pb},
+  timestamp = {2014.09.26},
+}
+
+@Article{Pinson2007,
+  author    = {Pinson, P. and Chevallier, C. and Kariniotakis, G.N.},
+  title     = {Trading Wind Generation From Short-Term Probabilistic Forecasts of Wind Power},
+  journal   = {Power Systems, IEEE Transactions on},
+  year      = {2007},
+  volume    = {22},
+  number    = {3},
+  pages     = {1148-1156},
+  month     = {aug.},
+  issn      = {0885-8950},
+  abstract  = {Due to the fluctuating nature of the wind resource, a wind power producer participating in a liberalized electricity market is subject to penalties related to regulation costs. Accurate forecasts of wind generation are therefore paramount for reducing such penalties and thus maximizing revenue. Despite the fact that increasing accuracy in spot forecasts may reduce penalties, this paper shows that, if such forecasts are accompanied with information on their uncertainty, i.e., in the form of predictive distributions, then this can be the basis for defining advanced strategies for market participation. Such strategies permit to further increase revenues and thus enhance competitiveness of wind generation compared to other forms of dispatchable generation. This paper formulates a general methodology for deriving optimal bidding strategies based on probabilistic forecasts of wind generation, as well as on modeling of the sensitivity a wind power producer may have to regulation costs. The benefits resulting from the application of these strategies are clearly demonstrated on the test case of the participation of a multi-MW wind farm in the Dutch electricity market over a year.},
+  doi       = {10.1109/TPWRS.2007.901117},
+  file      = {Pinson2007.pdf:Pinson2007.pdf:PDF},
+  groups    = {OptimalBidding},
+  keywords  = {Dutch electricity market;liberalized electricity market;optimal bidding strategies;regulation costs;short-term probabilistic forecasts;wind farm;wind generation trading;costing;load forecasting;power generation economics;power markets;wind power plants;},
+  owner     = {pb},
+  timestamp = {2012.04.04},
+}
+
+@Article{Pinson2007a,
+  author    = {Pinson, Pierre and Nielsen, Henrik Aa. and Møller, Jan K. and Madsen, Henrik and Kariniotakis, George N.},
+  title     = {Non-parametric probabilistic forecasts of wind power: required properties and evaluation},
+  journal   = {Wind Energy},
+  year      = {2007},
+  volume    = {10},
+  number    = {6},
+  pages     = {497-516},
+  issn      = {1099-1824},
+  abstract  = {Predictions of wind power production for horizons up to 48–72 h ahead comprise a highly valuable input to the methods for the daily management or trading of wind generation. Today, users of wind power predictions are not only provided with point predictions, which are estimates of the conditional expectation of the wind generation for each look-ahead time, but also with uncertainty estimates given by probabilistic forecasts. In order to avoid assumptions on the shape of predictive distributions, these probabilistic predictions are produced from non-parametric methods, and then take the form of a single or a set of quantile forecasts. The required and desirable properties of such probabilistic forecasts are defined and a framework for their evaluation is proposed. This framework is applied for evaluating the quality of two statistical methods producing full predictive distributions from point predictions of wind power. These distributions are defined by a number of quantile forecasts with nominal proportions spanning the unit interval. The relevance and interest of the introduced evaluation framework are discussed. Copyright © 2007 John Wiley & Sons, Ltd.},
+  doi       = {10.1002/we.230},
+  file      = {Pinson2007a.pdf:Pinson2007a.pdf:PDF},
+  groups    = {phdthesis, Forecasting, wind},
+  keywords  = {wind power, uncertainty, probabilistic forecasting, quality evaluation, reliability, sharpness, resolution, skill},
+  owner     = {pb},
+  publisher = {John Wiley \& Sons, Ltd.},
+  timestamp = {2012.04.15},
+  url       = {http://dx.doi.org/10.1002/we.230},
+}
+
+@Article{Pinson2009,
+  author    = {P. Pinson and T.S. Nielsen and H.Aa. Nielsen and N.K. Poulsen and H. Madsen},
+  title     = {Temperature prediction at critical points in district heating systems},
+  journal   = {European Journal of Operational Research},
+  year      = {2009},
+  volume    = {194},
+  number    = {1},
+  pages     = {163-176},
+  issn      = {0377-2217},
+  abstract  = {Current methodologies for the optimal operation of district heating systems use model predictive control. Accurate forecasting of the water temperature at critical points is crucial for meeting constraints related to consumers while minimizing the production costs for the heat supplier. A new forecasting methodology based on conditional finite impulse response (cFIR) models is introduced, for which model coefficients are replaced by coefficient functions of the water flux at the supply point and of the time of day, allowing for nonlinear variations of the time delays. Appropriate estimation methods for both are described. Results are given for the test case of the Roskilde district heating system over a period of more than 6 years. The advantages of the proposed forecasting methodology in terms of a higher forecast accuracy, its use for simulation purposes, or alternatively for better understanding transfer functions of district heating systems, are clearly shown.},
+  doi       = {10.1016/j.ejor.2007.11.065},
+  file      = {Pinson2009.pdf:Pinson2009.pdf:PDF},
+  groups    = {single house forecasting, Forecasting, load},
+  keywords  = {Forecasting},
+  owner     = {pb},
+  timestamp = {2012.03.22},
+}
+
+@Book{Poulsen2007,
+  Title                    = {Stokastisk Adaptiv Regulering},
+  Author                   = {Niels Kjølstad Poulsen},
+  Publisher                = {IMM, DTU},
+  Year                     = {2007},
+  Edition                  = {15. January 2007},
+
+  File                     = {:/home/pb/literature/books/stokastisk_adaptiv_regulering.pdf:PDF},
+  Owner                    = {pb},
+  Timestamp                = {2009.02.05}
+}
+
+@Article{Privara2013,
+  Title                    = {Use of partial least squares within the control relevant identification for buildings },
+  Author                   = {Samuel Prívara and Jiří Cigler and Zdeněk Váňa and Frauke Oldewurtel and Eva Žáčeková},
+  Journal                  = {Control Engineering Practice },
+  Year                     = {2013},
+  Number                   = {1},
+  Pages                    = {113-121},
+  Volume                   = {21},
+
+  Abstract                 = {Climate changes, diminishing world supplies of non-renewable fuels, as well as economic aspects are probably the most significant driving factors of the current effort to save energy. As buildings account for about 40 % of global final energy use, efficient building climate control can significantly contribute to the saving effort. Predictive building automation can be used to operate buildings in an energy and cost effective manner with minimum retrofitting requirements. In such a predictive control approach, dynamic building models are of crucial importance for a good control performance. An algorithm which has not been used in building modeling yet, namely a combination of minimization of multi-step ahead prediction errors and partial least squares will be investigated. Subsequently, two case studies are presented: the first is an artificial model of a building constructed in Trnsys environment, while the second is a real-life case study. The proposed identification algorithm is then validated and tested.},
+  Doi                      = {10.1016/j.conengprac.2012.09.017},
+  ISSN                     = {0967-0661},
+  Keywords                 = {Predictive control relevant identification},
+  Owner                    = {pb},
+  Timestamp                = {2013.11.18},
+  Url                      = {http://www.sciencedirect.com/science/article/pii/S0967066112002006}
+}
+
+@Article{Quiroga2004,
+  author    = {Quiroga, R. Quian and Nadasdy, Z. and Ben-Shaul, Y.},
+  title     = {Unsupervised Spike Detection and Sorting with Wavelets and Superparamagnetic Clustering},
+  journal   = {Neural Computation},
+  year      = {2004},
+  volume    = {16},
+  number    = {8},
+  pages     = {1661--1687},
+  month     = aug,
+  issn      = {0899-7667},
+  doi       = {10.1162/089976604774201631},
+  file      = {Quiroga2004.pdf:Quiroga2004.pdf:PDF},
+  groups    = {kernelpaper},
+  owner     = {pb},
+  publisher = {MIT Press},
+  timestamp = {2013.11.28},
+  url       = {http://dx.doi.org/10.1162/089976604774201631},
+}
+
+@Manual{R2011,
+  Title                    = {R: A Language and Environment for Statistical Computing},
+
+  Address                  = {Vienna, Austria},
+  Author                   = {{R Development Core Team}},
+  Note                     = {{ISBN} 3-900051-07-0},
+  Organization             = {R Foundation for Statistical Computing},
+  Year                     = {2011},
+
+  Owner                    = {pb},
+  Timestamp                = {2012.01.30},
+  Url                      = {http://www.R-project.org/}
+}
+
+@Article{Rabl1988,
+  author                     = {Rabl, A.},
+  title                      = {Parameter-estimation in buildings - methods for dynamic analysis of measured energy use},
+  journal                    = {Journal of Solar Energy Engineering-transactions of the ASME},
+  year                       = {1988},
+  volume                     = {110},
+  number                     = {1},
+  pages                      = {52-66},
+  issn                       = {0199-6231},
+  file                       = {Rabl1988.pdf:Rabl1988.pdf:PDF},
+  groups                     = {Buildings},
+  number-of-cited-references = {47},
+  owner                      = {pb},
+  review                     = {Good overview of different heat dyn. modelling approaches},
+  times-cited                = {10},
+  timestamp                  = {2010.11.15},
+  unique-id                  = {ISI:A1988M172500009},
+}
+
+@Article{Rabl1991,
+  author                     = {Rabl, A. and Norford, L. K.},
+  title                      = {Peak load reduction by preconditioning buildings at night},
+  journal                    = {International Journal of Energy Research},
+  year                       = {1991},
+  volume                     = {15},
+  number                     = {9},
+  pages                      = {781-798},
+  month                      = {DEC},
+  issn                       = {0363-907X},
+  abstract                   = {Cooling loads during the peak period can be reduced if a building is subcooled a few degrees below its normal thermostat set-point during the preceding night. During the day the thermostat must control the warm-up in such a way that the stored energy is released when it brings maximum benefit. This strategy exploits the heat capacity of the building itself; it can be implemented at low cost and without sacrifice of comfort. To evaluate the strategy for a given building and to control the thermostat, one needs to know the thermal behaviour of the building. Building temperature, internal load and solar data are used to determine a small number of thermal parameters which characterize the building. Several thermostat control strategies, distinguished by their knowledge of the building dynamics, are described and simulated, with a data-based dynamic model. Universal graphs for effective storage capacity and storage efficiency are developed. A simple economic analysis shows that subcooling is likely to be cost-effective for many commercial buildings, with current electric rate schedules.},
+  groups                     = {Buildings},
+  number-of-cited-references = {0},
+  owner                      = {pb},
+  times-cited                = {27},
+  timestamp                  = {2010.11.15},
+  unique-id                  = {ISI:A1991GR04100008},
+}
+
+@Article{Rabl1992,
+  author                     = {Rabl, A. and Rialhe, A.},
+  title                      = {Energy signature models for commercial buildings - test with measured data and interpretation},
+  journal                    = {Energy and Buildings},
+  year                       = {1992},
+  volume                     = {19},
+  number                     = {2},
+  pages                      = {143-154},
+  issn                       = {0378-7788},
+  abstract                   = {The purpose of this paper is twofold: to see if the application of the energy signature model PRISM to commercial buildings can be improved by adding occupancy as a variable, and to examine what one can learn from the individual parameters that have been identified. Using occupancy rate as proxy for the operating mode of the building and its HVAC system, the model is generalized by doubling the number of parameters and distinguishing two types of day, occupied and unoccupied. This approach is tested with measured consumption data for some fifty commercial buildings. The results show that occupancy data can bring appreciable improvement in the accuracy of the model. However, the interpretation of the individual parameters, such as slope and balance point temperature, should be undertaken with great caution. Due to various biases the discrepancy between the parameters identified by an energy signature model and the true values can differ by far more than the standard errors indicated by the regression. Such biases are particularly important in commercial buildings, as we demonstrate with several examples.},
+  file                       = {Rabl1992.pdf:Rabl1992.pdf:PDF},
+  groups                     = {Buildings},
+  number-of-cited-references = {0},
+  owner                      = {pb},
+  review                     = {Doesn't seem to be interesting concerning modelling techniques.},
+  times-cited                = {12},
+  timestamp                  = {2010.11.16},
+  unique-id                  = {ISI:A1992KE43300008},
+}
+
+@Article{Rahimiyan2011,
+  author    = {Rahimiyan, Morteza and Morales, Juan M and Conejo, Antonio J},
+  title     = {Evaluating alternative offering strategies for wind producers in a pool},
+  journal   = {Applied Energy},
+  year      = {2011},
+  volume    = {88},
+  number    = {12},
+  pages     = {4918--4926},
+  groups    = {Markets},
+  owner     = {pb},
+  publisher = {Elsevier},
+  timestamp = {2014.09.26},
+}
+
+@Article{Raiffa1974,
+  author    = {Raiffa, Howard},
+  title     = {Applied statistical decision theory},
+  year      = {1974},
+  groups    = {OptimalBidding},
+  owner     = {pb},
+  publisher = {Div. of Research, Graduate School of Business Administration, Harvard Univ.},
+  timestamp = {2015.12.18},
+}
+
+@Article{Reddy1991,
+  author    = {T.A. Reddy and L.K. Norford and W. Kempton},
+  title     = {Shaving residential air-conditioner electricity peaks by intelligent use of the building thermal mass},
+  journal   = {Energy},
+  year      = {1991},
+  volume    = {16},
+  number    = {7},
+  pages     = {1001-1010},
+  issn      = {0360-5442},
+  abstract  = {The residential air-conditioning load is a significant component of electric utility peak demand, which typically occurs on very hot summer afternoons. Efforts by utilities to shave or shift air-conditioning demand to off-peak periods in the day have been spurred by low-cost electronics and include such strategies as direct-load control and price-induced local control by homeowners. We propose alternative practical strategies of peak shaving that use the opportunities offered by modern electronics as well as a more intelligent use of the thermal mass storage inherent in the structure and furnishings of the house. Using the framework of a simplified electrical analogue, we predict the thermal performance of the residence when the air-conditioner is switched off and illustrate the validity of such simplified estimates with monitored data from an actual residence. Finally, we discuss practical aspects related to the implementation of these strategies, particularly as to what is needed in terms of electronic sophistication of the thermostat.},
+  doi       = {10.1016/0360-5442(91)90060-Y},
+  file      = {Reddy1991.pdf:Reddy1991.pdf:PDF},
+  groups    = {single house forecasting},
+  owner     = {pb},
+  timestamp = {2012.03.20},
+}
+
+@Article{Reid1977,
+  author    = {Reid, J.},
+  title     = {Structural identifiability in linear time-invariant systems},
+  journal   = {IEEE Transactions on Automatic Control},
+  year      = {1977},
+  volume    = {22},
+  number    = {2},
+  pages     = {242-246},
+  month     = {Apr},
+  issn      = {0018-9286},
+  abstract  = {A matrix-operator representation of parameter sensitivities is used to provide an algebraic "structural" analysis of local parameter identifiability in linear time-invariant ordinary differential equation systems. Necessary conditions for identifiability depend only upon the system matrices, no integrals must be computed, and arbitrary parametrization may be used. Relations to insensitivity are discussed, and design techniques are suggested which use the nonidentifiable (insensitive) subspace to systematically reduce the number of exciting parameters in individually designed parameter identification experiments. Finally, sufficiency conditions for zero-state identifiability are examined in terms of control inputs which continually excite the natural modes of the parameter sensitivities.},
+  doi       = {10.1109/TAC.1977.1101474},
+  file      = {Reid1977.pdf:Reid1977.pdf:PDF},
+  groups    = {Identifiability},
+  keywords  = {Linear systems, time-invariant continuous-time;Parameter identification;Sensitivity analysis;Automatic control;Control systems;Frequency domain analysis;Linear systems;Observability;Parameter estimation;Polynomials;Riccati equations;Stability;Transfer functions},
+  owner     = {pb},
+  timestamp = {2014.09.19},
+}
+
+@Article{Reindl1990,
+  author    = {D.T. Reindl and W.A. Beckman and J.A. Duffie},
+  title     = {Evaluation of hourly tilted surface radiation models},
+  journal   = {Solar Energy},
+  year      = {1990},
+  volume    = {45},
+  number    = {1},
+  pages     = {9-17},
+  issn      = {0038-092X},
+  abstract  = {This study investigates the performance of the isotropic and four anisotropic hourly tilted surface radiation models by using monthly average hourly utilizable energy as a standard of measure. Utilizable energy is the radiation above a specified threshold level. Differences between the utilizable energy measured and the utilizable energy predicted are observed for various surface slope/azimuth orientations and critical radiation levels. Normalized root mean square difference and normalized mean bias difference statistics are formed to quantify the ability of each model to estimate the utilizable energy on a tilted surface. The influence of horizontal diffuse radiation on tilted surface model performance is examined by comparing the predicted utilizable energy on a tilted surface using both measured horizontal diffuse and estimated horizontal diffuse found from diffuse fraction correlations. On an overall basis, the isotropic sky model showed the poorest performance and is not recommended for estimating the hourly radiation on a tilted surface. The anisotropic models have comparable performance to each other. There was no significant degradation of tilted surface model performance when the diffuse radiation is estimated from a diffuse fraction correlation rather than obtained from measurements.},
+  doi       = {10.1016/0038-092X(90)90061-G},
+  file      = {Reindl1990.pdf:Reindl1990.pdf:PDF},
+  groups    = {single house forecasting},
+  owner     = {pb},
+  timestamp = {2012.03.27},
+}
+
+@Article{Reynders2014,
+  Title                    = {Quality of grey-box models and identified parameters as function of the accuracy of input and observation signals},
+  Author                   = {G. Reynders and J. Diriken and D. Saelens},
+  Journal                  = {Energy and Buildings },
+  Year                     = {2014},
+  Pages                    = {263 - 274},
+  Volume                   = {82},
+
+  Abstract                 = {The integration of buildings in a Smart Grid, enabling demand-side management and thermal storage, requires robust reduced-order building models that allow for the development and evaluation of demand-side management control strategies. To develop such models for existing buildings, with often unknown thermal properties, data-driven system identification methods are proposed. In this paper, system identification is carried out to identify suitable reduced-order models. Therefore, grey-box models of increasing complexity are identified on results from simulations with a detailed physical model, deployed in the integrated district energy assessment simulation (IDEAS) package in Modelica. Firstly, the robustness of identified grey-box models for day-ahead predictions and simulations of the thermal response of a dwelling, as well as the physical interpretation of the identified parameters, are analyzed. The influence of the identification dataset is quantified, comparing the added value of dedicated identification experiments against identification on data from in use buildings. Secondly, the influence of the data used for identification on model performance and the reliability of the parameter estimates is quantified. Both alternative measurements and the influence of noise on the data are considered.},
+  Doi                      = {10.1016/j.enbuild.2014.07.025},
+  ISSN                     = {0378-7788},
+  Keywords                 = {Reduced-order models},
+  Owner                    = {pb},
+  Timestamp                = {2016.07.29},
+  Url                      = {http://www.sciencedirect.com/science/article/pii/S0378778814005623}
+}
+
+@Article{Richalet2001,
+  author                     = {Richalet, V and Neirac, FP and Tellez, F and Marco, J and Bloem, JJ},
+  title                      = {HELP (house energy labeling procedure): methodology and present results},
+  journal                    = {Energy and Buildings},
+  year                       = {2001},
+  volume                     = {33},
+  number                     = {3},
+  pages                      = {229-233},
+  month                      = {FEB},
+  issn                       = {0378-7788},
+  note                       = {2nd European Conf on Energy Performance and Indoor Climate in Building/3rd Int Conf on Indoor Air Quality, Ventilation and Energy Conservation in Buildings, LYON, FRANCE, NOV 19-21, 1998},
+  abstract                   = {The problem of energy certification in the housing stock remains a priority for the European Community Member states referring to the European Directive N. 93/76/EEC. It is also a certainty that the potential for energy conservation in the existing buildings is very large, and there is a need for a rating of their energy performance. Facing this problem and the lack of tools, we chose to develop a measurement based approach, often referred to as an identification method. The principle of this method is to derive the thermal behavior from a continuous recording of internal temperature within the building in response to outdoor climate (temperature and solar radiation) and internal loads (heating and appliances). The derived parameters are then used to calculate a normalized heating annual consumption (NHAC) for a standard climate and a standard operation of the building (set point temperature, air change rate and casual gains). The paper presents the proposed methodology for single family houses, including the experimental equipment, the monitoring protocol and the calculation tool. Results are also discussed for a set of 10 monitored houses within Europe. Although some limitations of the methodology were found, that will require deeper investigations, the NHAC was found a robust indicator to deal with occupants' influence. (C) 2001 Elsevier Science B.V. All rights reserved.},
+  file                       = {Richalet2001.pdf:Richalet2001.pdf:PDF},
+  groups                     = {Buildings},
+  number-of-cited-references = {7},
+  owner                      = {pb},
+  review                     = {Description of energy labeling procedure. Not interesting modelling technique wise, some linear regression applied.},
+  times-cited                = {5},
+  timestamp                  = {2010.11.16},
+  unique-id                  = {ISI:000166418000008},
+}
+
+@Article{Rigollier2000,
+  author    = {Christelle Rigollier and Olivier Bauer and Lucien Wald},
+  title     = {On the clear sky model of the ESRA - European Solar Radiation Atlas - with respect to the heliosat method},
+  journal   = {Solar Energy},
+  year      = {2000},
+  volume    = {68},
+  number    = {1},
+  pages     = {33-48},
+  issn      = {0038-092X},
+  abstract  = {This paper presents a clear-sky model, which has been developed in the framework of the new digital European Solar Radiation Atlas (ESRA). This ESRA model is described and analysed with the main objective of being used to estimate solar radiation at ground level from satellite images with the Heliosat method. Therefore it is compared to clear-sky models that have already been used in the Heliosat method. The diffuse clear-sky irradiation estimated by this ESRA model and by other models has been also checked against ground measurements, for different ranges of the Linke turbidity factor and solar elevation. The results show that the ESRA model is the best one with respect to robustness and accuracy. The r.m.s. error in the estimation of the hourly diffuse irradiation ranges from 11 Wh m−2 to 35 Wh m−2 for diffuse irradiation up to 250 Wh m−2. The good results obtained with such a model are due to the fact that it takes into account the Linke turbidity factor and the elevation of the site, two factors that influence the incoming solar radiation. In return, it implies the knowledge of these factors at each pixel of the satellite image for the application of the Heliosat method.},
+  doi       = {10.1016/S0038-092X(99)00055-9},
+  file      = {Rigollier2000.pdf:Rigollier2000.pdf:PDF},
+  groups    = {correction},
+  owner     = {pb},
+  timestamp = {2012.03.17},
+}
+
+@Article{Robinson1983,
+  Title                    = {Nonparametric estimators for time series},
+  Author                   = {Robinson, P. M.},
+  Journal                  = {Journal of Time Series Analysis},
+  Year                     = {1983},
+  Number                   = {3},
+  Pages                    = {185-207},
+  Volume                   = {4},
+
+  Abstract                 = {Kernel multivariate probability density and regression estimators are applied to a univariate strictly stationary time series Xt. We consider estimators of the joint probability density of Xt at different t-values, of conditional probability densities, and of the conditional expectation of functionals of Xt, given past behaviour. The methods seem of particular relevance in light of recent interest in non-Gaussian time series models. Under a strong mixing condition multivariate central limit theorems for estimators at distinct points are established, the asymptotic distributions being of the same nature as those which would derive from independent multivariate observations.},
+  Doi                      = {10.1111/j.1467-9892.1983.tb00368.x},
+  ISSN                     = {1467-9892},
+  Keywords                 = {Kernel estimators, stationary time series, central limit theorem, strong mixing condition, non-Gaussian time series models, nonlinear prediction},
+  Owner                    = {pb},
+  Publisher                = {Blackwell Publishing Ltd},
+  Timestamp                = {2013.11.18},
+  Url                      = {http://dx.doi.org/10.1111/j.1467-9892.1983.tb00368.x}
+}
+
+@Article{Ronnelid1997,
+  author    = {Rönnelid, Mats and Perers, Bengt and Karlsson, Björn},
+  title     = {On the factorisation of incidence angle modifiers for CPC collectors},
+  journal   = {Solar Energy},
+  year      = {1997},
+  volume    = {59},
+  number    = {4-6},
+  pages     = {281-286},
+  issn      = {0038092x},
+  abstract  = {It has been suggested earlier that the incidence angle modifier Kτα for low concentrating collectors with tubular absorbers could be factorised according to Kτα(θt,θl) ∝ Kτα(θt,0)Kτα(0,θl), where θt and θl are the projected incidence angles in the transversal and longitudinal projection planes, respectively. Ray-tracing calculations on low-concentrating CPC collectors with flat absorbers parallel to the cover show that a Kτα factorisation overestimates the annual delivered energy from the collector by about 4-5\%, when compared to calculations using the full incidence angle modifier. Data from outdoor testing has been used for characterization of incidence angle behaviour for a truncated CPC with a concentration of C = 1.56. Multiple linear regression analysis was used. This analysis technique makes feasible the determination of angular dependent incident angle modifiers and is an efficient tool to use for all collectors which cannot be characterised by standard equations of the incidence angle dependence.},
+  file      = {Ronnelid1997.pdf:Ronnelid1997.pdf:PDF},
+  groups    = {Collector testing},
+  owner     = {pb},
+  timestamp = {2011.02.24},
+}
+
+@Article{Roulet2002,
+  author                     = {Roulet, CA and Flourentzou, F and Labben, HH and Santamouris, M and Koronaki, I and Dascalaki, E and Richalet, V},
+  title                      = {ORME: A multicriteria rating methodology for buildings},
+  journal                    = {Building and Environment},
+  year                       = {2002},
+  volume                     = {37},
+  number                     = {6},
+  pages                      = {579-586},
+  month                      = {JUN},
+  issn                       = {0360-1323},
+  abstract                   = {To check the compliance of a building with regulations, evaluate the efficiency of retrofit, or even label a building one would in most cases perform a comparison of a number of building qualities. Within the framework of the European Joule-Thermic OFFICE project, a multicriteria rating methodology was developed for this purpose, based on a rating method that uses principal component analysis, and a ranking method that uses a partial aggregation technique. The aim of this methodology is to rate or to rank office buildings and retrofit scenarios of the same building according to an extended list of parameters, including: energy use for heating, cooling and other appliances, impact on external environment, indoor environment quality, cost. The paper presents the principles used in the methodology, and some examples of application to actual buildings. More information is given in a complete report (ORME-Office building rating methodology for Europe, Office Project Report, University of Athens, 1999). (C) 2002 Elsevier Science Ltd. All rights reserved.},
+  file                       = {Roulet2002.pdf:Roulet2002.pdf:PDF},
+  groups                     = {Buildings},
+  number-of-cited-references = {23},
+  owner                      = {pb},
+  review                     = {procedure for energy labelling},
+  times-cited                = {10},
+  timestamp                  = {2010.11.17},
+  unique-id                  = {ISI:000176543900005},
+}
+
+@Article{Roulston2003,
+  author    = {M.S. Roulston and D.T. Kaplan and J. Hardenberg and L.A. Smith},
+  title     = {Using medium-range weather forecasts to improve the value of wind energy production},
+  journal   = {Renewable Energy},
+  year      = {2003},
+  volume    = {28},
+  number    = {4},
+  pages     = {585 - 602},
+  issn      = {0960-1481},
+  abstract  = {The value of different strategies for consolidating the information in European Centre for Medium Range Weather Forecasting (ECMWF) forecasts to wind energy generators is investigated. Simulating the performance of generators using the different strategies in the context of a simplified electricity market revealed that ECMWF forecasts in production decisions improved the performance of generators at lead times of up to 6 days. Basing half-hourly production decisions on a production forecast generated by conditioning the climate on the ECMWF operational ensemble forecast yields the best results of all the strategies tested.},
+  doi       = {10.1016/S0960-1481(02)00054-X},
+  file      = {Roulston2003.pdf:Roulston2003.pdf:PDF},
+  groups    = {OptimalBidding},
+  owner     = {pb},
+  timestamp = {2015.12.18},
+  url       = {http://www.sciencedirect.com/science/article/pii/S096014810200054X},
+}
+
+@Book{Rousseeuw2005,
+  Title                    = {Robust regression and outlier detection},
+  Author                   = {Rousseeuw, Peter J and Leroy, Annick M},
+  Publisher                = {Wiley},
+  Year                     = {2005},
+  Volume                   = {589},
+
+  Owner                    = {pb},
+  Timestamp                = {2013.11.18}
+}
+
+@Article{Ruiz-Arias2010,
+  author    = {Ruiz-Arias, J.A. and Alsamamra, H. and Tovar-Pescador, J. and Pozo-Vázquez, D.},
+  title     = {Proposal of a regressive model for the hourly diffuse solar radiation under all sky conditions},
+  journal   = {Energy Conversion and Management},
+  year      = {2010},
+  volume    = {51},
+  number    = {5},
+  pages     = {881-893},
+  issn      = {01968904},
+  abstract  = {In this work, we propose a new regressive model for the estimation of the hourly diffuse solar irradiation under all sky conditions. This new model is based on the sigmoid function and uses the clearness index and the relative optical mass as predictors. The model performance was compared against other five regressive models using radiation data corresponding to 21 stations in the USA and Europe. In a first part, the 21 stations were grouped into seven subregions (corresponding to seven different climatic regions) and all the models were locally-fitted and evaluated using these seven datasets. Results showed that the new proposed model provides slightly better estimates. Particularly, this new model provides a relative root mean square error in the range 25–35\% and a relative mean bias error in the range −15\% to 15\%, depending on the region. In a second part, the potential global character of the new model was evaluated. To this end, the model was fitted using the whole dataset. Results showed that the global fitting model provides overall better estimates that the locally-fitted models, with relative root mean square error values ranging 20–35\% and a relative mean bias error ranging −5\% to −12\%. Additionally, the new proposed model showed some advantages compared to other evaluated models. Particularly, the sigmoid behaviour of this model is able to provide physically reliable estimates for extreme values of the clearness index even though using less parameter than other tested models.},
+  doi       = {10.1016/j.enconman.2009.11.024},
+  file      = {Ruiz-Arias2010.pdf:Ruiz-Arias2010.pdf:PDF},
+  groups    = {single house forecasting, correction},
+  owner     = {pb},
+  timestamp = {2012.01.29},
+}
+
+@Article{Saez-Gallego2014,
+  author    = {Saez-Gallego, Javier and Morales, Juan M and Madsen, Henrik and J{\'o}nsson, Tryggvi},
+  title     = {Determining reserve requirements in DK1 area of Nord Pool using a probabilistic approach},
+  journal   = {Energy},
+  year      = {2014},
+  groups    = {Planning},
+  owner     = {pb},
+  publisher = {Pergamon},
+  timestamp = {2014.09.26},
+}
+
+@TechReport{Saint-Aubain2012,
+  author      = {Saint-Aubain, Philip Anton de and Bacher, Peder and Nielsen, Henrik Aalborg and Madsen, Henrik},
+  title       = {Methods for splitting readings of total energy consumption},
+  institution = {DTU Informatics},
+  year        = {2012},
+  type        = {{iPower report - Work Package 1}},
+  file        = {Saint-Aubain2012.pdf:Saint-Aubain2012.pdf:PDF},
+  groups      = {Other models},
+  owner       = {pb},
+  timestamp   = {2012.10.23},
+}
+
+@MastersThesis{Saint-Aubain2011,
+  Title                    = {Adaptive Load Forecasting},
+  Author                   = {Saint-Aubain, P. d.},
+  School                   = {Technical University of Denmark},
+  Year                     = {2011},
+
+  Abstract                 = {The purpose of this thesis is to contribute to the research in forecasting energy consumption in residential houses. The work is motivated by the Danish iPower project, which deals with investigation of possibilities for replacing fossil fuel with renewable energy. Renewable energy in Denmark is mostly based on wind power which is a highly fluctuating energy source and it is difficult to conserve. Energy consumption is also varying but independent of supply to the power plant. The fact that energy supply and energy consumption is not synchronized could be handled with a methodology that facilitates using the energy when present. The present work provides an adaptive method to get detailed knowledge of the energy consumption in residential houses. The method will be a contribution to forecasting energy consumption and to the development of Smart Grid technology. The approach taken is to reveal the details in the heating consumption in residential houses by developing mathematical models for the heat load. Based on district heating consumption data from four houses in a small area in Denmark and data from a nearby meteorological station, models are developed for separating the heating signals into different components. One of the models is able to split the overall consumption into heating consumption and hot water consumption. The heating consumption is further separated into parts explained by diurnal variation and variation explained by changes in outdoor temperature and the amount of solar radiation present. The method is adaptive to changes in the consumption due to variation in the daily routine of the inhabitants. The results are obtained by using mathematical modeling, statistics and time series analysis. For separating the hot water consumption and heating Low Pass Filters and advanced Kernel Smoothing techniques are used. The Kernel Smoother is extended to contain robust estimation and polynomial shape kernels. The further separation of the heating consumption is done with Kalman Filter techniques for signal separation.},
+  File                     = {Saint-Aubain2011.pdf:Saint-Aubain2011.pdf:PDF},
+  Owner                    = {pb},
+  Timestamp                = {2012.04.26}
+}
+
+@InProceedings{Sayeef2011,
+  author    = {Sayeef, Saad and Moore, Tim and Percy, Steven and Cornforth, David and Ward, John and Rowe, Daniel},
+  title     = {Characterisation and Integration of High Penetration Solar Power in Australia – A Solar Intermittency Study},
+  booktitle = {1st International Workshop on the Integration of Solar Power into Power Systems Aarhus, Denmark, 24 October 2011},
+  year      = {2011},
+  file      = {Sayeef2011.pdf:Sayeef2011.pdf:PDF},
+  groups    = {phdthesis},
+  owner     = {pb},
+  timestamp = {2012.04.01},
+}
+
+@Article{Schick1998,
+  author    = {Schick, A.},
+  title     = {An Adaptive Estimator of the Autocorrelation Coefficient in Regression Models with Autoregressive Errors},
+  journal   = {Journal of Time Series Analysis},
+  year      = {1998},
+  volume    = {19},
+  number    = {5},
+  pages     = {575-589},
+  issn      = {01439782},
+  abstract  = {In this paper an adaptive estimator of the autocorrelation coefficient is constructed in regression models whose error variables follow a stationary autoregressive process of order 1. Examples of nonparametric, additive and semiparametric regression models are discussed.},
+  file      = {Schick1998.pdf:Schick1998.pdf:PDF},
+  groups    = {Adaptive Autocorrelation},
+  owner     = {pb},
+  publisher = {Blackwell Publishers},
+  timestamp = {2011.01.07},
+}
+
+@InProceedings{Schmelter2011,
+  author    = {Schmelter, Jan and Lange, Matthias and Focken, Ulrich},
+  title     = {Weather class depending combinations of solar power forecasts – operational experiences},
+  booktitle = {1st International Workshop on the Integration of Solar Power into Power Systems Aarhus, Denmark, 24 October 2011},
+  year      = {2011},
+  file      = {Schmelter2011.pdf:Schmelter2011.pdf:PDF},
+  groups    = {forecasting},
+  owner     = {pb},
+  timestamp = {2012.04.01},
+}
+
+@Article{Shah1991,
+  Title                    = {Recursive least squares based estimation schemes for self-tuning control},
+  Author                   = {Shah, Sirish L. and Cluett, William R.},
+  Journal                  = {The Canadian Journal of Chemical Engineering},
+  Year                     = {1991},
+  Number                   = {1},
+  Pages                    = {89--96},
+  Volume                   = {69},
+
+  Doi                      = {10.1002/cjce.5450690111},
+  File                     = {Shah1991.pdf:Shah1991.pdf:PDF},
+  ISSN                     = {1939-019X},
+  Keywords                 = {parameter estimation, recursive least squares, self-tuning control},
+  Owner                    = {pbac},
+  Publisher                = {Wiley Subscription Services, Inc., A Wiley Company},
+  Timestamp                = {2017.03.29},
+  Url                      = {http://dx.doi.org/10.1002/cjce.5450690111}
+}
+
+@Article{Siroky2011,
+  author    = {Jan Široký and Frauke Oldewurtel and Jiří Cigler and Samuel Prívara},
+  title     = {Experimental analysis of model predictive control for an energy efficient building heating system},
+  journal   = {Applied Energy},
+  year      = {2011},
+  volume    = {88},
+  number    = {9},
+  pages     = {3079-3087},
+  issn      = {0306-2619},
+  abstract  = {Low energy buildings have attracted lots of attention in recent years. Most of the research is focused on the building construction or alternative energy sources. In contrary, this paper presents a general methodology of minimizing energy consumption using current energy sources and minimal retrofitting, but instead making use of advanced control techniques. We focus on the analysis of energy savings that can be achieved in a building heating system by applying model predictive control (MPC) and using weather predictions. The basic formulation of MPC is described with emphasis on the building control application and tested in a two months experiment performed on a real building in Prague, Czech Republic.},
+  doi       = {10.1016/j.apenergy.2011.03.009},
+  file      = {Siroky2011.pdf:Siroky2011.pdf:PDF},
+  groups    = {phdthesis},
+  keywords  = {Building heating system},
+  owner     = {pb},
+  timestamp = {2012.04.13},
+}
+
+@Misc{Solarkeydk,
+  Title                    = {{Homepage and database (all tested collectors according to EN12975 in Europe)}},
+
+  Author                   = {{Solar Keymark}},
+  Year                     = {2011},
+
+  Owner                    = {pb},
+  Timestamp                = {2011.08.11},
+  Url                      = {http://solarkey.dk/solarkeymarkdata/qCollectorCertificates/ShowQCollectorCertificatesTable.aspx}
+}
+
+@PhdThesis{Sonderegger1978,
+  author    = {Sonderegger, R. C.},
+  title     = {Dynamic models of house heating based on equivalent thermal parameters},
+  school    = {Princeton Univ., NJ.},
+  year      = {1978},
+  adsnote   = {Provided by the SAO/NASA Astrophysics Data System},
+  adsurl    = {http://adsabs.harvard.edu/abs/1978PhDT.......107S},
+  groups    = {Buildings},
+  keywords  = {DYNAMIC MODELS, ENERGY CONSUMPTION, HEATING EQUIPMENT, RESIDENTIAL AREAS, THERMAL DIFFUSION, ANALOG CIRCUITS, COMPUTERIZED SIMULATION, CONSTRUCTION, SOLAR HEATING},
+  owner     = {pb},
+  timestamp = {2010.11.16},
+}
+
+@Article{Song2014,
+  Title                    = {Short-term wind speed forecasting with Markov-switching model },
+  Author                   = {Zhe Song and Yu Jiang and Zijun Zhang},
+  Journal                  = {Applied Energy },
+  Year                     = {2014},
+  Number                   = {0},
+  Pages                    = {103 - 112},
+  Volume                   = {130},
+
+  Abstract                 = {A Markov-switching model in wind speed forecasting is examined in this research. The proposed method employs a regime switching process governed by a discrete-state Markov chain to model the nonlinear evolvement of the wind speed time-series. A Bayesian inference rather than the traditional maximum likelihood estimation is applied to evaluate the parameters of the Markov-switching model. Unlike the traditional point forecast of wind speeds, the Markov-switching model can offer both of the point and interval wind speed forecast. To examine the forecasting performance of the Markov-switching model, four wind speed forecasting models, the persistent model, the autoregressive model, the neural networks model, and the Bayesian structural break model, are employed as baselines. Wind speed data collected from utility-scale wind turbines are utilized for the model development and the computational results demonstrate that the Markov-switching model is promising in wind speed forecasting.},
+  Doi                      = {10.1016/j.apenergy.2014.05.026},
+  ISSN                     = {0306-2619},
+  Keywords                 = {Time series},
+  Owner                    = {pb},
+  Timestamp                = {2014.06.24},
+  Url                      = {http://www.sciencedirect.com/science/article/pii/S0306261914005212}
+}
+
+@Article{Spliid1983,
+  Title                    = {A Fast Estimation Method for the Vector Autoregressive Moving Average Model With Exogenous Variables},
+  Author                   = {Henrik Spliid},
+  Journal                  = {Journal of the American Statistical Association},
+  Year                     = {1983},
+  Number                   = {384},
+  Pages                    = {843-849},
+  Volume                   = {78},
+
+  Abstract                 = {A very fast and simple algorithm for estimation of the parameters of large multivariate time series and distributed lag models is presented. An analysis of the distribution of the estimates shows that they are asymptotically normal and unbiased, and that they have a variance that decreases like 1/n, n being the sample size. The algorithm is especially applicable for estimation of large multivariate models where it is generally many times faster than maximalization algorithms.},
+  ISSN                     = {01621459},
+  Owner                    = {pb},
+  Publisher                = {[American Statistical Association, Taylor \& Francis, Ltd.]},
+  Timestamp                = {2016.06.21},
+  Url                      = {http://www.jstor.org/stable/2288194}
+}
+
+@Article{Stephenson2005,
+  author    = {Stephenson, Alec and Gilleland, Eric},
+  title     = {Software for the analysis of extreme events: The current state and future directions},
+  journal   = {Extremes},
+  year      = {2005},
+  volume    = {8},
+  number    = {3},
+  pages     = {87--109},
+  file      = {Stephenson2005.pdf:Stephenson2005.pdf:PDF},
+  groups    = {EVT},
+  owner     = {pb},
+  publisher = {Springer},
+  quality   = {1},
+  timestamp = {2014.04.07},
+}
+
+@Article{Strachan1993,
+  author                     = {Strachan, P},
+  title                      = {Model validation using the PASSYS test cells},
+  journal                    = {Building and Environment},
+  year                       = {1993},
+  volume                     = {28},
+  number                     = {2},
+  pages                      = {153-165},
+  month                      = {APR},
+  issn                       = {0360-1323},
+  abstract                   = {This paper focuses on the empirical whole model validation effort being undertaken as part of the CEC PASSYS project, placing it in the context of an overall validation methodology. High quality datasets lie al the heart of empirical validation, and reasons are given as to why test cells are considered to provide the best available environment for their acquisition. Criteria are set out that help ensure that the collected datasets are of value for validation, and the various elements of an empirical validation methodology are elaborated. Finally, some preliminary results and analysis are presented.},
+  file                       = {Strachan1993.pdf:Strachan1993.pdf:PDF},
+  groups                     = {Buildings},
+  number-of-cited-references = {0},
+  owner                      = {pb},
+  review                     = {Doesn't seem to be interesting concerning modelling techniques.},
+  times-cited                = {12},
+  timestamp                  = {2010.11.17},
+  unique-id                  = {ISI:A1993LG71900007},
+}
+
+@Article{Stritih2003,
+  author                     = {Stritih, U},
+  title                      = {Heat transfer enhancement in latent heat thermal storage system for buildings},
+  journal                    = {Energy and Buildings},
+  year                       = {2003},
+  volume                     = {35},
+  number                     = {11},
+  pages                      = {1097-1104},
+  month                      = {DEC},
+  issn                       = {0378-7788},
+  abstract                   = {In this article a review of heat storage technologies with phase change materials (PCMs) is given. In addition, we present paraffin as phase change material in solar heat storage wall with proposals of heat transfer enhancement. The mathematical model for heat transport in heat storage is made. The results obtained with the simulation gives the time dynamics of heat accumulation in phase change material with fins as the media for heat transport enhancement. We found out that the most influential of the parameters is the distance between the fins. (C) 2003 Elsevier B.V. All rights reserved.},
+  doi                        = {10.1016/j.enbuild.2003.07.001},
+  file                       = {Stritih2003.pdf:Stritih2003.pdf:PDF},
+  groups                     = {Buildings},
+  number-of-cited-references = {27},
+  owner                      = {pb},
+  times-cited                = {23},
+  timestamp                  = {2010.11.16},
+  unique-id                  = {ISI:000187782200003},
+}
+
+@Article{Tan2010,
+  Title                    = {Day-ahead electricity price forecasting using wavelet transform combined with {ARIMA} and {GARCH} models},
+  Author                   = {Zhongfu Tan and Jinliang Zhang and Jianhui Wang and Jun Xu},
+  Journal                  = {Applied Energy },
+  Year                     = {2010},
+  Number                   = {11},
+  Pages                    = {3606 - 3610},
+  Volume                   = {87},
+
+  Abstract                 = {This paper proposes a novel price forecasting method based on wavelet transform combined with ARIMA and GARCH models. By wavelet transform, the historical price series is decomposed and reconstructed into one approximation series and some detail series. Then each subseries can be separately predicted by a suitable time series model. The final forecast is obtained by composing the forecasted results of each subseries. This proposed method is examined on Spanish and PJM electricity markets and compared with some other forecasting methods.},
+  Doi                      = {10.1016/j.apenergy.2010.05.012},
+  File                     = {Tan2010.pdf:Tan2010.pdf:PDF},
+  ISSN                     = {0306-2619},
+  Keywords                 = {Price forecasting},
+  Owner                    = {pb},
+  Timestamp                = {2014.06.23},
+  Url                      = {http://www.sciencedirect.com/science/article/pii/S0306261910001807}
+}
+
+@Unpublished{Thygesen2009,
+  Title                    = {Diffusive transport and random walks},
+  Author                   = {Uffe Høgsbro Thygesen},
+  Note                     = {Course: SDE},
+
+  Month                    = {Feb.},
+  Year                     = {2009},
+
+  File                     = {:/home/pb/literature/courses/sde/transport.pdf:PDF},
+  Owner                    = {pb},
+  Timestamp                = {2009.02.09}
+}
+
+@Unpublished{Thygesen2009a,
+  Title                    = {Probability spaces},
+  Author                   = {Uffe Høgsbro Thygesen},
+  Note                     = {Course: SDE},
+
+  Month                    = {Feb.},
+  Year                     = {2009},
+
+  File                     = {:/home/pb/literature/courses/sde/measure.pdf:PDF},
+  Owner                    = {pb},
+  Timestamp                = {2009.02.09}
+}
+
+@Unpublished{Thygesen2009b,
+  Title                    = {Linear systems driven by noise},
+  Author                   = {Uffe Høgsbro Thygesen},
+  Note                     = {Course: SDE},
+
+  Month                    = {Feb.},
+  Year                     = {2009},
+
+  File                     = {:/home/pb/literature/courses/sde/linear.pdf:PDF},
+  Owner                    = {pb},
+  Timestamp                = {2009.02.09}
+}
+
+@Unpublished{Thygesen2009c,
+  Title                    = {Exercise 1: Diffusive transport in shear flow},
+  Author                   = {Uffe Høgsbro Thygesen},
+  Note                     = {Course: SDE},
+
+  Month                    = {Feb.},
+  Year                     = {2009},
+
+  File                     = {:/home/pb/literature/courses/sde/Exercise-Shear-diffusion.pdf:PDF},
+  Owner                    = {pb},
+  Timestamp                = {2009.02.09}
+}
+
+@Article{Trombe2012,
+  author    = {Trombe, Pierre-Julien and Pinson, Pierre and Madsen, Henrik},
+  title     = {A general probabilistic forecasting framework for offshore wind power fluctuations},
+  journal   = {Energies},
+  year      = {2012},
+  volume    = {5},
+  number    = {3},
+  pages     = {621--657},
+  file      = {Trombe2012.pdf:Trombe2012.pdf:PDF},
+  groups    = {Forecasting, wind},
+  owner     = {pb},
+  quality   = {1},
+  timestamp = {2014.06.24},
+}
+
+@TechReport{vinther2011hovedrapport,
+  Title                    = {Hovedrapport for Smart Grid Netværkets arbejde},
+  Author                   = {Vinther, D. and Dreyer, P. and Troi, A. and Aagaard, L. and Tang, J. and Nielsen, S.P. and Hjortkjær, A.G. and Balasiu, A. and Høyer, H.J. and Abildgaard, J. and others},
+  Institution              = {Klima-, Energi- og Bygningsministeriet},
+  Year                     = {2011},
+
+  File                     = {:/home/pb/j/literature/techreports/Hovedrapport_for_Smart_Grid_Netvaerkets_arbejde.pdf:PDF},
+  Owner                    = {pb},
+  Timestamp                = {2013.01.03}
+}
+
+@Article{Wang1999,
+  author                     = {Wang, SW},
+  title                      = {Dynamic simulation of building VAV air-conditioning system and evaluation of EMCS on-line control strategies},
+  journal                    = {Building and Environment},
+  year                       = {1999},
+  volume                     = {34},
+  number                     = {6},
+  pages                      = {681-705},
+  month                      = {NOV},
+  issn                       = {0360-1323},
+  abstract                   = {Dynamic models are developed to simulate the thermal, hydraulic, environmental and mechanic characteristics and energy performance of a building and VAV air-conditioning system under the control of EMCS. Three on-line supervisory strategies and programs based on integrated EMCS stations are developed to optimise the VAV static pressure set-point, AHU outlet air temperature set-point and outdoor ventilation air flow set-point, The strategies and programs are commissioned and evaluated under the simulated `real-life' environment. This paper presents the dynamic models, the control strategies and the simulation exercises for commissioning and evaluation of the strategies. (C) 1999 Elsevier Science Ltd. All rights reserved.},
+  file                       = {Wang1999.pdf:Wang1999.pdf:PDF},
+  groups                     = {Buildings, single house forecasting},
+  number-of-cited-references = {22},
+  owner                      = {pb},
+  times-cited                = {41},
+  timestamp                  = {2010.11.16},
+  unique-id                  = {ISI:000081578500003},
+}
+
+@Article{Wang1998,
+  author                     = {Wang, SW},
+  title                      = {Dynamic simulation of a building central chilling system and evaluation of EMCS on-line control strategies},
+  journal                    = {Building and Environment},
+  year                       = {1998},
+  volume                     = {33},
+  number                     = {1},
+  pages                      = {1-20},
+  month                      = {JAN},
+  issn                       = {0360-1323},
+  abstract                   = {Dynamic models of centrifugal chillers, heat exchangers, seawater and chilled-water networks, cooling coil, actuator, sensor, variable-speed pump and DDC controller of EMCS are developed to simulate the dynamics of a seawater-cooled chilling system controlled by EMCS on line strategies. The thermal, hydraulic, energy and control performances of the system are simulated. The on-line control strategies (i.e. adaptive and derivative strategies) developed for the central chilling system are tested and evaluated by applying them to control the simulated living chilling system under different AHU dynamic loads. This paper presents the models, system dynamic simulation of the chilling system, chilling system performance monitoring validation of simulation, EMCS on-line control strategies and evaluation of the strategies. (C) 1997 Published by Elsevier Science Ltd.},
+  file                       = {Wang1998.pdf:Wang1998.pdf:PDF},
+  groups                     = {Buildings, single house forecasting},
+  number-of-cited-references = {19},
+  owner                      = {pb},
+  times-cited                = {31},
+  timestamp                  = {2010.11.17},
+  unique-id                  = {ISI:000071024000001},
+}
+
+@Article{Wang2001,
+  author                     = {Wang, SW and Chen, YM},
+  title                      = {A novel and simple building load calculation model for building and system dynamic simulation},
+  journal                    = {Applied Thermal Engineering},
+  year                       = {2001},
+  volume                     = {21},
+  number                     = {6},
+  pages                      = {683-702},
+  month                      = {APR},
+  issn                       = {1359-4311},
+  abstract                   = {Based on polynomial s-transfer functions of transient heat conduction through a building construction, a novel and simple model is developed for building thermal load calculation. The polynomial s-transfer functions are estimated from the theoretical frequency responses of the building construction by frequency-domain regression method. The simple polynomial s-transfer functions are completely equivalent to the hyperbolic s-transfer functions in terms of frequency characteristics. First, the frequency responses of the total transmission matrix are calculated within the frequency range concerned. Then, a set of linear equations is solved yielding a simple polynomial s-transfer function for cross and internal heat conduction. Finally, simple recursive formulae are obtained by inverse Laplace transforms and discretization of the convolution integrals. The coefficients of the model are independent from time step and the time step can be varied according to the requirement during simulation. Validations and comparisons show that this model has fast computation speed and no numerical unstability, and provides high accuracy and good flexibility to the requirement of variable time step. (C) 2001 Elsevier Science Ltd. All rights reserved.},
+  file                       = {Wang2001.pdf:Wang2001.pdf:PDF},
+  groups                     = {Buildings, single house forecasting},
+  number-of-cited-references = {24},
+  owner                      = {pb},
+  review                     = {Transfer-function modelling.},
+  times-cited                = {16},
+  timestamp                  = {2010.11.17},
+  unique-id                  = {ISI:000166780400006},
+}
+
+@Book{Wasserman2004,
+  Title                    = {All of statistics: a concise course in statistical inference},
+  Author                   = {Wasserman, Larry},
+  Publisher                = {Springer},
+  Year                     = {2004},
+
+  File                     = {:/home/pb/j/literature/books/AllOfStatistics_A Concise Course in Statistical Inference.pdf:PDF},
+  Owner                    = {pb},
+  Timestamp                = {2013.07.03}
+}
+
+@Article{Widen2009,
+  author    = {Joakim Widén and Magdalena Lundh and Iana Vassileva and Erik Dahlquist and Kajsa Ellegård and Ewa Wäckelgård},
+  title     = {Constructing load profiles for household electricity and hot water from time-use data—Modelling approach and validation},
+  journal   = {Energy and Buildings},
+  year      = {2009},
+  volume    = {41},
+  number    = {7},
+  pages     = {753-768},
+  issn      = {0378-7788},
+  abstract  = {Time-use data, describing in detail the everyday life of household members as high-resolved activity sequences, have a largely unrealized potential of contributing to domestic energy demand modelling. A model for computation of daily electricity and hot-water demand profiles from time-use data was developed, using simple conversion schemes, mean appliance and water-tap data and general daylight availability distributions. Validation against detailed, end-use specific electricity measurements in a small sample of households reveals that the model for household electricity reproduces hourly load patterns with preservation of important qualitative features. The output from the model, when applied to a large data set of time use in Sweden, also shows correspondence to aggregate profiles for both household electricity and hot water from recent Swedish measurement surveys. Deviations on individual household level are predominantly due to occasionally ill-reported time-use data and on aggregate population level due to slightly non-representative samples. Future uses and developments are identified and it is suggested that modelling energy use from time-use data could be an alternative, or a complement, to energy demand measurements in households. },
+  doi       = {10.1016/j.enbuild.2009.02.013},
+  file      = {Widen2009.pdf:Widen2009.pdf:PDF},
+  groups    = {kernelpaper},
+  keywords  = {Load modelling},
+  owner     = {pb},
+  timestamp = {2013.11.18},
+  url       = {http://www.sciencedirect.com/science/article/pii/S0378778809000413},
+}
+
+@Article{Wilson2002,
+  author    = {Scott B. Wilson and Ronald Emerson},
+  title     = {Spike detection: a review and comparison of algorithms},
+  journal   = {Clinical Neurophysiology},
+  year      = {2002},
+  volume    = {113},
+  number    = {12},
+  pages     = {1873-1881},
+  issn      = {1388-2457},
+  abstract  = {For algorithm developers, this review details recent approaches to the problem, compares the accuracy of various algorithms, identifies common testing issues and proposes some solutions. For the algorithm user, e.g. electroencephalograph (EEG) technician or neurologist, this review provides an estimate of algorithm accuracy and comparison to that of human experts. Manuscripts dated from 1975 are reviewed. Progress since Frost's 1985 review of the state of the art is discussed. Twenty-five manuscripts are reviewed. Many novel methods have been proposed including neural networks and high-resolution frequency methods. Algorithm accuracy is less than that of experts, but the accuracy of experts is probably less than what is commonly believed. Larger record sets will be required for expert-level detection algorithms. },
+  doi       = {10.1016/S1388-2457(02)00297-3},
+  file      = {Wilson2002.pdf:Wilson2002.pdf:PDF},
+  groups    = {kernelpaper},
+  keywords  = {Epilepsy},
+  owner     = {pb},
+  timestamp = {2013.11.28},
+  url       = {http://www.sciencedirect.com/science/article/pii/S1388245702002973},
+}
+
+@InProceedings{Yang2011,
+  author    = {Yang, G. Y. and Østergaard, J. and Kjær, S. B. and Constantin, A. and Ballegaard, H. P. and Lazar, R. D. and Borup, U. and Stephansen, C. and Mattesen, M. and Sørensen, J. B.},
+  title     = {Smart integration of photovoltaic power systems on the island of Bornholm},
+  booktitle = {1st International Workshop on the Integration of Solar Power into Power Systems Aarhus, Denmark, 24 October 2011},
+  year      = {2011},
+  file      = {Yang2011.pdf:Yang2011.pdf:PDF},
+  groups    = {phdthesis},
+  owner     = {pb},
+  timestamp = {2012.04.01},
+}
+
+@Article{Yao2005,
+  author                     = {Yao, RM and Steemers, K},
+  title                      = {A method of formulating energy load profile for domestic buildings in the UK},
+  journal                    = {Energy and Buildings},
+  year                       = {2005},
+  volume                     = {37},
+  number                     = {6},
+  pages                      = {663-671},
+  month                      = {JUN},
+  issn                       = {0378-7788},
+  abstract                   = {There are varieties of physical and behavioral factors to determine energy demand load profile. The attainment of the optimum mix of measures and renewable energy system deployment requires a simple method suitable for using at the early design stage. A simple method of formulating load profile (SMLP) for UK domestic buildings has been presented in this paper. Domestic space heating load profile for different types of houses have been produced using thermal dynamic model which has been developed using thermal resistant network method. The daily breakdown energy demand load profile of appliance, domestic hot water and space heating can be predicted using this method. The method can produce daily load profile from individual house to urban community. It is suitable to be used at Renewable energy system strategic design stage. (c) 2004 Elsevier B.V. All rights reserved.},
+  doi                        = {10.1016/j.enbuild.2004.09.007},
+  file                       = {Yao2005.pdf:Yao2005.pdf:PDF},
+  groups                     = {Buildings, single house forecasting},
+  number-of-cited-references = {10},
+  owner                      = {pb},
+  review                     = {Doesn't seem to be too interesting.},
+  times-cited                = {20},
+  timestamp                  = {2010.11.17},
+  unique-id                  = {ISI:000227978300013},
+}
+
+@Article{Younes2005,
+  author    = {Younes, S. and Claywell, R. and Muneer, T.},
+  title     = {Quality control of solar radiation data: Present status and proposed new approaches},
+  journal   = {Energy},
+  year      = {2005},
+  volume    = {30},
+  number    = {9},
+  pages     = {1533-1549},
+  issn      = {0360-5442},
+  note      = {Measurement and Modelling of Solar Radiation and Daylight- Challenges for the 21st Century},
+  abstract  = {During the past few decades, there has been a continual rise in interest in passive and active solar energy uses, not only in the governmental and commercial sectors, but also within the private sector. There is thus a need for taking measurements of solar irradiation and creating local and regional databases of irradiation and synoptic (meteorological) information. However, there is no guarantee of the quality of the data collected, as often due care is not exercised with respect to quality control of the measured dataset. This article reviews the presently available procedures for quality assessment of the solar irradiation data. Furthermore, we propose a set of stringent physical and statistical measures to create a semi-automated procedure that is based on the creation of an envelope in the clearness index–diffuse to global irradiance ratio domain. The procedure is very general in nature and may be used with equal effectiveness for any terrestrial dataset.},
+  doi       = {10.1016/j.energy.2004.04.031},
+  file      = {Younes2005.pdf:Younes2005.pdf:PDF},
+  groups    = {correction},
+  owner     = {pb},
+  review    = {Comments for use of the article as a reference in my correction paper: - Listing of general types of errors. Both equipment related errors and operational errors. Point: Many possible error sources.},
+  timestamp = {2012.02.14},
+}
+
+@Article{Zhou2008,
+  author    = {Zhou, Qiang and Wang, Shengwei and Xu, Xinhua and Xiao, Fu},
+  title     = {A grey-box model of next-day building thermal load prediction for energy-efficient control},
+  journal   = {International Journal of Energy Research},
+  year      = {2008},
+  volume    = {32},
+  number    = {15},
+  pages     = {1418-1431},
+  issn      = {1099-114X},
+  abstract  = {Accurate building thermal load prediction is essential to many building energy control strategies. To get reliable prediction of the hourly building load of the next day, air temperature/relative humidity and solar radiation prediction modules are integrated with a grey-box model. The regressive solar radiation module predicts the solar radiation using the forecasted cloud amount, sky condition and extreme temperatures from on-line weather stations, while the forecasted sky condition is used to correct the cloud amount forecast. The temperature/relative humidity prediction module uses a dynamic grey model (GM), which is specialized in the grey system with incomplete information. Both weather prediction modules are integrated into a building thermal load model for the on-line prediction of the building thermal load in the next day. The validation of both weather prediction modules and the on-line building thermal load prediction model are presented. Copyright © 2008 John Wiley & Sons, Ltd.},
+  doi       = {10.1002/er.1458},
+  file      = {Zhou2008.pdf:Zhou2008.pdf:PDF},
+  groups    = {single house forecasting},
+  keywords  = {building load, load prediction, grey-box model, weather prediction},
+  owner     = {pb},
+  publisher = {John Wiley \& Sons, Ltd.},
+  timestamp = {2012.03.28},
+}
+
+@InProceedings{Zong2011,
+  Title                    = {Active load management in an intelligent building using model predictive control strategy},
+  Author                   = {Yi Zong and Kullmann, D. and Thavlov, A. and Gehrke, O. and Bindner, H.W.},
+  Booktitle                = {PowerTech, 2011 IEEE Trondheim},
+  Year                     = {2011},
+  Month                    = {june},
+  Pages                    = {1-6},
+
+  Abstract                 = {This paper introduces PowerFlexHouse, a research facility for exploring the technical potential of active load management in a distributed power system (SYSLAB) with a high penetration of renewable energy and presents in detail on how to implement a thermal model predictive controller for load shifting in PowerFlexHouse heaters' power consumption scheme. With this demand side control study, it is expected that this method of demand response can dramatically raise energy efficiencies and improve grid reliability, when there is a high penetration of intermittent energy resources in the power system.},
+  Doi                      = {10.1109/PTC.2011.6019347},
+  File                     = {Zong2011.pdf:Zong2011.pdf:PDF},
+  Keywords                 = {PowerFlexHouse;active load management;demand response;demand side control;distributed power system;energy efficiency;grid reliability;intelligent building;intermittent energy resources;load shifting;power consumption;renewable energy resource;thermal model predictive controller;building management systems;demand side management;distributed power generation;energy conservation;power consumption;power generation reliability;power grids;predictive control;renewable energy sources;},
+  Owner                    = {pb},
+  Timestamp                = {2012.04.13}
+}
+
+@Article{Zugno2014,
+  author    = {Zugno, Marco and Conejo, Antonio J.},
+  title     = {A robust optimization approach to energy and reserve dispatch in electricity markets},
+  journal   = {European Journal of Operational Research},
+  year      = {2014},
+  note      = {under review},
+  file      = {Zugno2014.pdf:Zugno2014.pdf:PDF},
+  groups    = {Markets},
+  owner     = {pb},
+  timestamp = {2014.09.25},
+}
+
+@Article{Zugno2013b,
+  author    = {Zugno, Marco and J{\'o}nsson, Tryggvi and Pinson, Pierre},
+  title     = {Trading wind energy on the basis of probabilistic forecasts both of wind generation and of market quantities},
+  journal   = {Wind Energy},
+  year      = {2013},
+  volume    = {16},
+  number    = {6},
+  pages     = {909--926},
+  file      = {Zugno2013b.pdf:Zugno2013b.pdf:PDF},
+  groups    = {Markets, OptimalBidding},
+  owner     = {pb},
+  timestamp = {2014.09.25},
+}
+
+@Article{Zugno2013a,
+  author    = {Zugno, Marco and Morales, Juan Miguel and Pinson, Pierre and Madsen, Henrik},
+  title     = {A bilevel model for electricity retailers' participation in a demand response market environment},
+  journal   = {Energy Economics},
+  year      = {2013},
+  volume    = {36},
+  pages     = {182--197},
+  file      = {Zugno2013a.pdf:Zugno2013a.pdf:PDF},
+  groups    = {Markets},
+  owner     = {pb},
+  timestamp = {2014.09.25},
+}
+
+@Article{Zugno2013c,
+  author    = {Zugno, Marco and Morales, Juan Miguel and Pinson, Pierre and Madsen, Henrik},
+  title     = {Pool strategy of a price-maker wind power producer},
+  journal   = {IEEE Transactions on Power Systems},
+  year      = {2013},
+  volume    = {28},
+  number    = {3},
+  pages     = {3440--3450},
+  file      = {Zugno2013.pdf:Zugno2013.pdf:PDF},
+  groups    = {Markets},
+  owner     = {pb},
+  timestamp = {2014.09.25},
+}
+
+@PhdThesis{Zugno2013,
+  author    = {Marco Zugno and Pierre Pinson and {Morales González}, {Juan Miguel} and Henrik Madsen},
+  title     = {Optimization Under Uncertainty for Management of Renewables in Electricity Markets},
+  year      = {2013},
+  school    = {Technical University of Denmark},
+  series    = {PHD-2013},
+  owner     = {pb},
+  timestamp = {2014.09.04},
+}
+
+@Article{Zweibel2008,
+  author    = {Zweibel, K. and Mason, J. and Fthenakis, V.},
+  title     = {A solar grand plan},
+  journal   = {Scientific American},
+  year      = {2008},
+  volume    = {298},
+  number    = {1},
+  pages     = {64-73},
+  groups    = {phdthesis},
+  owner     = {pb},
+  publisher = {Nature Publishing Group},
+  timestamp = {2012.04.03},
+}
+
+@Book{Bloem1994,
+  title     = {System Identification Applied to Building Performance Data},
+  publisher = {CEC-EUR 15885 EN},
+  year      = {1994},
+  editor    = {Bloem, J. J.},
+  groups    = {Buildings},
+  owner     = {pb},
+  timestamp = {2010.11.20},
+}
+
+@Proceedings{solarworkshop2011,
+  title     = {1st International Workshop on the Integration of Solar Power into Power Systems, Aarhus, Denmark, 24 October 2011},
+  year      = {2011},
+  owner     = {pb},
+  timestamp = {2012.04.01},
+}
+
+@Article{Persson2017,
+  author    = {Caroline Persson and Peder Bacher and Takahiro Shiga and Henrik Madsen},
+  title     = {Multi-site solar power forecasting using gradient boosted regression trees},
+  journal   = {Solar Energy},
+  year      = {2017},
+  volume    = {150},
+  pages     = {423 - 436},
+  issn      = {0038-092X},
+  abstract  = {The challenges to optimally utilize weather dependent renewable energy sources call for powerful tools for forecasting. This paper presents a non-parametric machine learning approach used for multi-site prediction of solar power generation on a forecast horizon of one to six hours. Historical power generation and relevant meteorological variables related to 42 individual {PV} rooftop installations are used to train a gradient boosted regression tree (GBRT) model. When compared to single-site linear autoregressive and variations of {GBRT} models the multi-site model shows competitive results in terms of root mean squared error on all forecast horizons. The predictive performance and the simplicity of the model setup make the boosted tree model a simple and attractive complement to conventional forecasting techniques.},
+  doi       = {10.1016/j.solener.2017.04.066},
+  file      = {Persson2017.pdf:Persson2017.pdf:PDF},
+  keywords  = {Solar power forecasting, Multi-site forecasting, Spatio-temporal forecasting, Regression trees, Gradient boosting, Machine learning},
+  owner     = {pbac},
+  timestamp = {2017.05.10},
+  url       = {http://www.sciencedirect.com/science/article/pii/S0038092X17303717},
+}
+
+@Article{Vogler-Finck2017,
+  author    = {P.J.C. Vogler-Finck and P. Bacher and H. Madsen},
+  title     = {Online short-term forecast of greenhouse heat load using a weather forecast service},
+  journal   = {Applied Energy},
+  year      = {2017},
+  volume    = {205},
+  pages     = {1298 - 1310},
+  issn      = {0306-2619},
+  abstract  = {In some district heating systems, greenhouses represent a significant share of the total load, and can lead to operational challenges. Short term load forecast of such consumers has a strong potential to contribute to the improvement of the overall system efficiency. This work investigates the performance of recursive least squares for predicting the heat load of individual greenhouses in an online manner. Predictor inputs (weekly curves terms and weather forecast inputs) are selected in an automated manner using a forward selection approach. Historical load measurements from 5 Danish greenhouses with different operational characteristics were used, together with weather measurements and a weather forecast service. It was found that these predictors of reduced complexity and computational load performed well at capturing recurring load profiles, but not fast frequency random changes. Overall, the root mean square error of the prediction was within 8–20% of the peak load for the set of consumers over the 8 months period considered.},
+  doi       = {10.1016/j.apenergy.2017.08.013},
+  file      = {Vogler-Finck2017.pdf:Vogler-Finck2017.pdf:PDF},
+  keywords  = {Heat demand, Load forecast, Recursive least squares, Weather forecast service, Greenhouses, Model selection },
+  owner     = {pbac},
+  timestamp = {2017.09.26},
+  url       = {http://www.sciencedirect.com/science/article/pii/S0306261917310292},
+}
+
+@Book{Gumbel1958,
+  title     = {Statistics of extremes},
+  year      = {1958},
+  author    = {Gumbel, Emil Julius},
+  groups    = {EVT},
+  owner     = {pbac},
+  timestamp = {2018.02.26},
+}
+
+@TechReport{ISO2006,
+  author      = {ISO 13790:2006},
+  title       = {Energy performance of buildings — Calculation of energy use for space heating and cooling},
+  institution = {ISO},
+  year        = {2006},
+  file        = {Symbols:ISO2006Symbols.pdf:PDF;ISO2006.pdf:ISO2006.pdf:PDF},
+  owner       = {pb},
+  timestamp   = {2014.08.07},
+}
+
+@TechReport{Arana2007,
+  author      = {Arana, Ander Goikoetxea},
+  title       = {FlexHouse},
+  institution = {Risø DTU},
+  year        = {2007},
+  groups      = {Buildings},
+  owner       = {pbac},
+  timestamp   = {2018.02.26},
+}
+
+@TechReport{Bacher2010,
+  author      = {Bacher, Peder and Madsen, Henrik},
+  title       = {Experiments and Data for Building Energy Performance Analysis: Financed by The Danish Electricity Saving Trust},
+  institution = {DTU Informatics, Building 321},
+  year        = {2010},
+  address     = {Kgs. Lyngby},
+  abstract    = {This report documents experiments carried out in FlexHouse at Risø DTU during February and March of 2009. FlexHouse is a part of the experimental distributed energy system, Syslab. The building is controlled by one central server, where among other things it is possible to record temperature in each room, and implement control of the installed electrical heaters. Furthermore a climate station is located right next to the building. The objective of the experiments is to provide data for models of the thermal dynamics of the building. The design of the experiments is such that the conditions are varied during the experiments, from conditions optimized for modelling toward more common living conditions, i.e. from high variation of the indoor temperature toward thermostatic temperature control and human activities in the building. In total five experiments have been successfully carried out, two with PRBS signals controlling the heaters, and three with thermostatic control.},
+  affiliation = {Technical University of Denmark, Department of Informatics and Mathematical Modeling, Mathematical Statistics and Technical University of Denmark, Department of Informatics and Mathematical Modeling, Mathematical Statistics},
+  file        = {:/home/pb/j/literature/techreports/tr10_03-update.pdf:PDF},
+  groups      = {Buildings},
+  language    = {English},
+  owner       = {pbac},
+  series      = {IMM-Technical report-2010-03},
+  timestamp   = {2018.02.26},
+}
+
+@Book{Bacher2012a,
+  title     = {Models for efficient integration of solar energy},
+  publisher = {Technical University of Denmark},
+  year      = {2012},
+  author    = {Peder Bacher and Henrik Madsen and Nielsen, {Henrik Aalborg}},
+  series    = {IMM-PhD-2012},
+  owner     = {pbac},
+  timestamp = {2013.04.29},
+}
+
+@Article{Bacher2009,
+  author    = {Bacher, Peder and Madsen, Henrik and Nielsen, Henrik Aalborg},
+  title     = {Online short-term solar power forecasting},
+  journal   = {Solar Energy},
+  year      = {2009},
+  volume    = {83},
+  number    = {10},
+  pages     = {1772-1783},
+  issn      = {0038-092X},
+  abstract  = {This paper describes a new approach to online forecasting of power production from PV systems. The method is suited to online forecasting in many applications and in this paper it is used to predict hourly values of solar power for horizons of up to 36h. The data used is 15-min observations of solar power from 21 PV systems located on rooftops in a small village in Denmark. The suggested method is a two-stage method where first a statistical normalization of the solar power is obtained using a clear sky model. The clear sky model is found using statistical smoothing techniques. Then forecasts of the normalized solar power are calculated using adaptive linear time series models. Both autoregressive (AR) and AR with exogenous input (ARX) models are evaluated, where the latter takes numerical weather predictions (NWPs) as input. The results indicate that for forecasts up to 2h ahead the most important input is the available observations of solar power, while for longer horizons NWPs are the most important input. A root mean square error improvement of around 35% is achieved by the ARX model compared to a proposed reference model.},
+  copyright = {Elsevier Ltd},
+  file      = {Bacher2009.pdf:Bacher2009.pdf:PDF},
+  groups    = {Forecasting, solar},
+  language  = {English},
+  owner     = {pbac},
+  timestamp = {2018.02.26},
+}
+
+@TechReport{Bacher2010a,
+  author      = {Bacher, Peder and Thavlov, Anders and Madsen, Henrik},
+  title       = {Models for Energy Performance Analysis: Financed by The Danish Electricity Saving Trust},
+  institution = {DTU Informatics, Building 321},
+  year        = {2010},
+  address     = {Kgs. Lyngby},
+  affiliation = {Technical University of Denmark, Department of Informatics and Mathematical Modeling, Mathematical Statistics and Technical University of Denmark, Risø National Laboratory for Sustainable Energy, Intelligent Energy Systems Programme and Technical University of Denmark, Department of Informatics and Mathematical Modeling, Mathematical Statistics},
+  file        = {:/home/pb/j/literature/techreports/tr10_02.pdf:PDF},
+  groups      = {Buildings},
+  keywords    = {Intelligente energisystemer, Intelligent energy systems},
+  language    = {English},
+  owner       = {pbac},
+  series      = {IMM-Technical Report-2010-02},
+  timestamp   = {2018.02.26},
+}
+
+@Article{Bohlin1995,
+  author    = {Bohlin, Torsten and Graebe, Stefan F},
+  title     = {Issues in nonlinear stochastic grey box identification},
+  journal   = {International Journal of Adaptive Control and Signal Processing},
+  year      = {1995},
+  volume    = {9},
+  number    = {6},
+  pages     = {465-490},
+  owner     = {pbac},
+  publisher = {Wiley Online Library},
+  timestamp = {2018.02.26},
+}
+
+@Article{Cao2008,
+  author    = {Cao, Jiacong and Lin, Xingchun},
+  title     = {Study of hourly and daily solar irradiation forecast using diagonal recurrent wavelet neural networks},
+  journal   = {Energy Conversion and Management},
+  year      = {2008},
+  volume    = {49},
+  number    = {6},
+  pages     = {1396-1406},
+  issn      = {0196-8904},
+  abstract  = {An accurate forecast of solar irradiation is required for various solar energy applications and environmental impact analyses in recent years. Comparatively, various irradiation forecast models based on artificial neural networks (ANN) perform much better in accuracy than many conventional prediction models. However, the forecast precision of most existing ANN based forecast models has not been satisfactory to researchers and engineers so far, and the generalization capability of these networks needs further improving. Combining the prominent dynamic properties of a recurrent neural network (RNN) with the enhanced ability of a wavelet neural network (WNN) in mapping nonlinear functions, a diagonal recurrent wavelet neural network (DRWNN) is newly established in this paper to perform fine forecasting of hourly and daily global solar irradiance. Some additional steps, e.g. applying historical information of cloud cover to sample data sets and the cloud cover from the weather forecast to network input, are adopted to help enhance the forecast precision. Besides, a specially scheduled two phase training algorithm is adopted. As examples, both hourly and daily irradiance forecasts are completed using sample data sets in Shanghai and Macau, and comparisons between irradiation models show that the DRWNN models are definitely more accurate.},
+  copyright = {Elsevier Ltd},
+  file      = {Cao2008.pdf:Cao2008.pdf:PDF},
+  groups    = {forecasting},
+  language  = {English},
+  owner     = {pbac},
+  review    = {This article contains very big errors. Daily values from the day, where hourly values are to be predicted, are used as input to the hourly forecast.},
+  timestamp = {2018.02.26},
+}
+
+@Misc{CEN2006a,
+  author    = {{CEN, European committee for standardization}},
+  title     = {EN{~}12975-2:2006, Thermal solar systems and components - Collectors - Part 2: Test methods},
+  year      = {2006},
+  owner     = {pb},
+  timestamp = {2011.08.11},
+}
+
+@InProceedings{Chowdhury1987,
+  author    = {Chowdhury, B.H. and Rahman, S.},
+  title     = {Forecasting sub-hourly solar irradiance for prediction of photovoltaic output},
+  booktitle = {IEEE Photovoltaic Specialists Conference, 19th, New Orleans, LA, May 4-8, 1987, Proceedings},
+  publisher = {Institute of Electrical and Electronics Engineers, Inc.},
+  address   = {New York},
+  year      = {1987},
+  pages     = {171-176},
+  abstract  = {Short-term prediction of photovoltaic power output through forecast of global solar irradiance in the subhourly time frame is explored. The decomposition of the global solar irradiance into a deterministic clear sky component and a stochastic cloud cover component is achieved through a parameterization process. The cloud cover time series is modeled by a Box-Jenkins-type ARIMA model and forecasts issued hourly for specified interval periods throughout the hour. Results show that when compared to actual data measured at several locations in the southeastern United States, the forecasts are quite accurate and the model is site-independent. Forecasts are found to be inaccurate only when there are sudden changes in the cloud cover moving across the sun. In other words, the randomness involved in sudden extreme changes in the sun's intensity during a single interval will not be picked up by the forecast model and is generally considered impossible to predict by any forecast model. One of the many application of the forecast methodology is to dispatch photovoltaic power output in the optimal power dispatch scheme of electric utilities.},
+  adsnote   = {Provided by the SAO/NASA Astrophysics Data System},
+  adsurl    = {http://adsabs.harvard.edu/abs/1987pvsp.conf..171C},
+  file      = {Chowdhury1987.pdf:Chowdhury1987.pdf:PDF},
+  groups    = {forecasting},
+  keywords  = {ENERGY CONVERSION EFFICIENCY, IRRADIANCE, PREDICTION ANALYSIS TECHNIQUES, SOLAR RADIATION, CLOUD COVER, TIME SERIES ANALYSIS},
+  owner     = {pbac},
+  timestamp = {2018.02.26},
+}
+
+@Misc{DMI2011a,
+  author    = {DMI},
+  title     = {{Danish Meteorological Institute, DMI-HIRLAM-S05}},
+  year      = {2011},
+  owner     = {pb},
+  timestamp = {2011.08.13},
+  url       = {http://www.dmi.dk/eng/index/research_and_development/dmi-hirlam-2009.htm},
+}
+
+@Misc{DMI2012a,
+  author       = {DMI},
+  title        = {{Danish Meteorological Institute, Borgervejr}},
+  howpublished = {\url{www.borgervejr.dk}},
+  month        = {Feb.},
+  year         = {2012},
+  groups       = {correction},
+  owner        = {pb},
+  timestamp    = {2012.02.15},
+}
+
+@Article{Friling2009,
+  author    = {Friling, Nynne and Jiménez, María José and Bloem, Hans and Madsen, Henrik},
+  title     = {Modelling the heat dynamics of building integrated and ventilated photovoltaic modules},
+  journal   = {Energy and Buildings},
+  year      = {2009},
+  volume    = {41},
+  number    = {10},
+  pages     = {1051-1057},
+  issn      = {0378-7788},
+  abstract  = {This paper deals with mathematical modelling of the heat transfer of building integrated photovoltaic (BIPV) modules. The efficiency of the photovoltaic (PV) module and its temperature are negatively correlated. It is therefore of interest to lower the temperature of the PV module by increasing the heat transfer from the PV module. The experiment and data originate from a test reference module at the EC-JRC Ispra. The set-up provides the opportunity of changing physical parameters, the ventilation speed and the type of air flow, and this makes it possible to determine the preferable set-up. To identify the best set-up, grey-box models consisting of stochastic differential equations are applied. The models are first order stochastic state space models. Maximum likelihood estimation and the extended Kalman filter are applied in the parameter estimation phase. To validate the estimated models, plots of the residuals and autocorrelation functions of the residuals are analyzed. The analysis has revealed that it is necessary to use non-linear state space models in order to obtain a satisfactory description of the PV module temperature, and in order to be able to distinguish the variations in the set-up. The heat transfer is increased when the forced ventilation velocity is increased, while the change in type of air flow does not have as striking an influence. The residual analysis shows that the best description of the PV module temperature is obtained when fins, disturbing the laminar flow and making it turbulent, are applied in the set-up combined with high level of air flow. The improved description by the model is mainly seen in periods with high solar radiation.},
+  copyright = {Elsevier B.V.},
+  file      = {Friling2009.pdf:Friling2009.pdf:PDF},
+  groups    = {Buildings},
+  language  = {English},
+  owner     = {pbac},
+  timestamp = {2018.02.26},
+}
+
+@Article{Gneiting2007,
+  author    = {Gneiting, Tilmann and Raftery, Adrian E},
+  title     = {Strictly proper scoring rules, prediction, and estimation},
+  journal   = {Journal of the American Statistical Association},
+  year      = {2007},
+  volume    = {102},
+  number    = {477},
+  pages     = {359--378},
+  file      = {Gneiting2007.pdf:Gneiting2007.pdf:PDF},
+  groups    = {Probabilistic},
+  owner     = {pbac},
+  publisher = {Taylor \& Francis},
+  timestamp = {2018.02.26},
+}
+
+@Article{Hastie1993,
+  author              = {Hastie, Trevor and Tibshirani, Robert},
+  title               = {Varying-Coefficient Models},
+  journal             = {Journal of the Royal Statistical Society. Series B (Methodological)},
+  year                = {1993},
+  volume              = {55},
+  number              = {4},
+  pages               = {757-796},
+  issn                = {00359246},
+  abstract            = {We explore a class of regression and generalized regression models in which the coefficients are allowed to vary as smooth functions of other variables. General algorithms are presented for estimating the models flexibly and some examples are given. This class of models ties together generalized additive models and dynamic generalized linear models into one common framework. When applied to the proportional hazards model for survival data, this approach provides a new way of modelling departures from the proportional hazards assumption.},
+  file                = {Hastie1993.pdf:Hastie1993.pdf:PDF},
+  jstor_articletype   = {primary_article},
+  jstor_formatteddate = {1993},
+  owner               = {pbac},
+  publisher           = {Blackwell Publishing for the Royal Statistical Society},
+  timestamp           = {2018.02.26},
+}
+
+@Article{Hersbach2000,
+  author    = {Hersbach, Hans},
+  title     = {Decomposition of the continuous ranked probability score for ensemble prediction systems},
+  journal   = {Weather and Forecasting},
+  year      = {2000},
+  volume    = {15},
+  number    = {5},
+  pages     = {559--570},
+  file      = {Hersbach2000.pdf:Hersbach2000.pdf:PDF},
+  groups    = {Probabilistic},
+  owner     = {pbac},
+  timestamp = {2018.02.26},
+}
+
+@Article{Hocaoglu2008,
+  author    = {Hocaoglu, Fatih O. and Gerek, Omer N. and Kurban, Mehmet},
+  title     = {Hourly solar radiation forecasting using optimal coefficient 2-D linear filters and feed-forward neural networks},
+  journal   = {Solar Energy},
+  year      = {2008},
+  volume    = {82},
+  number    = {8},
+  pages     = {714-726},
+  issn      = {0038-092X},
+  abstract  = {In this work, the hourly solar radiation data collected during the period August 1, 2005-July 30, 2006 from the solar observation station in Iki Eylul campus area of Eskisehir region are studied. A two-dimensional (2-D) representation model of the hourly solar radiation data is proposed. The model provides a unique and compact visualization of the data for inspection, and enables accurate forecasting using image processing methods. Using the hourly solar radiation data mentioned above, the image model is formed in raster scan form with rows and columns corresponding to days and hours, respectively. Logically, the between-day correlations along the same hour segment provide the vertical correlations of the image, which is not available in the regular 1-D representation. To test the forecasting efficiency of the model, nine different linear filters with various filter-tap configurations are optimized and tested. The results provide the necessary correlation model and prediction directions for obtaining the optimum prediction template for forecasting. Next, the 2-D forecasting performance is tested through feed-forward neural networks (NN) using the same data. The optimal linear filters and NN models are compared in the sense of root mean square error (RMSE). It is observed that the 2-D model has pronounced advantages over the 1-D representation for both linear and NN prediction methods. Due to the capability of depicting the nonlinear behavior of the input data, the NN models are found to achieve better forecasting results than linear prediction filters in both 1-D and 2-D.},
+  copyright = {Elsevier Ltd},
+  file      = {Hocaoglu2008.pdf:Hocaoglu2008.pdf:PDF},
+  groups    = {forecasting},
+  language  = {English},
+  owner     = {pbac},
+  timestamp = {2018.02.26},
+}
+
+@Book{Holman2002,
+  title     = {Heat Transfer},
+  publisher = {McGraw-Hill},
+  year      = {2002},
+  author    = {Holman, J.P.},
+  edition   = {Ninth Edition},
+  groups    = {Buildings},
+  owner     = {pbac},
+  timestamp = {2018.02.26},
+}
+
+@Article{Hong2000,
+  author    = {Hong, Tianzhen and Chou, S.K. and Bong, T.Y.},
+  title     = {Building simulation: an overview of developments and information sources},
+  journal   = {Building and Environment},
+  year      = {2000},
+  volume    = {35},
+  number    = {4},
+  pages     = {347-361},
+  issn      = {0360-1323},
+  abstract  = {We review the state-of-the-art on the development and application of computer-aided building simulation by addressing some crucial questions in the field. Although the answers are not intended to be comprehensive, they are sufficiently varied to provide an overview ranging from the historical and technical development to choosing a suitable simulation program and performing building simulation. Popular icons of major interested agencies and simulation tools and key information sources are highlighted. Future trends in the design and operation of energy-efficient ``green'' buildings are briefly described.},
+  copyright = {Elsevier Science Ltd},
+  file      = {:/home/pb/literature/articles/building_sim_an_overview.pdf:PDF},
+  language  = {English},
+  owner     = {pbac},
+  timestamp = {2018.02.26},
+}
+
+@Article{Jimenez2008,
+  author    = {Jiménez, M.J. and Madsen, H. and Bloem, J.J. and Dammann, B.},
+  title     = {Estimation of non-linear continuous time models for the heat exchange dynamics of building integrated photovoltaic modules},
+  journal   = {Energy \& Buildings},
+  year      = {2008},
+  volume    = {40},
+  number    = {2},
+  pages     = {157-167},
+  issn      = {0378-7788},
+  abstract  = {This paper focuses on a method for linear or non-linear continuous time modelling of physical systems using discrete time data. This approach facilitates a more appropriate modelling of more realistic non-linear systems. Particularly concerning advanced building components, convective and radiative heat interchanges are non-linear effects and represent significant contributions in a variety of components such as photovoltaic integrated façades or roofs and those using these effects as passive cooling strategies, etc. Since models are approximations of the physical system and data is encumbered with measurement errors it is also argued that it is important to consider stochastic models. More specifically this paper advocates for using continuous-discrete stochastic state space models in the form of non-linear partially observed stochastic differential equations (SDEs) with measurement noise for modelling dynamic systems in continuous time using discrete time data. First of all the proposed method provides a method for modelling non-linear systems with partially observed states. The approach allows parameters to be estimated from experimental data in a prediction error (PE) setting, which gives less biased and more reproducible results in the presence of significant process noise than the more commonly used output error (OE) setting. To facilitate the use of continuous-discrete stochastic state space models, a PE estimation scheme that features maximum likelihood (ML) and maximum a posteriori (MAP) estimation is presented along with a software implementation. As a case study, the modelling of the thermal characteristics of a building integrated PV component is considered. The EC-JRC Ispra has made experimental data available. Both linear and non-linear models are identified. It is shown that a description of the non-linear heat transfer is essential. The resulting model is a non-linear first order stochastic differential equation for the heat transfer of the PV component.},
+  copyright = {Elsevier B.V.},
+  file      = {:estimationofnonlinearcontinuoustimemodelsfortheheatexchange.pdf:PDF},
+  groups    = {Buildings},
+  language  = {English},
+  owner     = {pbac},
+  timestamp = {2018.02.26},
+}
+
+@Article{Jonsson2010,
+  author    = {Tryggvi J{\'o}nsson and Pierre Pinson and Henrik Madsen},
+  title     = {On the market impact of wind energy forecasts},
+  journal   = {Energy Economics},
+  year      = {2010},
+  volume    = {32},
+  number    = {2},
+  pages     = {313 - 320},
+  issn      = {0140-9883},
+  abstract  = {This paper presents an analysis of how day-ahead electricity spot prices are affected by day-ahead wind power forecasts. Demonstration of this relationship is given as a test case for the Western Danish price area of the Nord Pool's Elspot market. Impact on the average price behaviour is investigated as well as that on the distributional properties of the price. By using a non-parametric regression model to assess the effects of wind power forecasts on the average behaviour, the non-linearities and time variations in the relationship are captured well and the effects are shown to be quite substantial. Furthermore, by evaluating the distributional properties of the spot prices under different scenarios, the impact of the wind power forecasts on the price distribution is proved to be considerable. The conditional price distribution is moreover shown to be non-Gaussian. This implies that forecasting models for electricity spot prices for which parameters are estimated by a least squares techniques will not have Gaussian residuals. Hence the widespread assumption of Gaussian residuals from electricity spot price models is shown to be inadequate for these model types. The revealed effects are likely to be observable and qualitatively similar in other day-ahead electricity markets significantly penetrated by wind power.},
+  doi       = {10.1016/j.eneco.2009.10.018},
+  file      = {:Jonsson2010.pdf:PDF},
+  keywords  = {Electricity market},
+  owner     = {pb},
+  timestamp = {2016.01.06},
+  url       = {http://www.sciencedirect.com/science/article/pii/S0140988309002011},
+}
+
+@MastersThesis{Juhl2011,
+  author    = {R. Juhl},
+  title     = {Towards Efficient Estimations of Parameters in Stochastic Differential Equations},
+  school    = {Technical University of Denmark, {DTU} Informatics},
+  year      = {2011},
+  address   = {Asmussens Alle, Building 305, {DK-}2800 Kgs. Lyngby, Denmark},
+  note      = {Supervised by Professor Henrik Madsen, hm@imm.dtu.dk, {DTU} Informatics},
+  abstract  = {Stochastic differential equations are gaining popularity, but estimating the models can be rather time consuming. {CTSM} v2.3 is a graphical entry point which quickly becomes cumbersome. The present thesis successfully implements {CTSM} in the scriptable R language and exploit the independent function evaluations in the gradient. Several non-linear model are tested to determine the performance running parallel. The best speed-up observed is 10x at a low cost of additional total {CPU} usage of a few percent. The new {CTSM} interface lets a user diagnose erroneous estimations using the newly added traces of the Hessian, gradient and parameters. It lives within R and its very flexible environment where data preprocessing and post processing can be performed with the new {CTSM}.},
+  owner     = {pbac},
+  timestamp = {2018.02.26},
+  url       = {http://www.imm.dtu.dk/English.aspx},
+}
+
+@TechReport{Kristensen2003a,
+  author      = {Kristensen, Niels Rode and Madsen, Henrik},
+  title       = {Continuous Time Stochastic Modelling, {CTSM} 2.3 - User's Guide},
+  institution = {DTU},
+  year        = {2003},
+  file        = {Kristensen2003a.pdf:Kristensen2003a.pdf:PDF},
+  owner       = {pbac},
+  timestamp   = {2018.02.26},
+}
+
+@Article{Kristensen2004,
+  author    = {Niels Rode Kristensen and Henrik Madsen and Sten Bay J{\o}rgensen},
+  title     = {Parameter estimation in stochastic grey-box models},
+  journal   = {Automatica},
+  year      = {2004},
+  volume    = {40},
+  number    = {2},
+  pages     = {225-237},
+  issn      = {0005-1098},
+  abstract  = {An efficient and flexible parameter estimation scheme for grey-box models in the sense of discretely, partially observed Itô stochastic differential equations with measurement noise is presented along with a corresponding software implementation. The estimation scheme is based on the extended Kalman filter and features maximum likelihood as well as maximum a posteriori estimation on multiple independent data sets, including irregularly sampled data sets and data sets with occasional outliers and missing observations. The software implementation is compared to an existing software tool and proves to have better performance both in terms of quality of estimates for nonlinear systems with significant diffusion and in terms of reproducibility. In particular, the new tool provides more accurate and more consistent estimates of the parameters of the diffusion term.},
+  doi       = {10.1016/j.automatica.2003.10.001},
+  file      = {Kristensen2004.pdf:Kristensen2004.pdf:PDF},
+  groups    = {TheoreticalModeling},
+  keywords  = {Grey-box models},
+  owner     = {pbac},
+  timestamp = {2018.02.26},
+}
+
+@Article{Kristensen2004a,
+  author    = {Niels Rode Kristensen and Henrik Madsen and Sten Bay J{\o}rgensen},
+  title     = {A method for systematic improvement of stochastic grey-box models},
+  journal   = {Computers \& Chemical Engineering},
+  year      = {2004},
+  volume    = {28},
+  number    = {8},
+  pages     = {1431-1449},
+  issn      = {0098-1354},
+  abstract  = {A systematic framework for improving the quality of continuous time models of dynamic systems based on experimental data is presented. The framework is based on an interplay between stochastic differential equation modelling, statistical tests and nonparametric modelling and provides features that allow model deficiencies to be pinpointed and their structural origin to be uncovered. More specifically, the proposed framework can be used to obtain estimates of unknown functional relations, in turn allowing unknown or inappropriately modelled phenomena to be uncovered. In this manner the framework permits systematic iterative model improvement. The performance of the proposed framework is illustrated through a case study involving a dynamic model of a fed-batch bioreactor, where it is shown how an inappropriately modelled biomass growth rate can be uncovered and a proper functional relation inferred. A key point illustrated through this case study is that functional relations involving unmeasured variables can also be uncovered.},
+  doi       = {10.1016/j.compchemeng.2003.10.003},
+  file      = {Kristensen2004a.pdf:Kristensen2004a.pdf:PDF},
+  groups    = {TheoreticalModeling},
+  keywords  = {Model improvement},
+  owner     = {pbac},
+  timestamp = {2018.02.26},
+}
+
+@Book{Ljung1999,
+  title     = {System identification},
+  publisher = {Wiley Online Library},
+  year      = {1999},
+  author    = {Ljung, Lennart},
+  owner     = {pbac},
+  timestamp = {2018.02.26},
+}
+
+@Book{Madsen2007,
+  title     = {Time Series Analysis},
+  publisher = {Chapman \& Hall/CRC},
+  year      = {2007},
+  author    = {Madsen, Henrik},
+  file      = {:books/Henrik Madsen-Time Series Analysis(2007).pdf:PDF},
+  groups    = {TheoreticalModeling},
+  owner     = {pbac},
+  timestamp = {2018.02.26},
+}
+
+@Article{Madsen1995,
+  author    = {Madsen, H. and Holst, J.},
+  title     = {Estimation of continuous-time models for the heat dynamics of a building},
+  journal   = {Energy and Buildings},
+  year      = {1995},
+  volume    = {22},
+  number    = {1},
+  pages     = {67-79},
+  issn      = {0378-7788},
+  abstract  = {This paper describes a method for estimation of continuous-time models for the heat dynamics of buildings based on discrete-time building performance data. The parameters in the continuous-time model are estimated by the maximum likelihood method where a Kalman filter is used in calculating the likelihood function. The modeling procedure is illustrated by using measurements from an experiment where the heat input from electrical heaters is controlled by a pseudorandom binary signal. For the considered building a rather simple model containing two time constants is found adequate. Owing to the continuous-time formulation the parameters of the model are directly physically interpretable. The performance of the model for both forecasting and simulation is illustrated.},
+  file      = {Madsen1995.pdf:Madsen1995.pdf:PDF},
+  groups    = {Buildings},
+  language  = {English},
+  owner     = {pbac},
+  review    = {Here I can write my notes :-)},
+  timestamp = {2018.02.26},
+}
+
+@Book{Madsen2000,
+  title     = {Modelling Non-Linear and Non-Stationary Time Series},
+  publisher = {IMM, DTU},
+  year      = {2000},
+  author    = {Madsen, Henrik and Holst, Jan},
+  owner     = {pbac},
+  timestamp = {2018.02.26},
+}
+
+@TechReport{Madsen1993,
+  author      = {Madsen, Henrik and Schultz, J.M.},
+  title       = {Short Time Determination of the Heat Dynamics of Buildings},
+  institution = {DTU},
+  year        = {1993},
+  groups      = {Buildings},
+  owner       = {pbac},
+  timestamp   = {2018.02.26},
+}
+
+@Article{Martin2010,
+  author    = {Martín, Luis and Zarzalejo, Luis F. and Polo, Jesús and Navarro, Ana and Marchante, Ruth and Cony, Marco},
+  title     = {Prediction of global solar irradiance based on time series analysis: Application to solar thermal power plants energy production planning},
+  journal   = {Solar Energy},
+  year      = {2010},
+  volume    = {84},
+  number    = {10},
+  pages     = {1772-1781},
+  issn      = {0038-092X},
+  abstract  = {Due to strong increase of solar power generation, the predictions of incoming solar energy are acquiring more importance. Photovoltaic and solar thermal are the main sources of electricity generation from solar energy. In the case of solar thermal energy plants with storage energy system, its management and operation need reliable predictions of solar irradiance with the same temporal resolution as the temporal capacity of the back-up system. These plants can work like a conventional power plant and compete in the energy stock market avoiding intermittence in electricity production. This work presents a comparison of statistical models based on time series applied to predict half daily values of global solar irradiance with a temporal horizon of 3 days. Half daily values consist of accumulated hourly global solar irradiance from solar raise to solar noon and from noon until dawn for each day. The dataset of ground solar radiation used belongs to stations of Spanish National Weather Service (AEMet). The models tested are autoregressive, neural networks and fuzzy logic models. Due to the fact that half daily solar irradiance time series is non-stationary, it has been necessary to transform it to two new stationary variables (clearness index and lost component) which are used as input of the predictive models. Improvement in terms of RMSD of the models essayed is compared against the model based on persistence. The validation process shows that all models essayed improve persistence. The best approach to forecast half daily values of solar irradiance is neural network models with lost component as input, except Lerida station where models based on clearness index have less uncertainty because this magnitude has a linear behaviour and it is easier to simulate by models.},
+  copyright = {Elsevier Ltd},
+  file      = {Martin2010.pdf:Martin2010.pdf:PDF},
+  groups    = {forecasting, phdthesis},
+  language  = {English},
+  owner     = {pbac},
+  timestamp = {2018.02.26},
+}
+
+@Article{Moeller2008,
+  author    = {Møller, Jan Kloppenborg and Nielsen, Henrik Aalborg and Madsen, Henrik},
+  title     = {Time-adaptive quantile regression},
+  journal   = {Computational Statistics and Data Analysis},
+  year      = {2008},
+  volume    = {52},
+  number    = {3},
+  pages     = {1292-1303},
+  issn      = {0167-9473},
+  abstract  = {An algorithm for time-adaptive quantile regression is presented. The algorithm is based on the simplex algorithm, and the linear optimization formulation of the quantile regression problem is given. The observations have been split to allow a direct use of the simplex algorithm. The simplex method and an updating procedure are combined into a new algorithm for time-adaptive quantile regression, which generates new solutions on the basis of the old solution, leading to savings in computation time. The suggested algorithm is tested against a static quantile regression model on a data set with wind power production, where the models combine splines and quantile regression. The comparison indicates superior performance for the time-adaptive quantile regression in all the performance parameters considered.},
+  copyright = {Elsevier B.V.},
+  file      = {:time_adaptive_quantile_regression.pdf:PDF},
+  groups    = {TheoreticalModeling},
+  language  = {English},
+  owner     = {pbac},
+  timestamp = {2018.02.26},
+}
+
+@TechReport{Nielsen2008,
+  author      = {Nielsen, Henrik Aalborg},
+  title       = {Estimation of {UA}-values for single-family houses},
+  institution = {ENFOR},
+  year        = {2008},
+  file        = {:techreports/simpleUAhorsensFinal.pdf:PDF},
+  groups      = {Buildings, single house forecasting, PerformanceKPI},
+  owner       = {pbac},
+  timestamp   = {2018.02.26},
+}
+
+@Article{Perers1993,
+  author    = {Perers, B.},
+  title     = {Dynamic method for solar collector array testing and evaluation with standard database and simulation programs},
+  journal   = {Solar Energy},
+  year      = {1993},
+  volume    = {50},
+  number    = {6},
+  pages     = {517-526},
+  issn      = {0038-092X},
+  abstract  = {A measurement and evaluation method is described by which standard collector performance parameters can be derived directly from measured outdoor data. Standard programs with routines for multiple regression can be used for the parameter identification. A continuous flow is applied in the collector loop during the test. Data for the whole day can then be used. A one-node capacitance correction for dynamic effects and separate incidence angle modifiers for direct and diffuse radiation are essential for the accuracy of the method. The model is set up for thermal power output (and not efficiency). This forces the parameters to values that are suitable for prediction of long-term performance. The collector model and parameters correspond closely to those used in existing detailed simulation programs such as TRNSYS, WATSUN, or MINSUN. The method can be used as an accurate bridge between short-term testing and long-term prediction by simulation.},
+  copyright = {Pergamon Press Ltd.},
+  file      = {Perers1993.pdf:Perers1993.pdf:PDF},
+  groups    = {Collector testing},
+  language  = {English},
+  owner     = {pbac},
+  timestamp = {2018.02.26},
+}
+
+@Manual{RCoreTeam2013,
+  title        = {R: A Language and Environment for Statistical Computing},
+  author       = {{R Core Team}},
+  organization = {R Foundation for Statistical Computing},
+  address      = {Vienna, Austria},
+  year         = {2013},
+  owner        = {pb},
+  timestamp    = {2014.01.27},
+  url          = {http://www.R-project.org/},
+}
+
+@Article{Sfetsos2000,
+  author    = {Sfetsos, A. and Coonick, A.H.},
+  title     = {Univariate and multivariate forecasting of hourly solar radiation with artificial intelligence techniques},
+  journal   = {Solar Energy},
+  year      = {2000},
+  volume    = {68},
+  number    = {2},
+  pages     = {169-178},
+  issn      = {0038-092X},
+  abstract  = {This paper introduces a new approach for the forecasting of mean hourly global solar radiation received by a horizontal surface. In addition to the traditional linear methods, several artificial-intelligence-based techniques are studied. These include linear, feed-forward, recurrent Elman and Radial Basis neural networks alongside the adaptive neuro-fuzzy inference scheme. The problem is examined initially for the univariate case, and is extended to include additional meteorological parameters in the process of estimating the optimum model. The results indicate that the developed artificial intelligence models predict the solar radiation time series more effectively compared to the conventional procedures based on the clearness index. The forecasting ability of some models can be further enhanced with the use of additional meteorological parameters.},
+  copyright = {Elsevier Science Ltd},
+  file      = {Sfetsos2000.pdf:Sfetsos2000.pdf:PDF},
+  groups    = {forecasting},
+  language  = {English},
+  owner     = {pbac},
+  timestamp = {2018.02.26},
+}
+
+@Misc{SolarKeymark2011,
+  author    = {{Solar Keymark}},
+  title     = {{Homepage and database (all tested collectors according to EN12975 in Europe)}},
+  year      = {2011},
+  owner     = {pb},
+  timestamp = {2011.08.11},
+  url       = {http://solarkey.dk/solarkeymarkdata/qCollectorCertificates/ShowQCollectorCertificatesTable.aspx},
+}
+
+@TechReport{Thavlov2008,
+  author      = {Thavlov, Anders and Bacher, Peder and Madsen, Henrik},
+  title       = {Data for Energy Performance Analysis},
+  institution = {IMM, DTU},
+  year        = {2008},
+  owner       = {pbac},
+  timestamp   = {2018.02.26},
+}
+
+@Book{Wickham2009,
+  title     = {ggplot2: elegant graphics for data analysis},
+  publisher = {Springer New York},
+  year      = {2009},
+  author    = {Hadley Wickham},
+  isbn      = {978-0-387-98140-6},
+  file      = {:/home/pb/j/literature/books/ggplot2 Elegant Graphics for Data Analysis.pdf:PDF},
+  owner     = {pbac},
+  timestamp = {2018.02.26},
+  url       = {http://had.co.nz/ggplot2/book},
+}
+
+@Article{Zavala2009,
+  author    = {Victor M. Zavala and Emil M. Constantinescu and Theodore Krause and Mihai Anitescu},
+  title     = {On-line economic optimization of energy systems using weather forecast information},
+  journal   = {Journal of Process Control},
+  year      = {2009},
+  volume    = {19},
+  number    = {10},
+  pages     = {1725-1736},
+  issn      = {0959-1524},
+  abstract  = {We establish an on-line optimization framework to exploit weather forecast information in the operation of energy systems. We argue that anticipating the weather conditions can lead to more proactive and cost-effective operations. The framework is based on the solution of a stochastic dynamic real-time optimization (D-RTO) problem incorporating forecasts generated from a state-of-the-art weather prediction model. The necessary uncertainty information is extracted from the weather model using an ensemble approach. The accuracy of the forecast trends and uncertainty bounds are validated using real meteorological data. We present a numerical simulation study in a building system to demonstrate the developments.},
+  doi       = {10.1016/j.jprocont.2009.07.004},
+  file      = {Zavala2009.pdf:Zavala2009.pdf:PDF},
+  groups    = {Buildings, single house forecasting, OptimalBidding, Probabilistic},
+  keywords  = {Large-scale},
+  owner     = {pbac},
+  review    = {not read yet, downloaded 20101110},
+  timestamp = {2018.02.26},
+}
+
+@Article{Jensen2017,
+  author    = {Jensen, {Tue Vissing} and Pierre Pinson},
+  title     = {RE-Europe, a large-scale dataset for modeling a highly renewable European electricity system},
+  journal   = {Scientific Data},
+  year      = {2017},
+  volume    = {4},
+  issn      = {2052-4463},
+  abstract  = {Future highly renewable energy systems will couple to complex weather and climate dynamics. This coupling is generally not captured in detail by the open models developed in the power and energy system communities, where such open models exist. To enable modeling such a future energy system, we describe a dedicated large-scale dataset for a renewable electric power system. The dataset combines a transmission network model, as well as information for generation and demand. Generation includes conventional generators with their technical and economic characteristics, as well as weather-driven forecasts and corresponding realizations for renewable energy generation for a period of 3 years. These may be scaled according to the envisioned degrees of renewable penetration in a future European energy system. The spatial coverage, completeness and resolution of this dataset open the door to the evaluation, scaling analysis and replicability check of a wealth of proposals in, e.g., market design, network actor coordination and forecasting of renewable power generation.},
+  doi       = {10.1038/sdata.2017.175},
+  file      = {Jensen2017.pdf:Jensen2017.pdf:PDF},
+  owner     = {pbac},
+  publisher = {Nature Publishing Group},
+  timestamp = {2018.02.07},
+}
+
+@Article{Sakia1992,
+  author    = {Sakia, RM},
+  title     = {The Box-Cox transformation technique: a review},
+  journal   = {The Statistician},
+  year      = {1992},
+  pages     = {169--178},
+  owner     = {pbac},
+  publisher = {JSTOR},
+  timestamp = {2018.02.26},
+}
+
+@InProceedings{juban2007probabilistic,
+  author    = {Juban, J{\'e}r{\'e}mie and Fugon, Lionel and Kariniotakis, Georges},
+  title     = {Probabilistic short-term wind power forecasting based on kernel density estimators},
+  booktitle = {European Wind Energy Conference and exhibition, EWEC 2007},
+  year      = {2007},
+  owner     = {pbac},
+  timestamp = {2018.02.17},
+}
+
+@Manual{Jordan2017,
+  title     = {scoringRules: Scoring Rules for Parametric and Simulated Distribution Forecasts},
+  author    = {Alexander Jordan and Fabian Krueger and Sebastian Lerch},
+  year      = {2017},
+  note      = {R package version 0.9.4},
+  file      = {Jordan2017.pdf:Jordan2017.pdf:PDF},
+  owner     = {pbac},
+  timestamp = {2018.02.17},
+  url       = {https://CRAN.R-project.org/package=scoringRules},
+}
+
+@Article{Scheuerer2015,
+  author    = {Scheuerer, Michael and Hamill, Thomas M},
+  title     = {Variogram-based proper scoring rules for probabilistic forecasts of multivariate quantities},
+  journal   = {Monthly Weather Review},
+  year      = {2015},
+  volume    = {143},
+  number    = {4},
+  pages     = {1321--1334},
+  file      = {Scheuerer2015.pdf:Scheuerer2015.pdf:PDF},
+  owner     = {pbac},
+  timestamp = {2018.02.17},
+}
+
+@Article{Zhang2014,
+  author    = {Zhang, Yao and Wang, Jianxue and Wang, Xifan},
+  title     = {Review on probabilistic forecasting of wind power generation},
+  journal   = {Renewable and Sustainable Energy Reviews},
+  year      = {2014},
+  volume    = {32},
+  pages     = {255--270},
+  file      = {Zhang2014.pdf:Zhang2014.pdf:PDF},
+  groups    = {Probabilistic},
+  owner     = {pbac},
+  publisher = {Elsevier},
+  timestamp = {2018.02.18},
+}
+
+@Article{Iversen2017,
+  author    = {Iversen, Emil B and Morales, Juan M and M{\o}ller, Jan K and Trombe, Pierre-Julien and Madsen, Henrik},
+  title     = {Leveraging stochastic differential equations for probabilistic forecasting of wind power using a dynamic power curve},
+  journal   = {Wind Energy},
+  year      = {2017},
+  volume    = {20},
+  number    = {1},
+  pages     = {33--44},
+  file      = {Iversen2017.pdf:Iversen2017.pdf:PDF},
+  owner     = {pbac},
+  publisher = {Wiley Online Library},
+  timestamp = {2018.02.18},
+}
+
+@Article{Iversen2014a,
+  author    = {Iversen, Emil B and Morales, Juan M and M{\o}ller, Jan Kloppenborg and Madsen, Henrik},
+  title     = {Probabilistic forecasts of solar irradiance using stochastic differential equations},
+  journal   = {Environmetrics},
+  year      = {2014},
+  volume    = {25},
+  number    = {3},
+  pages     = {152--164},
+  file      = {Iversen2014a.pdf:Iversen2014a.pdf:PDF},
+  groups    = {Solar},
+  owner     = {pbac},
+  publisher = {Wiley Online Library},
+  timestamp = {2018.02.26},
+}
+
+@Article{Moller2016,
+  author    = {M{\o}ller, Jan Kloppenborg and Zugno, Marco and Madsen, Henrik},
+  title     = {Probabilistic forecasts of wind power generation by stochastic differential equation models},
+  journal   = {Journal of Forecasting},
+  year      = {2016},
+  volume    = {35},
+  number    = {3},
+  pages     = {189--205},
+  file      = {Moller2016.pdf:Moller2016.pdf:PDF},
+  groups    = {Wind},
+  owner     = {pbac},
+  publisher = {Wiley Online Library},
+  timestamp = {2018.02.26},
+}
+
+@Article{Tastu2011,
+  author    = {Tastu, Julija and Pinson, Pierre and Kotwa, Ewelina and Madsen, Henrik and Nielsen, Henrik Aa},
+  title     = {Spatio-temporal analysis and modeling of short-term wind power forecast errors},
+  journal   = {Wind Energy},
+  year      = {2011},
+  volume    = {14},
+  number    = {1},
+  pages     = {43--60},
+  file      = {Tastu2011.pdf:Tastu2011.pdf:PDF},
+  owner     = {pbac},
+  publisher = {Wiley Online Library},
+  timestamp = {2018.02.26},
+}
+
+@TechReport{Ferreira2011,
+  author      = {Ferreira, C and Gama, Joao and Matias, L and Botterud, Audun and Wang, J},
+  title       = {A survey on wind power ramp forecasting.},
+  institution = {Argonne National Laboratory (ANL)},
+  year        = {2011},
+  owner       = {pbac},
+  timestamp   = {2018.02.26},
+}
+
+@Article{Gallego2015,
+  author    = {Gallego-Castillo, Cristobal and Cuerva-Tejero, Alvaro and Lopez-Garcia, Oscar},
+  title     = {A review on the recent history of wind power ramp forecasting},
+  journal   = {Renewable and Sustainable Energy Reviews},
+  year      = {2015},
+  volume    = {52},
+  pages     = {1148--1157},
+  owner     = {pbac},
+  publisher = {Elsevier},
+  timestamp = {2018.02.26},
+}
+
+@Article{Andersen2000,
+  author    = {Andersen, Klaus Kaae and Madsen, Henrik and Hansen, Lars H.},
+  title     = {Modelling the heat dynamics of a building using stochastic differential equations},
+  journal   = {Energy and Buildings},
+  year      = {2000},
+  volume    = {31},
+  number    = {1},
+  pages     = {13-24},
+  issn      = {0378-7788},
+  abstract  = {This paper describes the continuous time modelling of the heat dynamics of a building. The considered building is a residential like test house divided into two test rooms with a water based central heating. Each test room is divided into thermal zones in order to describe both short and long term variations. Besides modelling the heat transfer between thermal zones, attention is put on modelling the heat input from radiators and solar radiation. The applied modelling procedure is based on collected building performance data and statistical methods. The statistical methods are used in parameter estimation and model validation, while physical knowledge is used in forming the model structure. The suggested lumped parameter model is thus based on thermodynamics and formulated as a system of stochastic differential equations. Due to the continuous time formulation the parameters of the model are directly physically interpretable. Finally, the prediction and simulation performance of the model is illustrated.},
+  file      = {Andersen2000.pdf:Andersen2000.pdf:PDF},
+  groups    = {Buildings},
+  language  = {English},
+  owner     = {pbac},
+  timestamp = {2018.02.26},
+}
+
+@Article{Gneiting2004,
+  author    = {Gneiting, Tilmann and Raftery, Adrian E and Westveld III, Anton H and Goldman, Tom},
+  title     = {Calibrated probabilistic forecasting using ensemble model output statistics and minimum CRPS estimation},
+  journal   = {Monthly Weather Review},
+  year      = {2005},
+  volume    = {133},
+  number    = {5},
+  pages     = {1098--1118},
+  file      = {Gneiting2004.pdf:Gneiting2004.pdf:PDF},
+  owner     = {pbac},
+  timestamp = {2018.02.26},
+}
+
+@Article{Gneiting2006a,
+  author    = {Gneiting, Tilmann and Larson, Kristin and Westrick, Kenneth and Genton, Marc G and Aldrich, Eric},
+  title     = {Calibrated probabilistic forecasting at the Stateline Wind Energy Center: The regime-switching space--time method},
+  journal   = {Journal of the American Statistical Association},
+  year      = {2006},
+  volume    = {101},
+  number    = {475},
+  pages     = {968--979},
+  file      = {Gneiting2006a.pdf:Gneiting2006a.pdf:PDF},
+  owner     = {pbac},
+  publisher = {Taylor \& Francis},
+  timestamp = {2018.02.26},
+}
+
+@InCollection{Tastu2015,
+  author    = {Tastu, Julija and Pinson, Pierre and Madsen, Henrik},
+  title     = {Space-time trajectories of wind power generation: Parametrized precision matrices under a Gaussian copula approach},
+  booktitle = {Modeling and Stochastic Learning for Forecasting in High Dimensions},
+  publisher = {Springer},
+  year      = {2015},
+  pages     = {267--296},
+  file      = {Tastu2015.pdf:Tastu2015.pdf:PDF},
+  owner     = {pbac},
+  timestamp = {2018.02.26},
+}
+
+@Article{Pinson2009a,
+  author    = {Pinson, Pierre and Madsen, Henrik and Nielsen, Henrik Aa and Papaefthymiou, George and Kl{\"o}ckl, Bernd},
+  title     = {From probabilistic forecasts to statistical scenarios of short-term wind power production},
+  journal   = {Wind energy},
+  year      = {2009},
+  volume    = {12},
+  number    = {1},
+  pages     = {51--62},
+  file      = {Pinson2009a.pdf:Pinson2009a.pdf:PDF},
+  owner     = {pbac},
+  publisher = {Wiley Online Library},
+  timestamp = {2018.02.26},
+}
+
+@Article{Pinson2012a,
+  author    = {Pinson, Pierre and Girard, Robin},
+  title     = {Evaluating the quality of scenarios of short-term wind power generation},
+  journal   = {Applied Energy},
+  year      = {2012},
+  volume    = {96},
+  pages     = {12--20},
+  file      = {Pinson2012a.pdf:Pinson2012a.pdf:PDF},
+  groups    = {Evaluation},
+  owner     = {pbac},
+  publisher = {Elsevier},
+  timestamp = {2018.02.26},
+}
+
+@Book{Pinson2013,
+  title     = {Discrimination ability of the energy score},
+  publisher = {DTU Informatics},
+  year      = {2013},
+  author    = {Pinson, Pierre and Tastu, Julija},
+  file      = {Pinson2013.pdf:Pinson2013.pdf:PDF},
+  groups    = {Evaluation},
+  owner     = {pbac},
+  timestamp = {2018.02.26},
+}
+
+@InProceedings{Nielsen2006c,
+  author       = {Nielsen, Henrik Aalborg and Nielsen, Torben Skov and Madsen, Henrik and Giebel, Gregor and Badger, Jake and Landberg, Lars and Sattler, Kai and Voulund, Lars and Tofting, John},
+  title        = {From wind ensembles to probabilistic information about future wind power production--results from an actual application},
+  booktitle    = {Probabilistic Methods Applied to Power Systems, 2006. PMAPS 2006. International Conference on},
+  year         = {2006},
+  pages        = {1--8},
+  organization = {IEEE},
+  file         = {Nielsen2006c.pdf:Nielsen2006c.pdf:PDF},
+  groups       = {Evaluation},
+  owner        = {pbac},
+  timestamp    = {2018.02.26},
+}
+
+@Article{Bao2010,
+  author    = {Bao, Le and Gneiting, Tilmann and Grimit, Eric P and Guttorp, Peter and Raftery, Adrian E},
+  title     = {Bias correction and Bayesian model averaging for ensemble forecasts of surface wind direction},
+  journal   = {Monthly Weather Review},
+  year      = {2010},
+  volume    = {138},
+  number    = {5},
+  pages     = {1811--1821},
+  file      = {Bao2010.pdf:Bao2010.pdf:PDF},
+  groups    = {Grey-box modeling},
+  owner     = {pbac},
+  timestamp = {2018.02.27},
+}
+
+@Article{Morales2010,
+  author    = {Morales, Juan M and Minguez, Roberto and Conejo, Antonio J},
+  title     = {A methodology to generate statistically dependent wind speed scenarios},
+  journal   = {Applied Energy},
+  year      = {2010},
+  volume    = {87},
+  number    = {3},
+  pages     = {843--855},
+  file      = {Morales2010.pdf:Morales2010.pdf:PDF},
+  groups    = {Grey-box modeling},
+  owner     = {pbac},
+  publisher = {Elsevier},
+  timestamp = {2018.02.27},
+}
+
+@Article{Xie2014,
+  author    = {Xie, Le and Gu, Yingzhong and Zhu, Xinxin and Genton, Marc G},
+  title     = {Short-term spatio-temporal wind power forecast in robust look-ahead power system dispatch},
+  journal   = {IEEE Transactions on Smart Grid},
+  year      = {2014},
+  volume    = {5},
+  number    = {1},
+  pages     = {511--520},
+  file      = {Xie2014.pdf:Xie2014.pdf:PDF},
+  groups    = {Grey-box modeling},
+  owner     = {pbac},
+  publisher = {IEEE},
+  timestamp = {2018.02.27},
+}
+
+@Article{He2014,
+  author    = {He, Miao and Yang, Lei and Zhang, Junshan and Vittal, Vijay},
+  title     = {A spatio-temporal analysis approach for short-term forecast of wind farm generation},
+  journal   = {IEEE Transactions on Power Systems},
+  year      = {2014},
+  volume    = {29},
+  number    = {4},
+  pages     = {1611--1622},
+  file      = {He2014.pdf:He2014.pdf:PDF},
+  groups    = {Grey-box modeling},
+  owner     = {pbac},
+  publisher = {IEEE},
+  timestamp = {2018.02.27},
+}
+
+@Article{Papavasiliou2011,
+  author    = {Papavasiliou, Anthony and Oren, Shmuel S and O'Neill, Richard P},
+  title     = {Reserve requirements for wind power integration: A scenario-based stochastic programming framework},
+  journal   = {IEEE Transactions on Power Systems},
+  year      = {2011},
+  volume    = {26},
+  number    = {4},
+  pages     = {2197--2206},
+  file      = {Papavasiliou2011.pdf:Papavasiliou2011.pdf:PDF},
+  owner     = {pbac},
+  publisher = {IEEE},
+  timestamp = {2018.02.28},
+}
+
+@Article{Morales2009,
+  author    = {Morales, Juan M and Conejo, Antonio J and P{\'e}rez-Ruiz, Juan},
+  title     = {Economic valuation of reserves in power systems with high penetration of wind power},
+  journal   = {IEEE Transactions on Power Systems},
+  year      = {2009},
+  volume    = {24},
+  number    = {2},
+  pages     = {900--910},
+  owner     = {pbac},
+  publisher = {IEEE},
+  timestamp = {2018.02.28},
+}
+
+@Article{Sun2009,
+  author    = {Sun, YZ and Wu, Jun and Li, GJ and He, Jian},
+  title     = {Dynamic economic dispatch considering wind power penetration based on wind speed forecasting and stochastic programming},
+  journal   = {Proceedings of the CSEE},
+  year      = {2009},
+  volume    = {29},
+  number    = {4},
+  pages     = {41--47},
+  owner     = {pbac},
+  timestamp = {2018.02.28},
+}
+
+@InCollection{Oksendal2003,
+  author    = {{\O}ksendal, Bernt},
+  title     = {Stochastic differential equations},
+  booktitle = {Stochastic differential equations},
+  publisher = {Springer},
+  year      = {2003},
+  pages     = {65--84},
+  owner     = {pbac},
+  timestamp = {2018.02.28},
+}
+
+@Article{Gel2004,
+  author    = {Gel, Yulia and Raftery, Adrian E and Gneiting, Tilmann},
+  title     = {Calibrated probabilistic mesoscale weather field forecasting: The geostatistical output perturbation method},
+  journal   = {Journal of the American Statistical Association},
+  year      = {2004},
+  volume    = {99},
+  number    = {467},
+  pages     = {575--583},
+  file      = {Gel2004.pdf:Gel2004.pdf:PDF},
+  owner     = {pbac},
+  publisher = {Taylor \& Francis},
+  timestamp = {2018.02.28},
+}
+
+@Article{Wilks2004,
+  author    = {Wilks, DS},
+  title     = {The minimum spanning tree histogram as a verification tool for multidimensional ensemble forecasts},
+  journal   = {Monthly Weather Review},
+  year      = {2004},
+  volume    = {132},
+  number    = {6},
+  pages     = {1329-1340},
+  issn      = {15200493, 00270644},
+  abstract  = {The minimum spanning tree (MST) histogram is a multivariate extension of the ideas behind the conventional scalar rank histogram. It tabulates the frequencies, over n forecast occasions, of the rank of the MST length for each ensemble, within the group of such lengths that is obtained by substituting an observation for each of its ensemble members in turn. In raw form it is unable to distinguish ensemble bias from ensemble underdispersion, or to discern the contributions of forecast variables with small variance. The use of scaled and debiased MST histograms to diagnose attributes of ensemble forecasts is illustrated, both for synthetic Gaussian ensembles and for a small sample of actual ensemble forecasts. Also presented are adjustments to x 2 critical values for evaluating rank uniformity, for both MST histograms and scalar rank histograms, given serial correlation in the forecasts.},
+  doi       = {10.1175/1520-0493(2004)132<1329:TMSTHA>2.0.CO;2},
+  file      = {Wilks2004.pdf:Wilks2004.pdf:PDF},
+  language  = {eng},
+  owner     = {pbac},
+  publisher = {AMER METEOROLOGICAL SOC},
+  timestamp = {2018.03.01},
+}
+
+@Book{Ljung2008,
+  title     = {System Identification Toolbox 7: Getting Started Guide},
+  publisher = {The MathWorks},
+  year      = {2008},
+  author    = {Ljung, Lennart},
+  owner     = {pbac},
+  timestamp = {2018.03.11},
+}
+
+@Manual{Spliid2017,
+  title     = {marima: Multivariate ARIMA and ARIMA-X Analysis},
+  author    = {Henrik Spliid},
+  year      = {2017},
+  note      = {R package version 2.2},
+  owner     = {pbac},
+  timestamp = {2018.03.11},
+  url       = {https://CRAN.R-project.org/package=marima},
+}
+
+@Article{persson2017multi,
+  author    = {Persson, Caroline and Bacher, Peder and Shiga, Takahiro and Madsen, Henrik},
+  title     = {Multi-site solar power forecasting using gradient boosted regression trees},
+  journal   = {Solar Energy},
+  year      = {2017},
+  volume    = {150},
+  pages     = {423--436},
+  owner     = {pbac},
+  publisher = {Elsevier},
+  timestamp = {2018.06.27},
+}
+
+@Article{antonanzas2016a,
+  author    = {Antonanzas, J. and Osorio, N. and Escobar, R. and Urraca, R. and Martinez-de-Pison, F. J. and Antonanzas-Torres, F.},
+  title     = {Review of photovoltaic power forecasting},
+  journal   = {Solar Energy},
+  year      = {2016},
+  volume    = {136},
+  pages     = {78-111},
+  issn      = {14711257, 0038092x},
+  abstract  = {Variability of solar resource poses difficulties in grid management as solar penetration rates rise continuously. Thus, the task of solar power forecasting becomes crucial to ensure grid stability and to enable an optimal unit commitment and economical dispatch. Several forecast horizons can be identified, spanning from a few seconds to days or weeks ahead, as well as spatial horizons, from single site to regional forecasts. New techniques and approaches arise worldwide each year to improve accuracy of models with the ultimate goal of reducing uncertainty in the predictions. This paper appears with the aim of compiling a large part of the knowledge about solar power forecasting, focusing on the latest advancements and future trends. Firstly, the motivation to achieve an accurate forecast is presented with the analysis of the economic implications it may have. It is followed by a summary of the main techniques used to issue the predictions. Then, the benefits of point/regional forecasts and deterministic/probabilistic forecasts are discussed. It has been observed that most recent papers highlight the importance of probabilistic predictions and they incorporate an economic assessment of the impact of the accuracy of the forecasts on the grid. Later on, a classification of authors according to forecast horizons and origin of inputs is presented, which represents the most up-to-date compilation of solar power forecasting studies. Finally, all the different metrics used by the researchers have been collected and some remarks for enabling a fair comparison among studies have been stated.},
+  doi       = {10.1016/j.solener.2016.06.069},
+  language  = {eng},
+  owner     = {pbac},
+  publisher = {Elsevier Ltd},
+  timestamp = {2018.06.27},
+}
+
+@InCollection{ML04,
+  author    = {Mohammed, AzharAhmed and Yaqub, Waheeb and Aung, Zeyar},
+  title     = {Probabilistic Forecasting of Solar Power: An Ensemble Learning Approach},
+  booktitle = {Intelligent Decision Technologies},
+  publisher = {Springer International Publishing},
+  year      = {2015},
+  editor    = {Neves-Silva, Rui and Jain, Lakhmi C. and Howlett, Robert J.},
+  volume    = {39},
+  series    = {Smart Innovation, Systems and Technologies},
+  pages     = {449-458},
+  isbn      = {978-3-319-19856-9},
+  doi       = {10.1007/978-3-319-19857-6_38},
+  keywords  = {Solar power; Probabilistic forecasting; Pinball loss function; Ensemble learning},
+  language  = {English},
+  owner     = {pbac},
+  timestamp = {2018.06.27},
+  url       = {http://dx.doi.org/10.1007/978-3-319-19857-6_38},
+}
+
+@Article{bessa2015a,
+  author    = {Bessa, R. J. and Trindade, A. and Silva, Catia S. P. and Miranda, V.},
+  title     = {Probabilistic solar power forecasting in smart grids using distributed information},
+  journal   = {International Journal of Electrical Power and Energy Systems},
+  year      = {2015},
+  volume    = {72},
+  pages     = {16-23},
+  issn      = {18793517, 01420615},
+  abstract  = {The deployment of Smart Grid technologies opens new opportunities to develop new forecasting and optimization techniques. The growth of solar power penetration in distribution grids imposes the use of solar power forecasts as inputs in advanced grid management functions. This paper proposes a new forecasting algorithm for 6 h ahead based on the vector autoregression framework, which combines distributed time series information collected by the Smart Grid infrastructure. Probabilistic forecasts are generated for the residential solar photovoltaic (PV) and secondary substation levels. The test case consists of 44 micro-generation units and 10 secondary substations from the Smart Grid pilot in Evora, Portugal. The benchmark model is the well-known autoregressive forecasting method (univariate approach). The average improvement in terms of root mean square error (point forecast evaluation) and continuous ranking probability score (probabilistic forecast evaluation) for the first 3 lead-times was between 8% and 12%, and between 1.4% and 5.9%, respectively. (C) 2015 Published by Elsevier Ltd.},
+  doi       = {10.1016/j.ijepes.2015.02.006},
+  language  = {und},
+  owner     = {pbac},
+  publisher = {ELSEVIER SCI LTD},
+  timestamp = {2018.06.27},
+}
+
+@Article{ML06b,
+  author    = {Almeida, Marcelo Pinho and Perpion, Oscar and Narvarte, Luis},
+  title     = {{PV} power forecast using a nonparametric {PV} model},
+  journal   = {Solar Energy},
+  year      = {2015},
+  volume    = {115},
+  pages     = {354--368},
+  issn      = {0038092x, 14711257},
+  language  = {English},
+  owner     = {pbac},
+  publisher = {Elsevier B.V.},
+  timestamp = {2018.06.27},
+}
+
+@Article{ML06a,
+  author    = {M. Zamo and O. Mestre and P. Arbogast and O. Pannekoucke},
+  title     = {A benchmark of statistical regression methods for short-term forecasting of photovoltaic electricity production. Part {II}: Probabilistic forecast of daily production},
+  journal   = {Solar Energy},
+  year      = {2014},
+  volume    = {105},
+  pages     = {804--816},
+  issn      = {0038-092X},
+  doi       = {http://dx.doi.org/10.1016/j.solener.2014.03.026},
+  owner     = {pbac},
+  timestamp = {2018.06.27},
+  url       = {http://www.sciencedirect.com/science/article/pii/S0038092X14001601},
+}
+
+@Article{ML06,
+  author    = {M. Zamo and O. Mestre and P. Arbogast and O. Pannekoucke},
+  title     = {A benchmark of statistical regression methods for short-term forecasting of photovoltaic electricity production, part {I}: Deterministic forecast of hourly production},
+  journal   = {Solar Energy},
+  year      = {2014},
+  volume    = {105},
+  pages     = {792--803},
+  issn      = {0038-092X},
+  doi       = {http://dx.doi.org/10.1016/j.solener.2013.12.006},
+  owner     = {pbac},
+  timestamp = {2018.06.27},
+  url       = {http://www.sciencedirect.com/science/article/pii/S0038092X13005239},
+}
+
+@Article{smartpers,
+  author    = {Marquez, Ricardo and Pedro, Hugo TC and Coimbra, Carlos FM},
+  title     = {Hybrid solar forecasting method uses satellite imaging and ground telemetry as inputs to ANNs},
+  journal   = {Solar Energy},
+  year      = {2013},
+  volume    = {92},
+  pages     = {176--188},
+  owner     = {pbac},
+  publisher = {Elsevier},
+  timestamp = {2018.06.27},
+}
+
+@Article{marquez2013a,
+  author    = {Marquez, Ricardo and Coimbra, Carlos F. M.},
+  title     = {Proposed Metric for Evaluation of Solar Forecasting Models},
+  journal   = {Journal of Solar Energy Engineering-transactions of the Asme},
+  year      = {2013},
+  volume    = {135},
+  number    = {1},
+  pages     = {011016},
+  issn      = {15288986, 01996231},
+  abstract  = {This work presents an alternative metric for evaluating the quality of solar forecasting models. Some conventional approaches use quantities such as the root-mean-squareerror (RMSE) and/or correlation coefficients to evaluate model quality. The direct use of statistical quantities to assign forecasting quality can be misleading because these metrics do not convey a measure of the variability of the time-series for the solar irradiance data. In contrast, the quality metric proposed here, which is defined as the ratio of solar uncertainty to solar variability, compares the forecasting error with the solar variability directly. By making the forecasting error to variability comparisons for different time windows, we show that this ratio is essentially a statistical invariant for each forecast model employed, i.e., the ratio is preserved for widely different time horizons when the same time averaging periods are used, and therefore provides a robust way to compare solar forecasting skills. We employ the proposed metric to evaluate two new forecasting models proposed here, and compare their performances with a persistence model. [DOI: 10.1115/1.4007496]},
+  doi       = {10.1115/1.4007496},
+  language  = {und},
+  owner     = {pbac},
+  publisher = {ASME},
+  timestamp = {2018.06.27},
+}
+
+@Manual{JMAMSM,
+  title     = {Outline of the operational numerical weather prediction at the Japan Meteorological Agency},
+  author    = {Japan Meteorological Agency JMA},
+  year      = {2013},
+  note      = {Last update: 2013 (accessed 02/12/2015)},
+  owner     = {pbac},
+  timestamp = {2018.06.27},
+  url       = {http://www.jma.go.jp/jma/jma-eng/jma-center/nwp/outline2013-nwp/pdf/outline2013_03.pdf},
+}
+
+@Article{huang2013a,
+  author    = {Huang, Jing and Korolkiewicz, Ma and Agrawal, Manju and Boland, John},
+  title     = {Forecasting solar radiation on an hourly time scale using a Coupled AutoRegressive and Dynamical System (CARDS) model},
+  journal   = {Solar Energy},
+  year      = {2013},
+  volume    = {87},
+  pages     = {136--149},
+  language  = {English},
+  owner     = {pbac},
+  publisher = {Elsevier Science Ltd.},
+  timestamp = {2018.06.27},
+}
+
+@Article{hoff2013a,
+  author    = {Hoff, Thomas E. and Perez, Richard and Kleissl, Jan and Renne, David and Stein, Joshua},
+  title     = {Reporting of irradiance modeling relative prediction errors},
+  journal   = {Progress in Photovoltaics},
+  year      = {2013},
+  volume    = {21},
+  number    = {7},
+  pages     = {1514-1519},
+  issn      = {1099159x, 10627995},
+  abstract  = {Metrics used in assessing irradiance model accuracy, such as root mean square error and mean absolute error, are precisely defined. Their relative (%) counterpart, however, can be subject to interpretation and may cover a wide range of values for a given set of data depending on reporting practice. This note evaluates different approaches for the reporting of relative metrics quantifying the dispersion accuracy of a model and formulates recommendations for the most appropriate approach. Copyright (c) 2012 John Wiley & Sons, Ltd.},
+  doi       = {10.1002/pip.2225},
+  language  = {und},
+  owner     = {pbac},
+  publisher = {WILEY-BLACKWELL},
+  timestamp = {2018.06.27},
+}
+
+@Article{coimbra2013a,
+  author    = {Coimbra, Carlos F M and Kleissl, Jan and Marquez, Ricardo},
+  title     = {Overview of Solar-Forecasting Methods and a Metric for Accuracy Evaluation},
+  journal   = {Solar Energy Forecasting and Resource Assessment},
+  year      = {2013},
+  pages     = {171-194},
+  abstract  = {This chapter provides an introduction to and overview of the subsequent chapters, which discuss specific solar-forecasting technologies and time horizons. Solar-forecasting methods are classified by technique, time horizon, and application. Advantages and disadvantages of deterministic and stochastic forecasting approaches are laid out and discussed in the context of solar forecasting based on numerical weather prediction, satellite data, and ground measurements. Metrics to evaluate solar-forecasting techniques are then presented and a time horizon-invariant metric is introduced that allows comparing forecast errors across time horizons, geographical regions, and time steps. Finally, the metric is demonstrated with hour-ahead forecasts based on stochastic-learning and satellite cloud-motion vector techniques. © 2013 Elsevier Inc. All rights reserved.},
+  doi       = {10.1016/B978-0-12-397177-7.00008-5},
+  isbn      = {9780123971777},
+  language  = {eng},
+  owner     = {pbac},
+  publisher = {Elsevier Inc.},
+  timestamp = {2018.06.27},
+}
+
+@InProceedings{ML03,
+  author    = {Ragnacci, A. and Pastorelli, M. and Valigi, P. and Ricci, E.},
+  title     = {Exploiting dimensionality reduction techniques for photovoltaic power forecasting},
+  booktitle = {Energy Conference and Exhibition (ENERGYCON), 2012 IEEE International},
+  year      = {2012},
+  pages     = {867--872},
+  month     = {Sept},
+  doi       = {10.1109/EnergyCon.2012.6348273},
+  owner     = {pbac},
+  timestamp = {2018.06.27},
+}
+
+@Article{ML05,
+  author    = {Hugo T.C. Pedro and Carlos F.M. Coimbra},
+  title     = {Assessment of forecasting techniques for solar power production with no exogenous inputs},
+  journal   = {Solar Energy},
+  year      = {2012},
+  volume    = {86},
+  number    = {7},
+  pages     = {2017--2028},
+  issn      = {0038-092X},
+  doi       = {http://dx.doi.org/10.1016/j.solener.2012.04.004},
+  owner     = {pbac},
+  timestamp = {2018.06.27},
+  url       = {http://www.sciencedirect.com/science/article/pii/S0038092X12001429},
+}
+
+@InProceedings{ML01,
+  author    = {Sharma, N. and Sharma, P. and Irwin, D. and Shenoy, P.},
+  title     = {Predicting solar generation from weather forecasts using machine learning},
+  booktitle = {Smart Grid Communications (SmartGridComm), 2011 IEEE International Conference on},
+  year      = {2011},
+  pages     = {528--533},
+  month     = {Oct},
+  doi       = {10.1109/SmartGridComm.2011.6102379},
+  owner     = {pbac},
+  timestamp = {2018.06.27},
+}
+
+@Article{scikit-learn,
+  author    = {Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
+  title     = {Scikit-learn: Machine Learning in {P}ython},
+  journal   = {Journal of Machine Learning Research},
+  year      = {2011},
+  volume    = {12},
+  pages     = {2825--2830},
+  owner     = {pbac},
+  timestamp = {2018.06.27},
+}
+
+@Article{ML04a,
+  author    = {Ricardo Marquez and Carlos F.M. Coimbra},
+  title     = {Forecasting of global and direct solar irradiance using stochastic learning methods, ground experiments and the NWS database},
+  journal   = {Solar Energy},
+  year      = {2011},
+  volume    = {85},
+  number    = {5},
+  pages     = {746--756},
+  issn      = {0038-092X},
+  doi       = {http://dx.doi.org/10.1016/j.solener.2011.01.007},
+  owner     = {pbac},
+  timestamp = {2018.06.27},
+  url       = {http://www.sciencedirect.com/science/article/pii/S0038092X11000193},
+}
+
+@Book{Bible10,
+  title     = {The Elements of Statistical Learning},
+  publisher = {Springer},
+  year      = {2011},
+  author    = {Hastie, Trevor and Tibshirani, Robert and Friedman, Jerome},
+  chapter   = {10},
+  owner     = {pbac},
+  timestamp = {2018.06.27},
+}
+
+@Manual{EU2050,
+  title     = {A Roadmap for moving to a competitive low carbon economy in 2050},
+  author    = {European Commision},
+  year      = {2011},
+  note      = {accessed 15/02/2016},
+  owner     = {pbac},
+  timestamp = {2018.06.27},
+  url       = {http://eur-lex.europa.eu/legal-content/EN/TXT/?uri=CELEX:52011DC0112},
+}
+
+@InProceedings{ML01a,
+  author    = {Sharma, N. and Gummeson, J. and Irwin, D. and Shenoy, P.},
+  title     = {Cloudy Computing: Leveraging Weather Forecasts in Energy Harvesting Sensor Systems},
+  booktitle = {Sensor Mesh and Ad Hoc Communications and Networks (SECON), 2010 7th Annual IEEE Communications Society Conference on},
+  year      = {2010},
+  pages     = {1-9},
+  month     = {June},
+  doi       = {10.1109/SECON.2010.5508260},
+  owner     = {pbac},
+  timestamp = {2018.06.27},
+}
+
+@Book{madsen2008a,
+  title     = {Time series analysis},
+  publisher = {Chapman and Hall/CRC},
+  year      = {2008},
+  author    = {Madsen, Henrik},
+  isbn      = {142005967x, 9781420059670},
+  owner     = {pbac},
+  pages     = {380 s},
+  timestamp = {2018.06.27},
+}
+
+@InProceedings{4587404,
+  author    = {Minyoung Kim and V. Pavlovic},
+  title     = {Dimensionality reduction using covariance operator inverse regression},
+  booktitle = {Computer Vision and Pattern Recognition, 2008. CVPR 2008. IEEE Conference on},
+  year      = {2008},
+  pages     = {1-8},
+  month     = {June},
+  doi       = {10.1109/CVPR.2008.4587404},
+  issn      = {1063-6919},
+  keywords  = {correlation methods;covariance analysis;learning (artificial intelligence);matrix algebra;nonlinear functions;regression analysis;covariance operator inverse regression;dimensionality reduction for regression;kernel Gram matrices;linear input subspaces;nonlinear basis functions;nonlinear method;statistical correlation;transduction setting;Closed-form solution;Computer science;Computer vision;Covariance matrix;Data visualization;Kernel;Linear discriminant analysis;Noise reduction;Principal component analysis;Supervised learning},
+  owner     = {pbac},
+  timestamp = {2018.06.27},
+}
+
+@Article{ridgeway2008a,
+  author    = {Ridgeway, Greg},
+  title     = {Generalized Boosted Models: A guide to the gbm package},
+  journal   = {Update},
+  year      = {2007},
+  volume    = {1},
+  number    = {1},
+  pages     = {2007},
+  owner     = {pbac},
+  timestamp = {2018.06.27},
+}
+
+@Article{madsen2005a,
+  author    = {Madsen, Henrik and Pinson, Pierre and Kariniotakis, G. and Nielsen, Henrik Aalborg and Nielsen, Torben Skov},
+  title     = {Standardizing the performance evaluation of short-term wind prediction models},
+  journal   = {Wind Engineering},
+  year      = {2005},
+  volume    = {29},
+  number    = {6},
+  pages     = {475--489},
+  issn      = {2048402x, 0309524x},
+  language  = {eng},
+  owner     = {pbac},
+  timestamp = {2018.06.27},
+}
+
+@Article{koenker,
+  author    = {Koenker, Roger},
+  title     = {Quantile regression},
+  journal   = {Quantile Regression},
+  year      = {2005},
+  pages     = {1--349},
+  doi       = {10.1017/CBO9780511754098},
+  isbn      = {9780521845731, 9780511754098},
+  language  = {eng},
+  owner     = {pbac},
+  publisher = {Cambridge University Press},
+  timestamp = {2018.06.27},
+}
+
+@Article{reda2004a,
+  author    = {Reda, I and Andreas, A},
+  title     = {Solar position algorithm for solar radiation applications},
+  journal   = {Solar Energy},
+  year      = {2004},
+  volume    = {76},
+  number    = {5},
+  pages     = {577-589},
+  issn      = {14711257, 0038092x},
+  doi       = {10.1016/j.solener.2003.12.003},
+  language  = {English},
+  owner     = {pbac},
+  publisher = {Elsevier Science Ltd.},
+  timestamp = {2018.06.27},
+}
+
+@Article{Bacher2016a,
+  author   = {Peder Bacher and Philip Anton de Saint-Aubain and Lasse Engbo Christiansen and Henrik Madsen},
+  title    = {Non-parametric method for separating domestic hot water heating spikes and space heating},
+  journal  = {Energy and Buildings},
+  year     = {2016},
+  volume   = {130},
+  pages    = {107 - 112},
+  issn     = {0378-7788},
+  abstract = {In this paper a method for separating spikes from a noisy data series, where the data change and evolve over time, is presented. The method is applied on measurements of the total heat load for a single family house. It relies on the fact that the domestic hot water heating is a process generating short-lived spikes in the time series, while the space heating changes in slower patterns during the day dependent on the climate and user behavior. The challenge is to separate the domestic hot water heating spikes from the space heating without affecting the natural noise in the space heating measurements. The assumption behind the developed method is that the space heating can be estimated by a non-parametric kernel smoother, such that every value significantly above this kernel smoother estimate is identified as a domestic hot water heating spike. First, it is showed how a basic kernel smoothing approach is too simple to deliver reliable results. Therefore the problem is generalized to a local least squares problem, which makes it possible to design a robust kernel smoother, which estimate is not affected by the spikes. Furthermore, the generalized model makes it possible to estimate higher order local polynomials. Finally, the results are evaluated and it is found that the method is capable of calculating a reliable separation of the total heat load into the two components.},
+  doi      = {https://doi.org/10.1016/j.enbuild.2016.08.037},
+  file     = {Bacher2016a.pdf:articles/Bacher2016a.pdf:PDF},
+  keywords = {Separation of total heat load, Kernel smoother, Robust estimation, Statistical modeling, Time series analyses, Smart grid, Smart metering, Heat metering},
+  url      = {http://www.sciencedirect.com/science/article/pii/S0378778816307332},
+}
+
+@Comment{jabref-meta: databaseType:bibtex;}
+
+@Comment{jabref-meta: fileDirectory-pbac-nbpbac:/home/pbac/j/literature/articles;}
+
+@Comment{jabref-meta: groupstree:
+0 AllEntriesGroup:;
+1 ExplicitGroup:Buildings\;0\;;
+2 ExplicitGroup:single house forecasting\;0\;;
+2 ExplicitGroup:kernelpaper\;0\;;
+1 ExplicitGroup:Adaptive Autocorrelation\;0\;;
+1 ExplicitGroup:Collector testing\;0\;;
+1 ExplicitGroup:NotReadYet\;0\;;
+1 ExplicitGroup:solar\;2\;;
+2 ExplicitGroup:correction\;0\;;
+2 ExplicitGroup:forecasting\;0\;;
+1 ExplicitGroup:phdthesis\;2\;;
+1 ExplicitGroup:EVT\;0\;;
+1 ExplicitGroup:Identifiability\;0\;;
+1 ExplicitGroup:EnergyMdlAndOp.\;0\;;
+2 ExplicitGroup:Modeling\;0\;;
+3 ExplicitGroup:Grey-box modeling\;0\;;
+4 ExplicitGroup:Buildings\;0\;;
+3 ExplicitGroup:Forecasting\;0\;;
+4 ExplicitGroup:wind\;0\;;
+4 ExplicitGroup:solar\;0\;;
+4 ExplicitGroup:load\;0\;;
+3 ExplicitGroup:Other models\;0\;;
+3 ExplicitGroup:PerformanceKPI\;0\;;
+3 ExplicitGroup:Markets\;0\;;
+3 ExplicitGroup:Planning\;0\;;
+2 ExplicitGroup:Optimization\;0\;;
+3 ExplicitGroup:MPC\;0\;;
+3 ExplicitGroup:Markets\;0\;;
+2 ExplicitGroup:TheoreticalModeling\;0\;;
+2 ExplicitGroup:OtherEnergyRelated\;0\;;
+1 ExplicitGroup:Forecasting\;0\;;
+2 ExplicitGroup:OptimalBidding\;0\;;
+2 ExplicitGroup:Probabilistic\;2\;;
+3 ExplicitGroup:Temporal\;2\;;
+4 ExplicitGroup:Wind\;2\;;
+4 ExplicitGroup:Solar\;0\;;
+4 ExplicitGroup:Evaluation\;0\;;
+3 ExplicitGroup:Spacial-temporal\;2\;;
+4 ExplicitGroup:Wind\;0\;;
+}
diff --git a/vignettes/make.R b/vignettes/make.R
new file mode 100644
index 0000000000000000000000000000000000000000..fe04dc7053ed19ab111d8f37f0f47776a9b22b03
--- /dev/null
+++ b/vignettes/make.R
@@ -0,0 +1,31 @@
+library(knitr)
+library(rmarkdown)
+
+## Put the files in this dir (ignored in the git)
+dirnam <- "tmp-output/"
+dir.create(dirnam)
+
+# Render a vignette to html in dirnam and extract its R code with purl()
+makeit <- function(nam, openit=FALSE){
+    namrmd <- paste0(nam,".Rmd")
+    # Render the .Rmd to html in the output directory
+    render(namrmd, output_file=paste0(dirnam,nam))
+    # Extract the R code and move it to the output directory
+    purl(namrmd)
+    system(paste0("mv ",nam,".R ",dirnam,nam,".R"))
+    # Optionally open the generated html in a browser
+    if(openit){ system(paste0("chromium-browser ",dirnam,nam,".html &")) }
+}
+
+file.remove(dir("tmp-output/tmp-setup-data/", full.names=TRUE))
+makeit("setup-data", openit=FALSE)
+
+#
+file.remove(dir("cache", full.names=TRUE))
+file.remove("cache")
+file.remove(dir("tmp-output/tmp-setup-and-use-model/", full.names=TRUE))
+makeit("setup-and-use-model", openit=FALSE)
+
+#
+file.remove(dir("tmp-output/tmp-forecast-evaluation/", full.names=TRUE))
+makeit("forecast-evaluation", openit=FALSE)
+
+#
+file.remove(dir("tmp-output/tmp-online-updating/", full.names=TRUE))
+makeit("online-updating", openit=FALSE)
diff --git a/vignettes/online-updating.Rmd b/vignettes/online-updating.Rmd
new file mode 100644
index 0000000000000000000000000000000000000000..4e08d3d348379d3358f6e19593fd6293e6052196
--- /dev/null
+++ b/vignettes/online-updating.Rmd
@@ -0,0 +1,137 @@
+---
+title: "Online updating of onlineforecast models"
+author: "Peder Bacher"
+date: "`r Sys.Date()`"
+output:
+  rmarkdown::html_vignette:
+    toc: true
+    toc_depth: 3
+vignette: >
+  %\VignetteIndexEntry{Online updating of onlineforecast models}
+  %\VignetteEngine{knitr::rmarkdown}
+  %\VignetteEncoding{UTF-8}
+bibliography: literature.bib
+---
+
+```{r external-code, cache=FALSE, include=FALSE, purl = FALSE}
+# Have to load the knitr to use hooks
+library(knitr)
+# This vignettes name
+vignettename <- "online-updating"
+# Read external code from init.R
+knitr::read_chunk("init.R")
+```
+```{r init, cache=FALSE, include=FALSE, purl=FALSE}
+```
+
+
+## Intro
+This vignette explains how to update an onlineforecast model and calculate
+new forecasts recursively as new data becomes available.
+
+Load the package:
+```{r}
+# Load the package
+library(onlineforecast)
+```
+
+Load data, setup and define a model:
+```{r, output.lines=10}
+# Keep the data in D to simplify notation
+D <- Dbuildingheatload
+# Set the score period 
+D$scoreperiod <- in_range("2010-12-20", D$t)
+# Set the training period
+D$trainperiod <- in_range(D$t[1], D$t, "2011-02-01")
+# Define a new model with low-pass filtering of the Ta input
+model <- forecastmodel$new()
+model$output = "heatload"
+model$add_inputs(Ta = "lp(Ta, a1=0.9)",
+                 mu = "ones()")
+model$add_regprm("rls_prm(lambda=0.9)")
+model$add_prmbounds(Ta__a1 = c(0.5, 0.9, 0.9999),
+                    lambda = c(0.9, 0.99, 0.9999))
+model$kseq <- c(3,18)
+# Optimize the parameters
+model$prm <- rls_optim(model, subset(D,D$trainperiod))$par
+```
+
+
+
+## Recursive update and prediction
+
+This section shows how to get new data, update the parameters and calculate predictions.
+
+First, fit on a period:
+```{r}
+iseq <- which(in_range("2010-12-15",D$t,"2011-01-01"))
+Dfit <- subset(D, iseq)
+model$kseq <- 1:36
+rls_fit(model$prm, model, Dfit)
+```
+
+Now the fits are saved in the model object (it's an R6 object, hence passed by reference to the functions and can be changed inside them). A list of fits, with an entry for each horizon, is kept in `model$Lfits`; see the first two:
+```{r}
+str(model$Lfits[1:2])
+```
+
+Now new data arrives; take the point right after the fit period:
+```{r}
+(i <- iseq[length(iseq)] + 1)
+Dnew <- subset(D, i)
+```
+
+First we need to transform the new data (this must be done only once for each new data point, since some transformation functions, e.g. `lp()`, keep state; see the detailed vignette in ??):
+```{r}
+Dnew_transformed <- model$transform_data(Dnew)
+```
+
+Then we can update the parameters using the transformed data
+```{r}
+rls_update(model, Dnew_transformed, Dnew[[model$output]])
+```
+
+Calculate predictions using the new data and the updated fits (the RLS coefficient estimates are in `model$Lfits[[k]]$theta`):
+```{r}
+yhat <- rls_predict(model, Dnew_transformed)
+```
+
+Plot to see that it fits the observations
+```{r}
+iseq <- i+model$kseq
+plot(D$t[iseq], D$heatload[iseq], type = "b", xlab = "t", ylab = "y")
+lines(D$t[iseq], yhat, type = "b", col = 2)
+legend("topright", c("observations",pst("predictions (",min(model$kseq)," to ",max(model$kseq)," steps ahead)")), lty = 1, col = 1:2)
+```
+
+Run this for a longer period to verify that the same forecasts are obtained (in one go vs. iteratively)
+
+First in one go
+```{r}
+val <- rls_fit(model$prm, model, D, returnanalysis = TRUE)
+D$Yhat1 <- val$Yhat
+```
+
+and then iteratively
+```{r}
+itrain <- which(in_range("2010-12-15",D$t,"2011-01-01"))
+itest <- which(in_range("2011-01-01",D$t,"2011-01-04"))
+rls_fit(model$prm, model, subset(D, itrain))
+
+D$Yhat2 <- data.frame(matrix(NA, nrow(D$Yhat1), ncol(D$Yhat1)))
+names(D$Yhat2) <- names(D$Yhat1)
+for(i in itest){
+    Dnew <- subset(D, i)
+    Dnewtr <- model$transform_data(Dnew)
+    rls_update(model, Dnewtr, Dnew[[model$output]])
+    D$Yhat2[i, ] <- as.numeric(rls_predict(model, Dnewtr))
+}
+```
+
+Compare to see the difference between the one-step forecasts:
+```{r}
+D$Yhat1$k1[itest] - D$Yhat2$k1[itest]
+```
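+
+or summarize the size of the differences over the test period:
+```{r}
+# Summary of the absolute differences for the 1-step forecasts
+summary(abs(D$Yhat1$k1[itest] - D$Yhat2$k1[itest]))
+```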
+
+A final note: the states kept in the model by the transformation functions (e.g. `lp()`) can be reset with `model$reset_states()`.
diff --git a/vignettes/setup-and-use-model.Rmd b/vignettes/setup-and-use-model.Rmd
new file mode 100644
index 0000000000000000000000000000000000000000..037a94760accb3116ad4de291410b6a3fe50e0ab
--- /dev/null
+++ b/vignettes/setup-and-use-model.Rmd
@@ -0,0 +1,428 @@
+---
+title: "Setup and use onlineforecast models"
+author: "Peder Bacher"
+date: "`r Sys.Date()`"
+output:
+  rmarkdown::html_vignette:
+    toc: true
+    toc_depth: 3
+vignette: >
+  %\VignetteIndexEntry{Setup and use onlineforecast models}
+  %\VignetteEngine{knitr::rmarkdown}
+  %\VignetteEncoding{UTF-8}
+bibliography: literature.bib
+---
+
+```{r external-code, cache=FALSE, include=FALSE, purl = FALSE}
+# Have to load the knitr to use hooks
+library(knitr)
+# This vignettes name
+vignettename <- "setup-and-use-model"
+# Read external code from init.R
+knitr::read_chunk("init.R")
+```
+```{r init, cache=FALSE, include=FALSE, purl=FALSE}
+```
+
+
+## Intro
+This vignette explains how to set up and use an onlineforecast
+model. It takes its starting point in the example of building heat load forecasting ??(ref)
+and assumes that the data is set up correctly, as explained in the
+[setup-data vignette](setup-data.html).
+
+Load the package:
+```{r}
+## Load the package
+library(onlineforecast)
+```
+
+Just start by:
+```{r}
+# Keep the data in D to simplify notation
+D <- Dbuildingheatload
+```
+
+
+## Score period
+
+Set the `scoreperiod` as a logical vector with the same length as `t`. It controls
+which points are included in the score calculations when the parameters are
+optimized with the package functions. It must be set in the `data.list` used.
+
+Use it to exclude a burn-in period of one week:
+```{r}
+# Print the first time point
+D$t[1]
+# Set the score period 
+D$scoreperiod <- in_range("2010-12-22", D$t)
+# Plot to see it
+plot(D$t, D$scoreperiod, xlab="Time", ylab="Scoreperiod")
+```
+Other periods that should be excluded from the score calculations can simply
+be set to `FALSE` as well, e.g.:
+```{r}
+# Exclude other points example
+scoreperiod2 <- D$scoreperiod
+scoreperiod2[in_range("2010-12-30",D$t,"2011-01-02")] <- FALSE
+```
+would exclude the days around New Year (it must of course be set in
+`D$scoreperiod`, not in `scoreperiod2`, to have an effect).
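+
+For example, to actually apply the exclusion (not evaluated here, since it would change the scores used later in this vignette):
+```{r, eval=FALSE}
+# Apply the exclusion directly in D so it takes effect in score calculations
+D$scoreperiod[in_range("2010-12-30",D$t,"2011-01-02")] <- FALSE
+```
+(the code above was not executed)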
+
+
+
+
+
+## Setting up a model
+
+A simple onlineforecast model can be set up by:
+```{r}
+# Generate new object (R6 class)
+model <- forecastmodel$new()
+# Set the model output
+model$output = "heatload"
+# Inputs (transformation step)
+model$add_inputs(Ta = "Ta",
+                 mu = "ones()")
+# Regression step parameters
+model$add_regprm("rls_prm(lambda=0.9)")
+# Optimization bounds for parameters
+model$add_prmbounds(lambda = c(0.9, 0.99, 0.9999))
+# Set the horizons for which the model will be fitted
+model$kseq <- c(3,18)
+```
+
+
+### Steps in setting up a model
+
+Let's go through the steps of setting up the model.
+
+First a new forecastmodel object is generated and the model output is set (by
+default it is `"y"`):
+```{r}
+# Generate new object
+model <- forecastmodel$new()
+# Set the model output
+model$output = "heatload"
+```
+The output is simply the variable name from `D` we want to forecast.
+
+The model inputs are defined by:
+```{r}
+# Inputs (transformation step)
+model$add_inputs(Ta = "Ta",
+                 mu = "ones()")
+```
+So this is really where the structure of the model is specified. Each input is
+given a name (`Ta` and `mu`) and set to an R expression (as a
+string). The expressions define the **transformation step**: they will each
+be evaluated in an environment holding the given `data.list`. This means that the
+variables from the data (e.g. `D`) can be used in the expressions - below in [Input transformations] we detail this evaluation.
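+
+As a tiny illustration of the mechanism (just base R `eval()` of the expression string with the data as the evaluation environment; the package additionally handles horizons and state internally):
+```{r}
+# Evaluating the input expression "Ta" with D as the environment simply
+# returns the Ta forecast data.frame (only a corner of it is shown)
+eval(parse(text = "Ta"), D)[1:2, 1:4]
+```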
+
+The next step in setting up the model is to set the parameters for the **regression
+step** by providing an expression, as a string, which returns the regression
+parameter values. In the present case we will use Recursive Least Squares
+(RLS) for the regression, and we need to set the forgetting factor `lambda` by:
+```{r}
+# Regression step parameters
+model$add_regprm("rls_prm(lambda=0.9)")
+```
+
+The expression is simply a call to a function, which returns
+a list - in this case with the value of `lambda` ??(see onlineforecast
+vignette). The result of it being evaluated is kept in:
+```{r}
+# The evaluation happens with
+eval(parse(text="rls_prm(lambda=0.9)"))
+# and the result is stored in
+model$regprm 
+```
+
+We will tune the parameters (for this model only the forgetting
+factor), so we set the parameter bounds (lower, init, upper) for it by:
+```{r}
+# Optimization bounds for parameters
+model$add_prmbounds(lambda = c(0.9, 0.99, 0.9999))
+```
+
+Finally, we set the horizons for which to fit:
+```{r}
+# Set the horizons for which the model will be fitted
+model$kseq <- c(3,18)
+```
+The horizons to fit for are actually not directly related to the model, but
+rather to the fitting of the model. In principle, it would be cleaner if the
+model, data and fit were kept separate, however for recursive fitting this
+becomes infeasible.
+<!-- see more ??(ref, where to we emphasize the recursive
+fitting, maybe a vignette by it self!?) -->
+
+
+### Tune the parameters
+
+We have set up the model and can now tune the `lambda` with the `rls_optim()`,
+which is a wrapper for the `optim()` function:
+```{r, output.lines=15}
+# Call the optim() wrapper
+model$prm <- rls_optim(model, D)$par
+```
+Note how it only calculated a score for the 3- and 18-step
+horizons - as we specified with `model$kseq` above. The parameters could be
+optimized separately for each horizon; for example, it is often the case that for the
+first horizons a very low forgetting factor is optimal (e.g. 0.9). Currently,
+however, the parameters can only be optimized together. By optimizing for a
+short (3 steps) and a long horizon (18 steps), we obtain a balance, using fewer computations compared to optimizing on all horizons.
+
+The optimization converges and the tuned parameter becomes:
+```{r}
+# Optimized lambda
+model$prm
+```
+
+Now we can fit with the optimized `lambda` on all horizons over the entire period:
+```{r}
+# Set to fit for all horizons
+model$kseq <- 1:36
+# Fit for all on entire period in D
+fit1 <- rls_fit(model$prm, model, D)
+```
+
+See the summary of the fit:
+```{r}
+# See the summary of the fit
+summary(fit1)
+```
+See `?summary.rls_fit` for details.
+
+
+Plot the forecasts (`Yhat` adheres to the forecast matrix format and in
+`plot_ts()` the forecasts are lagged `k` steps to be aligned with the observations):
+```{r}
+# Put the forecasts in D
+D$Yhat1 <- fit1$Yhat
+# Plot them for selected horizons
+plot_ts(D, c("^heatload|^Y"), kseq = c(1,6,18,36))
+```
+We clearly see the burn-in period, where the forecasts vary a lot.
+
+Plot a forecast for a particular time point and forward in time:
+```{r, fig.height=4}
+# Select a point
+i <- 996-48
+# and kseq steps ahead
+iseq <- i+model$kseq
+# The observations ahead in time
+plot(D$t[iseq], D$heatload[iseq], type = "b", xlab = "t", ylab = "y")
+title(main=pst("Forecast available at ",D$t[i]))
+# The forecasts
+lines(D$t[iseq], D$Yhat1[i, ], type = "b", col = 2)
+legend("topright", c("Observations",pst("Predictions (",min(model$kseq)," to ",max(model$kseq)," steps ahead)")), lty = 1, col = 1:2)
+```
+
+
+## Input transformations
+
+The inputs can be transformations of the variables in the data, i.e. `D` in this
+example. The function `ones()` generates a forecast matrix of ones for the needed
+horizons. It cannot be called directly:
+```{r, eval=FALSE}
+# This will give error
+ones()
+```
+(the code above was not executed)
+
+however we can see the result of the evaluation by:
+```{r}
+# Evaluate input expressions
+datatr <- model$transform_data(D)
+# See what came out
+summary(datatr)
+# In particular for the mu = "ones()"
+head(datatr$mu)
+```
+
+If we wanted to debug we could:
+```{r, eval=FALSE}
+# Set to debug
+debug(ones)
+# Run the input transformation now and it will stop in ones()
+datatr <- model$transform_data(D)
+# Set to undebug
+undebug(ones)
+```
+(the code above was not executed).
+
+
+Let's extend the model by adding a low-pass filter transformation of the
+ambient temperature forecasts. We could just update the input by:
+```{r}
+# Just update the Ta input by
+model$add_inputs(Ta = "lp(Ta, a1=0.9)")
+```
+
+but let's just repeat the whole model definition for clarification - including
+the new transformation: 
+```{r} 
+# Define a new model with low-pass filtering of the Ta input
+model <- forecastmodel$new()
+model$output = "heatload"
+model$add_inputs(Ta = "lp(Ta, a1=0.9)",
+                 mu = "ones()")
+model$add_regprm("rls_prm(lambda=0.9)")
+model$add_prmbounds(Ta__a1 = c(0.5, 0.9, 0.9999),
+                    lambda = c(0.9, 0.99, 0.9999))
+model$kseq <- c(3,18)
+```
+Note how a new set of parameter bounds was also added in `add_prmbounds()`,
+following a neat little syntax: `Ta__a1` indicates that the first appearance of `a1` in the `Ta` input expression will be changed in the optimization.
+
+We can see the parameter bounds with:
+```{r}
+model$prmbounds
+```
+
+To inspect the result of low-pass filtering:
+```{r}
+# Low-pass filter Ta (with a1=0.9 as defined above)
+datatr <- model$transform_data(D)
+# Actually, lp() can be called directly (although two warnings are thrown)
+Talp <- lp(D$Ta, a1=0.99)
+```
+and to see the result we could: 
+```{r}
+# Plot the Ta$k1 forecasts
+plot(D$t, D$Ta$k1, type="l")
+# Add the filtered with a1=0.9
+lines(D$t, datatr$Ta[ ,"k1"], col=2)
+# Add the filtered with a1=0.99
+lines(D$t, Talp[ ,"k1"], col=3)
+```
+hence with a low-pass coefficient `a1=0.99`, which is very high (max is 1), the
+Ta forecast is heavily smoothed, which models a system with a large time constant
+(i.e. slow dynamics, e.g. a well-insulated building with lots of concrete).
+
+There are quite a few functions available for input transformations:
+
+- `ones()` generates a matrix of ones (for including an intercept).
+- `fs()` generates Fourier series for modelling harmonic functions.
+- `bspline()` wraps the `bs()` function for generating base splines.
+- `AR()` generates auto-regressive model inputs.
+
+and they can even be combined; see more details in ??(ref) and in their help
+descriptions, e.g. `?fs`.
+
+
+Tuning the two parameters, the low-pass filter coefficient `a1` and the
+forgetting factor `lambda`, can now be done:
+```{r, output.lines=15}
+# Optimize the parameters
+model$prm <- rls_optim(model, D)$par
+```
+
+
+Plot the forecasts (`Yhat` adheres to the forecast matrix format and in `plot_ts()` the forecasts are lagged `k` steps to be aligned with the observations):
+```{r, fig.height=4}
+# Fit for all horizons
+model$kseq <- 1:36
+# Fit with RLS
+fit2 <- rls_fit(model$prm, model, D)
+# Take the forecasts
+D$Yhat2 <- fit2$Yhat
+# Plot all
+plot_ts(D, c("^heatload$|^Y"), kseq = c(1,18))
+```
+See more on how to extend this model even further in ??(ref to the
+building load forecast vignette on the website).
+
+We can see the summary:
+```{r}
+summary(fit2)
+```
+
+but it is more interesting to see if an improvement was achieved with the
+low-pass filtering, so calculate the RMSE for both models:
+```{r}
+# Calculate the score
+RMSE1 <- summary(fit1, printit=FALSE)$scoreval
+RMSE2 <- summary(fit2, printit=FALSE)$scoreval
+```
+Now, this is calculated for the points included in the `scoreperiod`, so it's
+important to make sure that exactly the same values are forecasted. A check can
+be done by:
+```{r}
+# Check that all NAs in the scoreperiod are at the same positions
+all(is.na(fit1$Yhat[fit1$data$scoreperiod, ]) == is.na(fit2$Yhat[fit2$data$scoreperiod, ]))
+```
+
+Finally, plot the RMSE for the two models:
+```{r}
+# Plot the score for the two models
+plot(RMSE1, xlab="Horizon k", ylab="RMSE", type="b", ylim=range(RMSE1,RMSE2))
+lines(RMSE2, type="b", col=2)
+legend("topleft", c("Input: Ta","Input: Low-pass Ta"), lty=1, col=1:2)
+```
+We can see that we obtained improvements of around 3-4% for the longer horizons.
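+
+To quantify this per horizon, the relative difference of the two scores already calculated can be checked:
+```{r}
+# Relative improvement in percent for each horizon
+round((RMSE1 - RMSE2) / RMSE1 * 100, 1)
+```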
+
+For more on evaluation, see the vignette ??(ref to forecast-evaluation.html).
+
+For further development of the load forecast model, see ??(building load forecast).
+
+
+
+## Time of day and using observations as input
+
+### Time of day as input
+
+Often we need to have the time of day as an input to a forecastmodel:
+```{r, output.lines=28}
+make_tday(D$t, kseq=1:3)
+```
+So we can use it like this:
+```{r}
+D$tday <- make_tday(D$t, 1:36)
+```
+See the help `?make_tday` for more details.
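+
+The time of day can then enter the model through a transformation, e.g. as a harmonic diurnal curve. A hedged sketch (not evaluated here; the exact arguments of `fs()` should be checked in `?fs`):
+```{r, eval=FALSE}
+# Hypothetical example: a diurnal pattern as a Fourier series of the scaled time of day
+model$add_inputs(mu_tday = "fs(tday/24, nharmonics=4)")
+```
+(the code above was not executed)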
+
+
+### Using observations as input
+
+If we want to use observations as inputs to a model, we can use e.g.:
+```{r}
+D$Tao <- make_input(D$Ta.obs, kseq=1:36)
+model$add_inputs(Tao = "lp(Tao, a1=0.99)")
+```
+
+
+## Caching of optimized parameters
+
+When working with time-consuming calculations, caching can be very
+valuable. The optimization results can be cached by providing a path to a
+directory:
+```{r, output.lines=15}
+rls_optim(model, D, cachedir="cache")$par
+```
+where cache files are saved:
+```{r}
+dir("cache")
+```
+so running it again will read the cache instead of re-running the optimization:
+```{r}
+rls_optim(model, D, cachedir="cache")$par
+```
+
+Remove the cache by:
+```{r}
+file.remove(dir("cache", full.names=TRUE))
+file.remove("cache")
+```
+
+
+## Deep clone model
+
+Usually, an object of an R6 class can be copied (in memory) deeply with
+`$clone(deep=TRUE)`; however, that will result in problems with
+forecastmodels, so the deep clone must be done by:
+```{r}
+m1 <- model$clone_deep()
+```
+See `?R6` for details on R6 objects.
diff --git a/vignettes/setup-data.Rmd b/vignettes/setup-data.Rmd
new file mode 100644
index 0000000000000000000000000000000000000000..7f758b547ddea260409babbd6d12751bd5ee7531
--- /dev/null
+++ b/vignettes/setup-data.Rmd
@@ -0,0 +1,374 @@
+---
+title: "Setup of data for an onlineforecast model"
+author: "Peder Bacher"
+date: "`r Sys.Date()`"
+output:
+  rmarkdown::html_vignette:
+    toc: true
+    toc_depth: 3
+vignette: >
+  %\VignetteIndexEntry{Setup of data for an onlineforecast model}
+  %\VignetteEngine{knitr::rmarkdown}
+  %\VignetteEncoding{UTF-8}
+bibliography: literature.bib
+---
+
+
+```{r external-code, cache=FALSE, include=FALSE, purl = FALSE}
+# Have to load the knitr to use hooks
+library(knitr)
+## This vignettes name
+vignettename <- "setup-data"
+## Read external code
+knitr::read_chunk("init.R")
+```
+```{r init, cache=FALSE, include=FALSE, purl=FALSE}
+```
+
+
+## Intro
+This vignette explains how to set up data consisting of
+observations and forecasts, such that it can be used for onlineforecast
+models. A generic introduction and description is available in the paper
+?(ref) [onlineforecasting vignette](onlineforecasting.pdf). ??(more on the other
+vignettes and website and code from this vignette)
+
+
+## Data example
+
+First load the package:
+```{r}
+## Load the package
+library(onlineforecast)
+```
+
+In the package different data sets are included. The
+`Dbuildingheatload` holds the data used for the example of
+heat load forecasting in the building-heat-load-forecasting vignette.
+
+When the package is loaded the data is also loaded, so we can access it
+directly. Let's start out by:
+```{r}
+## Keep it in D to simplify notation
+D <- Dbuildingheatload
+```
+
+The class is 'data.list':
+```{r}
+## The class of D
+class(D)
+```
+
+Actually, a 'data.list' is simply a 'list', but we made the class 'data.list' in
+order to have functions for the particular format of data - the format is explained in this document.
+
+It consists of vectors of time, vectors of observations (model output) and
+data.frames of forecasts (model input):
+```{r}
+## Print the names to see the variables in the data
+names(D)
+```
+
+An overview of the content can be generated by:
+```{r}
+summary(D)
+```
+where it can be seen that `t` is a time vector, `heatload` is a vector, and `Ta` and `I` are data.frames.
+
+A function for carrying out a check of the format of the 'data.list' is:
+```{r}
+check(D)
+```
+Basically, if there is a `V` in the `ok` column, then the format of this
+variable in `D` is correct. See the help with `?check.data.list` to learn what the printed output means.
+
+
+### Time
+
+First, lets have a look at `D$t`, which is the vector of time points:
+```{r}
+## The time
+class(D$t)
+head(D$t)
+tail(D$t)
+```
+Hence, the vector is of the class `POSIXct`. It is not a necessity; `t`
+can also simply be numeric, but for plotting and many operations it's
+very useful to use the 'POSIXct' class (see `?POSIXt`).
+
+Rules for the time vector:
+
+- It must be named `t`.
+
+- There must be no gaps or NA values in `t`, since only equidistant time series
+  can be used in the models (the other variables can have NAs).
+
+- It's best to keep the time zone in `UTC` or `GMT` (not providing any time
+  zone `tz` can give rise to problems).
+  
+
+Use the basic R functions for handling the time class. Most needed
+operations can be done with:
+```{r}
+?as.POSIXct
+?strftime
+```
+
+
+A helper function `asct()` is provided; see its help with `?asct` and the example below:
+
+```{r}
+## Convert from a time stamp (tz="GMT" per default)
+asct("2019-01-01 11:00")
+## Convert from unix time
+asct(3840928387)
+```
+Note that for all functions where a time value is given as a character, the time
+zone is always "GMT" (or "UTC", which can result in warnings that can safely be
+ignored). For some operations the package `lubridate` can
+be very helpful.
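+
+For example, converting a time point back to a character time stamp can be done with base R (a small sketch using `strftime()`):
+```{r}
+## Convert back to a character time stamp (keeping tz in GMT)
+strftime(asct("2019-01-01 11:00"), format="%Y-%m-%d %H:%M", tz="GMT")
+```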
+
+
+### Observations
+
+Note the rules for observations:
+
+- In a `data.list` observations must be numeric vectors.
+
+- The vectors must have the same length as the time `t` vector.
+
+- Observations as vectors can be used directly as model output (if observations
+  are to be used as model inputs, they must be set up in a data.frame as explained
+  below in Section [Forecasts]).
+
+
+In the current data, a time series of hourly heat load observations is included:
+
+```{r}
+str(D$heatload)
+```
+
+It must have the same length as the time vector:
+```{r}
+## Same length as time
+length(D$t)
+length(D$heatload)
+```
+
+A simple plot can be generated by:
+```{r}
+plot(D$t, D$heatload, type="l", xlab="Time", ylab="Heatload (kW)")
+```
+
+The convention used in all examples is that the time points are always
+set to the time interval end point, e.g.:
+```{r}
+## The observation
+D$heatload[2]
+## Represents the average load between
+D$t[1]
+## and
+D$t[2]
+```
+The main idea behind setting the time point at the end of the interval is that
+values averaged over a time interval are available at the end of that interval,
+not before. Especially in real-time applications this is a useful convention.
+
+
+### Forecasts
+
+As described in [onlineforecast](onlineforecast.pdf) the setup of forecasts for
+model inputs always follows the same format - as presented in the
+following. This is also the format of the forecasts generated by functions in the package. Hence all forecasts must follow this format.
+
+The rules are:
+
+- All values at row `i` are available at the `i`'th value in time `t`.
+
+- All columns must be named with `k` followed by an integer indicating the
+  horizon in steps (e.g. the column named `k8` holds the 8-step forecasts).
+
+
+Have a look at the forecasts of the global radiation:
+```{r}
+## Global radiation forecasts
+head(D$I)
+```
+
+At the first time point:
+```{r}
+## First time point
+D$t[1]
+```
+the available forecast ahead in time is at the first row:
+```{r}
+## The forecast available ahead in time is in the first row
+D$I[1, ]
+```
+
+We can plot that by:
+```{r}
+i <- 1:ncol(D$I)
+plot(D$t[i], D$I[1, ], type="l", xlab="Time", ylab="Global radiation forecast (I in W/m²)")
+```
+So this is the forecast available ahead in time at `r D$t[1]`.
+
+The column in `I` named `k8` holds the 8-step horizon forecasts, which, since
+the steps are hourly, is an equi-distant time series. Picking out the
+entire series can be done by `D$I$k8` - hence a plot (together with the
+observations) can be generated by:
+```{r}
+## Just pick some points by
+i <- 200:296
+plot(D$t[i], D$I$k8[i], type="l", col=2, xlab="Time", ylab="Global radiation (W/m²)")
+## Add the observations
+lines(D$t[i], D$I.obs[i])
+legend("topright", c("8-step forecasts","Observations"), bg="white", lty=1, col=2:1)
+```
+
+Notice how they are not aligned, since the forecasts are 8 hours ahead. To align
+them, the forecasts must be lagged 8 steps by:
+```{r}
+plot(D$t[i], lag(D$I$k8[i], 8), type="l", col=2, xlab="Time", ylab="Global radiation (W/m²)")
+lines(D$t[i], D$I.obs[i])
+legend("topright", c("8-step forecasts lagged","Observations"), bg="white", lty=1, col=2:1)
+```
+
+
+
+## Plotting
+
+A few simple plotting functions are included in the package.
+
+
+### Time series plots
+
+The plot function provided with the package does this lagging when plotting forecasts:
+```{r}
+plot_ts(D, patterns=c("^I"), c("2010-12-15","2010-12-18"), kseq=c(1,8,24,36))
+```
+
+The argument `patterns` is a vector of regular expressions (see `?regex`),
+which is used to match the variables to include in the plot. See the help with `?plot_ts` for more details.
+
+An interactive plot can be generated using (first install the package `plotly`):
+```{r, eval=FALSE}
+plotly_ts(D, patterns=c("heatload$","^I"), c("2010-12-15","2010-12-18"), kseq=c(1,8,24,36))
+```
+```{r, warning=FALSE, message=FALSE, echo=FALSE, purl=FALSE}
+L <- plotly_ts(D, patterns=c("heatload$","^I"), c("2010-12-15","2010-12-18"), kseq=c(1,8,24,36), plotit=FALSE)
+subplot(L, shareX=TRUE, nrows=length(L), titleY = TRUE)
+```
+
+
+
+### Scatter plots
+
+When modelling with the objective of forecasting, it is always a good idea to start
+by looking at scatter plots of the model inputs against the model output. For
+example, the heat load vs. the 8-step ambient temperature forecasts:
+```{r, fig.width=2*fhs, fig.height=fhs, out.width=ows2}
+par(mfrow=c(1,2))
+plot(D$Ta$k8, D$heatload)
+plot(lag(D$Ta$k8, 8), D$heatload)
+```
+So lagging (thus aligning in time) results in slightly less scatter.
+
+A wrapper for the `pairs` function is provided for a `data.list`, which can
+generate very useful explorative plots:
+```{r, fig.height=figwidth}
+pairs(D, nms=c("heatload","Ta.obs","Ta","t"), kseq=c(1,8,24))
+```
+Note how the sequence of included horizons is specified in the `kseq` argument,
+and note that the forecasts are lagged to be aligned in time. See `?pairs.data.list` for more details.
+
+Just as a quick side note: this is the principle used for fitting onlineforecast
+models, i.e. simply shift the forecasts to align them with the observations:
+```{r, fig.width=fhs, fig.height=fhs, out.width=ows}
+## Lag the 8-step forecasts to be aligned with the observations
+x <- lag(D$I$k8, 8)
+## Take a smaller range
+x <- x[i]
+## Take the observations
+y <- D$I.obs[i]
+## Fit a linear regression model
+fit <- lm(y ~ x)
+## Plot the result
+plot(x, y, xlab="8-step forecasts (W/m²)", ylab="Observations (W/m²)", main="Global radiation")
+abline(fit)
+```
+
+Seen over time, the predictions from this fit (based on the lagged 8-step forecasts) are:
+```{r}
+plot(D$t[i], predict.lm(fit, newdata=data.frame(x)), type="l", ylim=c(0,max(y)), xlab="Time", ylab="Global radiation (W/m²)", col=2)
+lines(D$t[i], y)
+legend("topright", c("Fit of lagged 8-step forecasts","Observations"), lty=1, col=2:1)
+```
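+
+As a quick, optional check of the simple linear fit above, its root mean squared
+error can be computed with base R (a small sketch, not a package function):
+```{r}
+## RMSE of the simple linear fit (lm drops missing values before fitting)
+sqrt(mean(residuals(fit)^2))
+```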
+
+Of course that model was very simple; see how to build a better model in the
+[solar forecast vignette](solar-power-forecasting.html).
+
+
+
+## Subset
+
+Taking a subset of a `data.list` is very useful and can easily be done in
+different ways using the `subset` function (it is really the
+`subset.data.list` function which is called when the first argument is a `data.list`):
+```{r}
+## Take the 1 to 4 values of each variable in D
+Dsub <- subset(D, 1:4)
+summary(Dsub)
+```
+
+Another useful function for taking data in a time range is:
+```{r}
+which(in_range("2010-12-20",D$t,"2010-12-21"))
+```
+Always check the help of a function for more details (i.e. `?in_range`).
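+
+The logical vector returned by `in_range` can also be used directly for
+indexing, for example:
+```{r}
+## Pick out the heat load observations in that period
+D$heatload[in_range("2010-12-20", D$t, "2010-12-21")]
+```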
+
+Actually, it is easy to take a subset for a period by:
+```{r}
+Dsub <- subset(D, c("2010-12-20","2010-12-21"))
+summary(Dsub)
+Dsub$t
+```
+
+
+## Data.list to data.frame (or data.table)
+
+It can be really useful to convert the data.list to a `data.frame` (or
+equivalently a `data.table`) for processing.
+
+Converting to a `data.frame` can easily be done by:
+```{r}
+Df <- as.data.frame(Dsub)
+names(Df)
+```
+So the forecasts are simply bound together with the time and observations, and `.kxx`
+(where `xx` is the horizon) is appended to the column names.
+
+It can be converted to a `data.table` by:
+```{r}
+library(data.table)
+setDT(Df)
+class(Df)
+```
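+
+As a small sketch of a typical processing step (using only standard `data.table`
+syntax, and assuming the time and heat load columns are named `t` and `heatload`
+as shown above), daily averages of the heat load can now be computed by:
+```{r}
+## Daily average heat load computed with data.table syntax
+Df[ , .(heatload_avg = mean(heatload)), by = .(day = as.Date(t))]
+```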
+
+After processing, it is easily converted back to a `data.list` by:
+```{r}
+## Set back to data.frame
+setDF(Df)
+## Convert to a data.list
+Dsub2 <- as.data.list(Df)
+## Compare it with the original Dsub
+summary(Dsub2)
+summary(Dsub)
+```
+
+
+## Literature