Skip to contents

Wrapper functions of available methods for related problems.

Usage

wrap_empty(x, y, family, alpha = 1)

wrap_separate(x, y, family, alpha = 1, lambda = NULL)

# S3 method for class 'wrap_separate'
predict(object, newx, ...)

# S3 method for class 'wrap_separate'
coef(object, ...)

wrap_common(x, y, family, alpha = 1)

# S3 method for class 'wrap_common'
predict(object, newx, ...)

# S3 method for class 'wrap_common'
coef(object, ...)

wrap_mgaussian(x, y, family = "gaussian", alpha = 1)

# S3 method for class 'wrap_mgaussian'
predict(object, newx, ...)

# S3 method for class 'wrap_mgaussian'
coef(object, ...)

wrap_spls(x, y, family = "gaussian", alpha = 1, nfolds = 10)

# S3 method for class 'wrap_spls'
predict(object, newx, ...)

# S3 method for class 'wrap_spls'
coef(object, ...)

wrap_glmtrans(x, y, family = "gaussian", alpha = 1)

# S3 method for class 'wrap_glmtrans'
predict(object, newx, ...)

# S3 method for class 'wrap_glmtrans'
coef(object, ...)

wrap_xrnet(
  x,
  y,
  alpha.init = 0.95,
  alpha = 1,
  nfolds = 10,
  family = "gaussian"
)

# S3 method for class 'wrap_xrnet'
predict(object, newx, ...)

# S3 method for class 'wrap_xrnet'
coef(object, ...)

Arguments

x

feature matrix (multi-task learning) or list of q feature matrices (transfer learning)

y

response matrix (multi-task learning) or list of q response vectors (transfer learning)

family

character vector with 1 or q entries; possible values are "gaussian" and, depending on the method, "binomial" or other exponential families

alpha

elastic net mixing parameter: number between 0 and 1

lambda

sequence of regularisation parameters

object

output from multi-task learning or transfer learning method

newx

feature matrix (MTL) or list of feature matrices (TL) of testing samples

...

(not applicable)

nfolds

number of cross-validation folds: positive integer

alpha.init

elastic net mixing parameter for initial models: number between 0 and 1

Value

The wrapper functions wrap_empty, wrap_separate, wrap_common, wrap_mgaussian, wrap_spls, wrap_glmtrans, and wrap_xrnet return fitted models, and the generic functions coef and predict return coefficients or predicted values in a standardised format.

Functions

  • wrap_empty(): intercept-only model (MTL and TL)

  • wrap_separate(): separate model for each problem (MTL and TL)

  • wrap_common(): common model for all problems (TL)

  • wrap_mgaussian(): multivariate Gaussian regression (MTL)

  • wrap_spls(): sparse partial least squares (MTL)

  • wrap_glmtrans(): transfer generalised linear model (TL)

  • wrap_xrnet(): hierarchical regression (TL)

References

Noah Simon, Jerome H. Friedman, and Trevor Hastie (2013). "A Blockwise Descent Algorithm for Group-penalized Multiresponse and Multinomial Regression". arXiv (Preprint). doi:10.48550/arXiv.1311.6529 . (cv.glmnet)

Hyonho Chun and Sündüz Keleş (2010). "Sparse Partial Least Squares Regression for Simultaneous Dimension Reduction and Variable Selection". Journal of the Royal Statistical Society Series B: Statistical Methodology 72(1):3–25. doi:10.1111/j.1467-9868.2009.00723.x . (spls)

Ye Tian and Yang Feng (2022). "Transfer learning under high-dimensional generalized linear models". Journal of the American Statistical Association 118(544):2684–2697. doi:10.1080/01621459.2022.2071278 . (glmtrans)

Garrett M. Weaver and Juan Pablo Lewinger (2019). "xrnet: Hierarchical Regularized Regression to Incorporate External Data". Journal of Open Source Software 4(44):1761. doi:10.21105/joss.01761 . (xrnet)

See also

See original functions cv.glmnet (with argument family="mgaussian"), spls, glmtrans, and xrnet.

Examples

#--- multi-task learning ---
n_train <- 100
n_test <- 10
p <- 50
q <- 3
family <- "gaussian"
x <- matrix(data=rnorm(n=n_train*p),nrow=n_train,ncol=p)
newx <- matrix(data=rnorm(n=n_test*p),nrow=n_test,ncol=p)
y <- matrix(data=rnorm(n_train*q),nrow=n_train,ncol=q)
object <- wrap_empty(x=x,y=y,family=family)
model <- "empty" # try "empty", "separate", "mgaussian", or "spls"
if(model=="empty"){
  object <- wrap_empty(x=x,y=y,family=family)
} else if(model=="separate"){
  object <- wrap_separate(x=x,y=y,family=family)
} else if(model=="mgaussian"){
  object <- wrap_mgaussian(x=x,y=y,family=family)
} else if(model=="spls"){
  object <- wrap_spls(x=x,y=y,family=family)
}
coef(object)
#> $alpha
#> [1] -0.0625940314 -0.0005838964  0.0181799997
#> 
#> $beta
#>       [,1] [,2] [,3]
#>  [1,]    0    0    0
#>  [2,]    0    0    0
#>  [3,]    0    0    0
#>  [4,]    0    0    0
#>  [5,]    0    0    0
#>  [6,]    0    0    0
#>  [7,]    0    0    0
#>  [8,]    0    0    0
#>  [9,]    0    0    0
#> [10,]    0    0    0
#> [11,]    0    0    0
#> [12,]    0    0    0
#> [13,]    0    0    0
#> [14,]    0    0    0
#> [15,]    0    0    0
#> [16,]    0    0    0
#> [17,]    0    0    0
#> [18,]    0    0    0
#> [19,]    0    0    0
#> [20,]    0    0    0
#> [21,]    0    0    0
#> [22,]    0    0    0
#> [23,]    0    0    0
#> [24,]    0    0    0
#> [25,]    0    0    0
#> [26,]    0    0    0
#> [27,]    0    0    0
#> [28,]    0    0    0
#> [29,]    0    0    0
#> [30,]    0    0    0
#> [31,]    0    0    0
#> [32,]    0    0    0
#> [33,]    0    0    0
#> [34,]    0    0    0
#> [35,]    0    0    0
#> [36,]    0    0    0
#> [37,]    0    0    0
#> [38,]    0    0    0
#> [39,]    0    0    0
#> [40,]    0    0    0
#> [41,]    0    0    0
#> [42,]    0    0    0
#> [43,]    0    0    0
#> [44,]    0    0    0
#> [45,]    0    0    0
#> [46,]    0    0    0
#> [47,]    0    0    0
#> [48,]    0    0    0
#> [49,]    0    0    0
#> [50,]    0    0    0
#> 
predict(object,newx=newx)
#> [[1]]
#>        lambda.min
#>  [1,] -0.06259403
#>  [2,] -0.06259403
#>  [3,] -0.06259403
#>  [4,] -0.06259403
#>  [5,] -0.06259403
#>  [6,] -0.06259403
#>  [7,] -0.06259403
#>  [8,] -0.06259403
#>  [9,] -0.06259403
#> [10,] -0.06259403
#> 
#> [[2]]
#>          lambda.min
#>  [1,] -0.0005838964
#>  [2,] -0.0005838964
#>  [3,] -0.0005838964
#>  [4,] -0.0005838964
#>  [5,] -0.0005838964
#>  [6,] -0.0005838964
#>  [7,] -0.0005838964
#>  [8,] -0.0005838964
#>  [9,] -0.0005838964
#> [10,] -0.0005838964
#> 
#> [[3]]
#>       lambda.min
#>  [1,]    0.01818
#>  [2,]    0.01818
#>  [3,]    0.01818
#>  [4,]    0.01818
#>  [5,]    0.01818
#>  [6,]    0.01818
#>  [7,]    0.01818
#>  [8,]    0.01818
#>  [9,]    0.01818
#> [10,]    0.01818
#> 

#--- transfer learning ---
n_train <- c(100,50)
n_test <- c(10,10)
p <- 50
x <- lapply(X=n_train,function(n) matrix(data=stats::rnorm(n*p),nrow=n,ncol=p))
newx <- lapply(X=n_test,function(n) matrix(data=stats::rnorm(n*p),nrow=n,ncol=p))
y <- lapply(X=n_train,function(n) stats::rnorm(n))
family <- "gaussian"
model <- "empty" # try "empty", "separate", "common", "glmtrans", or "xrnet"
if(model=="empty"){
 object <- wrap_empty(x=x,y=y,family=family)
} else if(model=="separate"){
 object <- wrap_separate(x=x,y=y,family=family)
} else if(model=="common"){
 object <- wrap_common(x=x,y=y,family=family)
} else if(model=="glmtrans"){
 object <- wrap_glmtrans(x=x,y=y,family=family)
} else if(model=="xrnet"){
 object <- wrap_xrnet(x=x,y=y,family=family)
}
coef(object)
#> $alpha
#> [1]  0.04248283 -0.04722426
#> 
#> $beta
#>       [,1] [,2]
#>  [1,]    0    0
#>  [2,]    0    0
#>  [3,]    0    0
#>  [4,]    0    0
#>  [5,]    0    0
#>  [6,]    0    0
#>  [7,]    0    0
#>  [8,]    0    0
#>  [9,]    0    0
#> [10,]    0    0
#> [11,]    0    0
#> [12,]    0    0
#> [13,]    0    0
#> [14,]    0    0
#> [15,]    0    0
#> [16,]    0    0
#> [17,]    0    0
#> [18,]    0    0
#> [19,]    0    0
#> [20,]    0    0
#> [21,]    0    0
#> [22,]    0    0
#> [23,]    0    0
#> [24,]    0    0
#> [25,]    0    0
#> [26,]    0    0
#> [27,]    0    0
#> [28,]    0    0
#> [29,]    0    0
#> [30,]    0    0
#> [31,]    0    0
#> [32,]    0    0
#> [33,]    0    0
#> [34,]    0    0
#> [35,]    0    0
#> [36,]    0    0
#> [37,]    0    0
#> [38,]    0    0
#> [39,]    0    0
#> [40,]    0    0
#> [41,]    0    0
#> [42,]    0    0
#> [43,]    0    0
#> [44,]    0    0
#> [45,]    0    0
#> [46,]    0    0
#> [47,]    0    0
#> [48,]    0    0
#> [49,]    0    0
#> [50,]    0    0
#> 
predict(object,newx=newx)
#> [[1]]
#>       lambda.min
#>  [1,] 0.04248283
#>  [2,] 0.04248283
#>  [3,] 0.04248283
#>  [4,] 0.04248283
#>  [5,] 0.04248283
#>  [6,] 0.04248283
#>  [7,] 0.04248283
#>  [8,] 0.04248283
#>  [9,] 0.04248283
#> [10,] 0.04248283
#> 
#> [[2]]
#>        lambda.min
#>  [1,] -0.04722426
#>  [2,] -0.04722426
#>  [3,] -0.04722426
#>  [4,] -0.04722426
#>  [5,] -0.04722426
#>  [6,] -0.04722426
#>  [7,] -0.04722426
#>  [8,] -0.04722426
#>  [9,] -0.04722426
#> [10,] -0.04722426
#>