
Archive for the ‘Backtesting’ Category

Averaged Input Assumptions and Momentum

December 5, 2013

Today I want to share another interesting idea contributed by Pierre Chretien. Pierre suggested using Averaged Input Assumptions and Momentum to create a reasonably quiet strategy. The averaging techniques are used to avoid over-fitting to any particular frequency.

To create Averaged Input Assumptions, we combine returns over different look-back periods, giving more weight to the recent returns, to form the overall Input Assumptions.

create.ia.averaged <- function(lookbacks, n.lag) {
	# force evaluation of the arguments so they are captured in the closure
	lookbacks = lookbacks
	n.lag = n.lag

	function(hist.returns, index=1:ncol(hist.returns), hist.all)
	{
		nperiods = nrow(hist.returns)

		# stack the trailing returns for each look-back window; the most recent
		# returns fall into every window and hence carry the most weight
		temp = c()
		for (n.lookback in lookbacks)
			temp = rbind(temp, hist.returns[(nperiods - n.lookback - n.lag + 1):(nperiods - n.lag), ])
		create.ia(temp, index, hist.all)
	}
}

To create Averaged Momentum, we take a look-back weighted average of momentum computed over different look-back periods.

momentum.averaged <- function(prices,
	lookbacks = c(20,60,120,250),	# momentum look-back lengths
	n.lag = 3
) {
	momentum = 0 * prices
	for (n.lookback in lookbacks) {
		part.mom = mlag(prices, n.lag) / mlag(prices, n.lookback + n.lag) - 1
		# scale by 252 / n.lookback to annualize, so momentum measured over
		# different horizons is comparable before averaging
		momentum = momentum + 252 / n.lookback * part.mom
	}
	momentum / len(lookbacks)
}

Next let’s compare using historical Input Assumptions vs Averaged Input Assumptions, and Momentum vs Averaged Momentum. I will consider Absolute Momentum (not cross-sectional); for background on relative versus absolute momentum, please see the related literature.
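As a minimal illustration (my own sketch, not code from the original post; it assumes SIT is loaded and prices is a price matrix like the one built below), the two flavors differ only in how the momentum signal is turned into a universe filter:

	mom = prices / mlag(prices, 120) - 1

	# absolute momentum: keep any asset whose own trailing return is positive
	abs.universe = ifna(mom > 0, F)

	# cross-sectional (relative) momentum: keep the top half of assets by rank,
	# regardless of whether their returns are positive
	rank.mom = t(apply(coredata(mom), 1, function(x) rank(x, na.last = 'keep')))
	rel.universe = ifna(rank.mom > ncol(prices) / 2, F)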

###############################################################################
# Load Systematic Investor Toolbox (SIT)
# https://systematicinvestor.wordpress.com/systematic-investor-toolbox/
###############################################################################
setInternet2(TRUE)
con = gzcon(url('http://www.systematicportfolio.com/sit.gz', 'rb'))
    source(con)
close(con)
 
	#*****************************************************************
	# Load historical data
	#****************************************************************** 
	load.packages('quantmod')
		
	# 10 funds
	tickers = spl('Us.Eq = VTI + VTSMX,
	Europe.Eq = IEV + FIEUX,
	Japan.Eq = EWJ + FJPNX,
	Emer.Eq = EEM + VEIEX,
	Re = RWX + VNQ + VGSIX,		
	Com = DBC + QRAAX,
	Gold = GLD + SCGDX,
	Long.Tr = TLT + VUSTX,
	Mid.Tr = IEF + VFITX,
	Short.Tr = SHY + VFISX') 
	
	start.date = 1998
	
	dates = paste(start.date,'::',sep='') 
	
	data <- new.env()
	getSymbols.extra(tickers, src = 'yahoo', from = '1980-01-01', env = data, set.symbolnames = T, auto.assign = T)
		for(i in data$symbolnames) data[[i]] = adjustOHLC(data[[i]], use.Adjusted=T)
	bt.prep(data, align='keep.all', dates=paste(start.date-2,':12::',sep=''), fill.gaps = T)

	#*****************************************************************
	# Setup
	#****************************************************************** 		
	prices = data$prices   
		n = ncol(prices)
		nperiods = nrow(prices)
		
	periodicity = 'months'
	period.ends = endpoints(prices, periodicity)
		period.ends = period.ends[period.ends > 0]
		
	max.product.exposure = 0.6	
	
	#*****************************************************************
	# Input Assumptions
	#****************************************************************** 	
	lookback.len = 40
	create.ia.fn = create.ia
	
	# input assumptions are averaged on 20, 40, 60 days using 1 day lag
	ia.array = c(20,40,60)
	avg.create.ia.fn = create.ia.averaged(ia.array, 1)

	#*****************************************************************
	# Momentum
	#****************************************************************** 	
	universe = prices > 0
	
	mom.lookback.len = 120	
	momentum = prices / mlag(prices, mom.lookback.len) - 1
	mom.universe = ifna(momentum > 0, F)
	
	# momentum is averaged on 20,60,120,250 days using 3 day lag
	mom.array = c(20,60,120,250)	
	avg.momentum = momentum.averaged(prices, mom.array, 3)
	avgmom.universe = ifna(avg.momentum > 0, F)

	#*****************************************************************
	# Algos
	#****************************************************************** 	
	min.risk.fns = list(
		EW = equal.weight.portfolio,
		MV = min.var.portfolio,
		MCE = min.corr.excel.portfolio,
				
		MV.RSO = rso.portfolio(min.var.portfolio, 3, 100, const.ub = max.product.exposure),
		MCE.RSO = rso.portfolio(min.corr.excel.portfolio, 3, 100, const.ub = max.product.exposure)
	)

	#*****************************************************************
	# Code Strategies
	#****************************************************************** 	
make.strategy.custom <- function(name, create.ia.fn, lookback.len, universe, env) {
	obj = portfolio.allocation.helper(data$prices, 
		periodicity = periodicity,
		universe = universe,
		lookback.len = lookback.len,
		create.ia.fn = create.ia.fn,
		const.ub = max.product.exposure,
		min.risk.fns = min.risk.fns,
		adjust2positive.definite = F
	)
	env[[name]] = create.strategies(obj, data, prefix=paste(name,'.',sep=''))$models
}


	models <- new.env()	
	make.strategy.custom('ia.none'        , create.ia.fn    , lookback.len, universe       , models)
	make.strategy.custom('ia.mom'         , create.ia.fn    , lookback.len, mom.universe   , models)
	make.strategy.custom('ia.avg_mom'     , create.ia.fn    , lookback.len, avgmom.universe, models)
	make.strategy.custom('avg_ia.none'    , avg.create.ia.fn, 252         , universe       , models)
	make.strategy.custom('avg_ia.mom'     , avg.create.ia.fn, 252         , mom.universe   , models)
	make.strategy.custom('avg_ia.avg_mom' , avg.create.ia.fn, 252         , avgmom.universe, models)
	
	#*****************************************************************
	# Create Report
	#*****************************************************************		
strategy.snapshot.custom <- function(models, n = 0, title = NULL) {
	if (n > 0)
		models = models[ as.vector(matrix(1:len(models),ncol=n, byrow=T)) ]	

	layout(1:3)	
	plotbt(models, plotX = T, log = 'y', LeftMargin = 3, main = title)
		mtext('Cumulative Performance', side = 2, line = 1)
	plotbt.strategy.sidebyside(models)
	barplot.with.labels(sapply(models, compute.turnover, data), 'Average Annual Portfolio Turnover', T)	
}

	# basic vs basic + momentum => momentum filter has better results
	models.final = c(models$ia.none, models$ia.mom)
	strategy.snapshot.custom(models.final, len(min.risk.fns), 'Momentum Filter')

	# basic vs basic + avg ia => averaged ia reduce turnover
	models.final = c(models$ia.none, models$avg_ia.none)
	strategy.snapshot.custom(models.final, len(min.risk.fns), 'Averaged Input Assumptions')

	# basic + momentum vs basic + avg.momentum => mixed results for averaged momentum
	models.final = c(models$ia.mom, models$ia.avg_mom)
	strategy.snapshot.custom(models.final, len(min.risk.fns), 'Averaged Momentum')

	# basic + momentum vs avg ia + avg.momentum
	models.final = c(models$ia.mom, models$avg_ia.avg_mom)
	strategy.snapshot.custom(models.final, len(min.risk.fns), 'Averaged vs Base')	

Above, I compared results for the following 4 cases:

1. Adding the Momentum filter: all algos perform better.

2. Input Assumptions vs Averaged Input Assumptions: returns are very similar, but using Averaged Input Assumptions helps reduce portfolio turnover.

3. Momentum vs Averaged Momentum: returns are very similar, but using Averaged Momentum increases portfolio turnover.

4. Historical Input Assumptions + Momentum vs Averaged Input Assumptions + Averaged Momentum: results are mixed, with no consistent advantage to the Averaged methods.

Overall, the Averaged methods are a very interesting idea and I hope you will experiment with them and share your findings, like Pierre did. Pierre, again, thank you very much for sharing.

The full source code and example for the bt.averaged.test() function is available in bt.test.r at github.

Running Back-tests in parallel

November 11, 2013

Once you start experimenting with many different asset allocation algorithms, the computation time of running the back-tests can be substantial. One simple way to solve the computation time problem is to run the back-tests in parallel. I.e., if the asset allocation algorithm does not use the prior period’s holdings to decide the current allocation, we can run many periods in parallel.
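The idea is easy to sketch with base R’s parallel package (this is my own minimal illustration, not the SIT implementation): each rebalancing date is handed to a worker, which computes that period’s weights from its look-back window alone.

	library(parallel)

	# toy stand-in for one period's allocation: weights depend only on the
	# look-back window ending at period i, not on prior holdings
	compute.weights <- function(i, returns, lookback = 60) {
		window = returns[(i - lookback + 1):i, , drop = FALSE]
		vol = apply(window, 2, sd)
		(1 / vol) / sum(1 / vol)	# inverse-volatility weights
	}

	returns = matrix(rnorm(1000 * 10, 0, 0.01), ncol = 10)
	period.ends = seq(60, 1000, by = 21)

	cl = makeCluster(2)	# use 2 CPU cores
	weights = parLapply(cl, period.ends, compute.weights, returns = returns)
	stopCluster(cl)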

In the Update for Backtesting Asset Allocation Portfolios post, I showcased the portfolio.allocation.helper() function in strategy.r at github. The portfolio.allocation.helper() function is a user-friendly interface to evaluate multiple asset allocation algorithms over a given asset universe in a sequential fashion.

Following is a sample code from the Update for Backtesting Asset Allocation Portfolios post:

    #*****************************************************************
    # Code Strategies
    #******************************************************************                    
    obj = portfolio.allocation.helper(data$prices,
        periodicity = 'months', lookback.len = 60,
        min.risk.fns = list(
            EW=equal.weight.portfolio,
            RP=risk.parity.portfolio,
            MD=max.div.portfolio,                      
             
            MV=min.var.portfolio,
            MVE=min.var.excel.portfolio,
            MV2=min.var2.portfolio,
             
            MC=min.corr.portfolio,
            MCE=min.corr.excel.portfolio,
            MC2=min.corr2.portfolio,
             
            MS=max.sharpe.portfolio(),
            ERC = equal.risk.contribution.portfolio,
 
            # target return / risk
            TRET.12 = target.return.portfolio(12/100),                             
            TRISK.10 = target.risk.portfolio(10/100),
         
            # rso
            RSO.RP.5 = rso.portfolio(risk.parity.portfolio, 5, 500),
             
            # others
            MMaxLoss = min.maxloss.portfolio,
            MMad = min.mad.portfolio,
            MCVaR = min.cvar.portfolio,
            MCDaR = min.cdar.portfolio,
            MMadDown = min.mad.downside.portfolio,
            MRiskDown = min.risk.downside.portfolio,
            MCorCov = min.cor.insteadof.cov.portfolio
        )
    )

To run the same strategies in parallel, I created the portfolio.allocation.helper.parallel() function in strategy.r at github. There is one extra input that you need to specify: cores – number of CPU processors used for computations.

For example, the code below will use 2 CPU processors to run back-test computations. It will run faster than the portfolio.allocation.helper() function.

    #*****************************************************************
    # Code Strategies
    #******************************************************************                    
    obj = portfolio.allocation.helper.parallel(cores = 2, data$prices,
        periodicity = 'months', lookback.len = 60,
        min.risk.fns = list(
            EW=equal.weight.portfolio,
            RP=risk.parity.portfolio,
            MD=max.div.portfolio,                      
             
            MV=min.var.portfolio,
            MVE=min.var.excel.portfolio,
            MV2=min.var2.portfolio,
             
            MC=min.corr.portfolio,
            MCE=min.corr.excel.portfolio,
            MC2=min.corr2.portfolio,
             
            MS=max.sharpe.portfolio(),
            ERC = equal.risk.contribution.portfolio,
 
            # target return / risk
            TRET.12 = target.return.portfolio(12/100),                             
            TRISK.10 = target.risk.portfolio(10/100),
         
            # rso
            RSO.RP.5 = rso.portfolio(risk.parity.portfolio, 5, 500),
             
            # others
            MMaxLoss = min.maxloss.portfolio,
            MMad = min.mad.portfolio,
            MCVaR = min.cvar.portfolio,
            MCDaR = min.cdar.portfolio,
            MMadDown = min.mad.downside.portfolio,
            MRiskDown = min.risk.downside.portfolio,
            MCorCov = min.cor.insteadof.cov.portfolio
        )
    )

Hopefully, I did not ruin your prolonged lunch plans 🙂

Commissions

November 5, 2013

Today, I want to explain the commission functionality built into the Systematic Investor Toolbox (SIT) “share” back-test.

At each re-balance time, capital is allocated given the weight such that

	share = weight * capital / price
	cash  = capital - share * price

For example, if weight is 100% (i.e. fully invested), capital = $100, and price = $10, then

	share = 10 shares
	cash = $0

The period return is equal to

	return = [share * price + cash] / [share * price.yesterday + cash] - 1

The total return is equal to

	total.return = [1 + return.0] * [1 + return.1] * ... * [1 + return.n] - 1

The period returns constructed this way let me compute portfolio returns without using loops. I.e., if share, price, and cash are matrices, then

	portfolio.return = rowSums((share * price + cash) / (share * mlag(price) + cash) - 1)

and

	equity = cumprod(1 + portfolio.return)
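As a quick sanity check of these formulas (a toy example of mine, not SIT output), consider holding 9 shares of a $10 stock with $10 in cash while the price moves to $11:

	price = matrix(c(10, 11), ncol = 1)	# price on day 0 and day 1
	share = matrix(c(9, 9), ncol = 1)	# hold 9 shares throughout
	cash  = matrix(c(10, 10), ncol = 1)	# $10 stays uninvested

	lag.price = rbind(NA, price[-nrow(price), , drop = F])	# stand-in for mlag(price)
	portfolio.return = rowSums((share * price + cash) / (share * lag.price + cash) - 1)
	# day 1 return: (9*11 + 10) / (9*10 + 10) - 1 = 109/100 - 1 = 9%

	portfolio.return[1] = 0	# no return on the first day
	equity = cumprod(1 + portfolio.return)	# 1.00, 1.09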

To introduce commissions into the above framework, I had to make the following assumptions.
Let’s assume that P0 and P1 are stock prices and com is a commission that is very small relative to the stock price; then

	[P0 - com] / P0 is approximately equal to P0 / [P0 + com] and
	[P0 - com] * P1 is approximately equal to P0 * [P1 - com]
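The approximation is easy to check numerically; for example, with a $10 stock and a 1-cent commission (a quick illustration of mine):

	P0 = 10; com = 0.01
	(P0 - com) / P0		# 0.9990000
	P0 / (P0 + com)		# 0.9990010 - nearly identical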

Given commissions, the period return formula used in SIT is equal to

	return = [share * price + cash - commission] / [share * price.yesterday + cash] - 1

Now let’s look at an example of a trade with commissions:

	Let's say we are fully invested (i.e. cash = 0 and capital = share * P0)

	opening trade cost = share * P0 + com
	closing trade cost = share * P1 - com

	return = [closing trade cost] / [opening trade cost] - 1
	= [share * P1 - com] / [share * P0 + com] - 1

In SIT, these computations are equivalent to

	([capital - com] / [capital]) * ([share * P1 + cash - com] / [share * P0 + cash]) - 1

	< given that cash = 0 and capital = share * P0 >
	= ([share * P0 - com] / [share * P0]) * ([share * P1 - com] / [share * P0]) - 1

	< given [P0 - com] / P0 ~ P0 / [P0 + com] >
	= ([share * P0] / [share * P0 + com]) * ([share * P1 - com] / [share * P0]) - 1

	= [share * P1 - com] / [share * P0 + com] - 1

Hence, as long as commissions are small relative to the whole trade, the returns produced with SIT will be very close to the true returns.

SIT currently supports the following 3 types of commissions:

  • cents / share commission
    	commission = abs(share - mlag(share)) * commission$cps
    
  • fixed commission
    	commission = sign(abs(share - mlag(share))) * commission$fixed
    
  • percentage commission
    	commission = price * abs(share - mlag(share)) * commission$percentage
    

You can mix and match these commission methods in any way:

	the total.commission = cps.commission + fixed.commission + percentage.commission

and the period return is equal to

	return = (share * price + cash - total.commission) / (share * mlag(price) + cash) - 1
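For example (a toy calculation of mine, not from the post), trading 100 shares at $50 with all three commission types active:

	share.change = 100	# abs(share - mlag(share))
	price = 50
	commission = list(cps = 0.01, fixed = 5.0, percentage = 0.1/100)

	total.commission = share.change * commission$cps +
		sign(share.change) * commission$fixed +
		price * share.change * commission$percentage
	# 100 * 0.01 + 5 + 50 * 100 * 0.001 = 1 + 5 + 5 = $11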

Next let’s see the impact of the different types of commissions. There are two ways to specify the commissions.

  • commission = 0.1

    will be translated into

    commission = list(cps = 0.1, fixed = 0.0, percentage = 0.0)

  • commission = list(cps = 0.0, fixed = 0.0, percentage = 0.0)

    specifies each commission type explicitly
###############################################################################
# Load Systematic Investor Toolbox (SIT)
# https://systematicinvestor.wordpress.com/systematic-investor-toolbox/
###############################################################################
setInternet2(TRUE)
con = gzcon(url('http://www.systematicportfolio.com/sit.gz', 'rb'))
    source(con)
close(con)

	#*****************************************************************
	# Load historical data
	#****************************************************************** 
	load.packages('quantmod')	
	tickers = spl('EEM')

	data <- new.env()
	getSymbols(tickers, src = 'yahoo', from = '1970-01-01', env = data, auto.assign = T)
		for(i in ls(data)) data[[i]] = adjustOHLC(data[[i]], use.Adjusted=T)			
	bt.prep(data, align='keep.all', dates='2013:08::2013:09')	
		
	#*****************************************************************
	# Code Strategies
	#****************************************************************** 		
	buy.date = '2013:08:14'	
	sell.date = '2013:08:15'
	day.after.sell.date = '2013:08:16'		
	
	capital = 100000
	prices = data$prices
	share = as.double(capital / prices[buy.date])
	
	# helper function to compute trade return
	comp.ret <- function(sell.trade.cost, buy.trade.cost) { round(100 * (as.double(sell.trade.cost) / as.double(buy.trade.cost) - 1), 2) }
	
	#*****************************************************************
	# Zero commission
	#****************************************************************** 
	data$weight[] = NA
		data$weight[buy.date] = 1
		data$weight[sell.date] = 0
		commission = 0.0
	model = bt.run.share(data, commission = commission, capital = capital, silent = T)
		
	comp.ret( share * prices[sell.date], share * prices[buy.date] )		
	comp.ret( model$equity[day.after.sell.date], model$equity[buy.date] )		
			
	#*****************************************************************
	# 10c cps commission
	# cents / share commission
   	#   trade cost = abs(share - mlag(share)) * commission$cps	
	#****************************************************************** 
	data$weight[] = NA
		data$weight[buy.date] = 1
		data$weight[sell.date] = 0
		commission = 0.1
	model = bt.run.share(data, commission = commission, capital = capital, silent = T)

	comp.ret( share * (prices[sell.date] - commission), share * (prices[buy.date] + commission) )
	comp.ret( model$equity[day.after.sell.date], model$equity[buy.date] )		
	
	#*****************************************************************
	# $5 fixed commission
	# fixed commission per trade, to more effectively penalize turnover
   	#   trade cost = sign(abs(share - mlag(share))) * commission$fixed	
	#****************************************************************** 
	data$weight[] = NA
		data$weight[buy.date] = 1
		data$weight[sell.date] = 0
		commission = list(cps = 0.0, fixed = 5.0, percentage = 0.0)	
	model = bt.run.share(data, commission = commission, capital = capital, silent = T)

	comp.ret( share * prices[sell.date] - commission$fixed, share * prices[buy.date] + commission$fixed )
	comp.ret( model$equity[day.after.sell.date], model$equity[buy.date] )		
	
	#*****************************************************************
	# % commission
	# percentage commission
	#   trade cost = price * abs(share - mlag(share)) * commission$percentage	
	#****************************************************************** 
	data$weight[] = NA
		data$weight[buy.date] = 1
		data$weight[sell.date] = 0
		commission = list(cps = 0.0, fixed = 0.0, percentage = 1/100)	
	model = bt.run.share(data, commission = commission, capital = capital, silent = T)

	comp.ret( share * prices[sell.date] * (1 - commission$percentage), share * prices[buy.date] * (1 + commission$percentage) )
	comp.ret( model$equity[day.after.sell.date], model$equity[buy.date] )	

To view the complete source code for this example, please have a look at the bt.commission.test() function in bt.test.r at github.

Weekend Reading: Market Neutral

November 2, 2013

I recently came across a very interesting idea in The Problem with Market Neutral (and an Answer) post by Mebane Faber. Today I want to show how you can test such a strategy using the Systematic Investor Toolbox:

###############################################################################
# Load Systematic Investor Toolbox (SIT)
# https://systematicinvestor.wordpress.com/systematic-investor-toolbox/
###############################################################################
setInternet2(TRUE)
con = gzcon(url('http://www.systematicportfolio.com/sit.gz', 'rb'))
    source(con)
close(con)

	#*****************************************************************
	# Load historical data
	#******************************************************************    
	load.packages('quantmod')		
	
	data = new.env()
		
	# load historical market returns
	temp = get.fama.french.data('F-F_Research_Data_Factors', periodicity = '',download = T, clean = T)
		ret = temp[[1]]$Mkt.RF + temp[[1]]$RF
		price = bt.apply.matrix(ret / 100, function(x) cumprod(1 + x))
	data$SPY = make.stock.xts( price )
	
	# load historical momentum returns
	temp = get.fama.french.data('10_Portfolios_Prior_12_2', periodicity = '',download = T, clean = T)		
		ret = temp[[1]]
		price = bt.apply.matrix(ret / 100, function(x) cumprod(1 + x))
	data$HI.MO = make.stock.xts( price$High )
	data$LO.MO = make.stock.xts( price$Low )
	
	# align dates
	bt.prep(data, align='remove.na')
	
	#*****************************************************************
	# Code Strategies
	#*****************************************************************	
	models = list()
	
	data$weight[] = NA
		data$weight$SPY[] = 1
	models$SPY = bt.run.share(data, clean.signal=T)
	
	data$weight[] = NA
		data$weight$HI.MO[] = 1
	models$HI.MO = bt.run.share(data, clean.signal=T)
	
	data$weight[] = NA
		data$weight$LO.MO[] = 1
	models$LO.MO = bt.run.share(data, clean.signal=T)
	
	data$weight[] = NA
		data$weight$HI.MO[] = 1
		data$weight$LO.MO[] = -1
	models$MKT.NEUTRAL = bt.run.share(data, clean.signal=F)

	#*****************************************************************
	# Modified MN
	# The modified strategy below starts 100% market neutral, and depending on the drawdown bucket 
	# will reduce the shorts all the way to zero once the market has declined by 50%
	# (in 20% steps for every 10% decline in stocks)
	#*****************************************************************	
	market.drawdown = -100 * compute.drawdown(data$prices$SPY)
		market.drawdown.10.step = 10 * floor(market.drawdown / 10)
		short.allocation = 100 - market.drawdown.10.step * 2
		short.allocation[ short.allocation < 0 ] = 0
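		# e.g. a 25% market draw-down falls into the 20% bucket:
		# short.allocation = 100 - 2 * 20 = 60, i.e. the short leg runs at 60% of full size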
				
	data$weight[] = NA
		data$weight$HI.MO[] = 1
		data$weight$LO.MO[] = -1 * short.allocation / 100
	models$Modified.MN = bt.run.share(data, clean.signal=F)
	
	#*****************************************************************
	# Create Report
	#*****************************************************************
	strategy.performance.snapshoot(models, T)


Mebane, thank you very much for sharing this great observation and a great strategy that works! I would encourage readers to experiment with the idea and share their findings.

If you want to concentrate on the long side, one idea that comes to mind is to start not fully invested, say at a 90% allocation, and once the market hits, say, a 20% draw-down, to invest 100% in expectation of a quick recovery.

To view the complete source code for this example, please have a look at the bt.mebanefaber.modified.mn.test() function in bt.test.r at github.

Updates for Proportional Minimum Variance and Adaptive Shrinkage methods

October 29, 2013

I have created supporting pages for the two projects I collaborated on with David Varadi in 2013: Proportional Minimum Variance and Adaptive Shrinkage.

Please check the links to get more info, including supporting blog posts, back-tests, R code to reproduce the back-tests, and more to come in the near future.

David and I appreciate your feedback and comments.

Update for Backtesting Asset Allocation Portfolios post

October 24, 2013

It has been over a year since my original post, Backtesting Asset Allocation Portfolios. During this period, I have expanded the functionality of the Systematic Investor Toolbox both in terms of optimization functions and helper back-test functions.

Today, I want to update the Backtesting Asset Allocation portfolios post and showcase new functionality. I will use the following global asset universe: SPY, QQQ, EEM, IWM, EFA, TLT, IYR, GLD, forming portfolios every month using different asset allocation methods.

###############################################################################
# Load Systematic Investor Toolbox (SIT)
# https://systematicinvestor.wordpress.com/systematic-investor-toolbox/
###############################################################################
setInternet2(TRUE)
con = gzcon(url('http://www.systematicportfolio.com/sit.gz', 'rb'))
    source(con)
close(con)
    
	#*****************************************************************
	# Load historical data
	#****************************************************************** 
	load.packages('quantmod,quadprog,corpcor,lpSolve')
	tickers = spl('SPY,QQQ,EEM,IWM,EFA,TLT,IYR,GLD')

	data <- new.env()
	getSymbols(tickers, src = 'yahoo', from = '1980-01-01', env = data, auto.assign = T)
		for(i in ls(data)) data[[i]] = adjustOHLC(data[[i]], use.Adjusted=T)							
	bt.prep(data, align='remove.na', dates='1990::') 
	
	#*****************************************************************
	# Code Strategies
	#****************************************************************** 					
	cluster.group = cluster.group.kmeans.90
		
	obj = portfolio.allocation.helper(data$prices, 
		periodicity = 'months', lookback.len = 60, 
		min.risk.fns = list(
			EW=equal.weight.portfolio,
			RP=risk.parity.portfolio(),
			MD=max.div.portfolio,						
			
			MV=min.var.portfolio,
			MVE=min.var.excel.portfolio,
			MV2=min.var2.portfolio,
			
			MC=min.corr.portfolio,
			MCE=min.corr.excel.portfolio,
			MC2=min.corr2.portfolio,
			
			MS=max.sharpe.portfolio(),
			ERC = equal.risk.contribution.portfolio,

			# target return / risk
			TRET.12 = target.return.portfolio(12/100),								
			TRISK.10 = target.risk.portfolio(10/100),
		
			# cluster
			C.EW = distribute.weights(equal.weight.portfolio, cluster.group),
			C.RP = distribute.weights(risk.parity.portfolio, cluster.group),
			
			# rso
			RSO.RP.5 = rso.portfolio(risk.parity.portfolio, 5, 500), 
			
			# others
			MMaxLoss = min.maxloss.portfolio,
			MMad = min.mad.portfolio,
			MCVaR = min.cvar.portfolio,
			MCDaR = min.cdar.portfolio,
			MMadDown = min.mad.downside.portfolio,
			MRiskDown = min.risk.downside.portfolio,
			MCorCov = min.cor.insteadof.cov.portfolio
		)
	)
	
	models = create.strategies(obj, data)$models
						
	#*****************************************************************
	# Create Report
	#******************************************************************    
	strategy.performance.snapshoot(models, T, 'Backtesting Asset Allocation portfolios')


I hope you will enjoy creating your own portfolio allocation methods or playing with a large variety of portfolio allocation techniques that are readily available for your experimentation.

To view the complete source code for this example, please have a look at the bt.aa.test.new() function in bt.test.r at github.

7Twelve Back-test

August 15, 2013

I recently came across The 7Twelve Portfolio strategy. I like the catchy name, and the strategy report, “An Introduction to 7Twelve,” provides useful additional information about the strategy.

Today I want to show how to back-test the 7Twelve Portfolio strategy using the Systematic Investor Toolbox.

Let’s start by loading the historical data:

###############################################################################
# Load Systematic Investor Toolbox (SIT)
# https://systematicinvestor.wordpress.com/systematic-investor-toolbox/
###############################################################################
setInternet2(TRUE)
con = gzcon(url('http://www.systematicportfolio.com/sit.gz', 'rb'))
    source(con)
close(con)
   
    #*****************************************************************
    # Load historical data
    #******************************************************************
    load.packages('quantmod')  

    tickers = spl('VFINX,VIMSX,NAESX,VDMIX,VEIEX,VGSIX,FNARX,QRAAX,VBMFX,VIPSX,OIBAX,BIL') 
	
    data <- new.env()
    getSymbols(tickers, src = 'yahoo', from = '1970-01-01', env = data, auto.assign = T)

    #--------------------------------   
    # BIL     30-May-2007 
    # load 3-Month Treasury Bill from FRED
    TB3M = quantmod::getSymbols('DTB3', src='FRED', auto.assign = FALSE)		
    TB3M[] = ifna.prev(TB3M)	
    TB3M = processTBill(TB3M, timetomaturity = 1/4, 261)	
    #--------------------------------       	

    # extend	
    data$BIL = extend.data(data$BIL, TB3M, scale=T)	
	
    for(i in ls(data)) data[[i]] = adjustOHLC(data[[i]], use.Adjusted=T)		
				
    bt.prep(data, align='remove.na')

Next, let’s back-test the 7Twelve Portfolio strategy with annual, quarterly, and monthly rebalancing.

    #*****************************************************************
    # Code Strategies
    #****************************************************************** 
    models = list()
	
    # Vanguard 500 Index Investor (VFINX)
    data$weight[] = NA
        data$weight$VFINX[] = 1
    models$VFINX  = bt.run.share(data, clean.signal=F) 
		
    #*****************************************************************
    # Code Strategies
    #****************************************************************** 	
    obj = portfolio.allocation.helper(data$prices, periodicity = 'years',
        min.risk.fns = list(EW=equal.weight.portfolio)
    ) 	
    models$year = create.strategies(obj, data)$models$EW

    obj = portfolio.allocation.helper(data$prices, periodicity = 'quarters',
        min.risk.fns = list(EW=equal.weight.portfolio)
    ) 	
    models$quarter = create.strategies(obj, data)$models$EW
		
    obj = portfolio.allocation.helper(data$prices, periodicity = 'months',
        min.risk.fns = list(EW=equal.weight.portfolio)
    ) 	
    models$month = create.strategies(obj, data)$models$EW
	
    #*****************************************************************
    # Create Report
    #****************************************************************** 
    strategy.performance.snapshoot(models, T)


The strategy does better than the Vanguard 500 Index benchmark, but still suffers a huge draw-down in the 2008-2009 period.

How would you make it a better strategy? Please share your ideas.

To view the complete source code for this example, please have a look at the bt.7twelve.strategy.test() function in bt.test.r at github.