mirror of
https://github.com/bspeice/bspeice.github.io
synced 2024-12-30 10:18:11 -05:00
Add new blog in
This commit is contained in:
commit
fa5e72823e
3
.gitignore
vendored
Normal file
3
.gitignore
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
*.swp
|
||||
*.pyc
|
||||
output/
|
6
.gitmodules
vendored
Normal file
6
.gitmodules
vendored
Normal file
@ -0,0 +1,6 @@
|
||||
[submodule "pelican-plugins"]
|
||||
path = pelican-plugins
|
||||
url = https://github.com/getpelican/pelican-plugins/
|
||||
[submodule "nest"]
|
||||
path = nest
|
||||
url = https://github.com/molivier/nest
|
124
Makefile
Normal file
124
Makefile
Normal file
@ -0,0 +1,124 @@
|
||||
PY?=python
|
||||
PELICAN?=pelican
|
||||
PELICANOPTS=
|
||||
|
||||
BASEDIR=$(CURDIR)
|
||||
INPUTDIR=$(BASEDIR)/content
|
||||
OUTPUTDIR=$(BASEDIR)/output
|
||||
CONFFILE=$(BASEDIR)/pelicanconf.py
|
||||
PUBLISHCONF=$(BASEDIR)/publishconf.py
|
||||
|
||||
FTP_HOST=localhost
|
||||
FTP_USER=anonymous
|
||||
FTP_TARGET_DIR=/
|
||||
|
||||
SSH_HOST=localhost
|
||||
SSH_PORT=22
|
||||
SSH_USER=root
|
||||
SSH_TARGET_DIR=/var/www
|
||||
|
||||
S3_BUCKET=my_s3_bucket
|
||||
|
||||
CLOUDFILES_USERNAME=my_rackspace_username
|
||||
CLOUDFILES_API_KEY=my_rackspace_api_key
|
||||
CLOUDFILES_CONTAINER=my_cloudfiles_container
|
||||
|
||||
DROPBOX_DIR=~/Dropbox/Public/
|
||||
|
||||
GITHUB_PAGES_BRANCH=master
|
||||
|
||||
DEBUG ?= 0
|
||||
ifeq ($(DEBUG), 1)
|
||||
PELICANOPTS += -D
|
||||
endif
|
||||
|
||||
RELATIVE ?= 0
|
||||
ifeq ($(RELATIVE), 1)
|
||||
PELICANOPTS += --relative-urls
|
||||
endif
|
||||
|
||||
help:
|
||||
@echo 'Makefile for a pelican Web site '
|
||||
@echo ' '
|
||||
@echo 'Usage: '
|
||||
@echo ' make html (re)generate the web site '
|
||||
@echo ' make clean remove the generated files '
|
||||
@echo ' make regenerate regenerate files upon modification '
|
||||
@echo ' make publish generate using production settings '
|
||||
@echo ' make serve [PORT=8000] serve site at http://localhost:8000'
|
||||
@echo ' make serve-global [SERVER=0.0.0.0] serve (as root) to $(SERVER):80 '
|
||||
@echo ' make devserver [PORT=8000] start/restart develop_server.sh '
|
||||
@echo ' make stopserver stop local server '
|
||||
@echo ' make ssh_upload upload the web site via SSH '
|
||||
@echo ' make rsync_upload upload the web site via rsync+ssh '
|
||||
@echo ' make dropbox_upload upload the web site via Dropbox '
|
||||
@echo ' make ftp_upload upload the web site via FTP '
|
||||
@echo ' make s3_upload upload the web site via S3 '
|
||||
@echo ' make cf_upload upload the web site via Cloud Files'
|
||||
@echo ' make github upload the web site via gh-pages '
|
||||
@echo ' '
|
||||
@echo 'Set the DEBUG variable to 1 to enable debugging, e.g. make DEBUG=1 html '
|
||||
@echo 'Set the RELATIVE variable to 1 to enable relative urls '
|
||||
@echo ' '
|
||||
|
||||
html:
|
||||
$(PELICAN) $(INPUTDIR) -o $(OUTPUTDIR) -s $(CONFFILE) $(PELICANOPTS)
|
||||
|
||||
clean:
|
||||
[ ! -d $(OUTPUTDIR) ] || rm -rf $(OUTPUTDIR)
|
||||
|
||||
regenerate:
|
||||
$(PELICAN) -r $(INPUTDIR) -o $(OUTPUTDIR) -s $(CONFFILE) $(PELICANOPTS)
|
||||
|
||||
serve:
|
||||
ifdef PORT
|
||||
cd $(OUTPUTDIR) && $(PY) -m pelican.server $(PORT)
|
||||
else
|
||||
cd $(OUTPUTDIR) && $(PY) -m pelican.server
|
||||
endif
|
||||
|
||||
serve-global:
|
||||
ifdef SERVER
|
||||
cd $(OUTPUTDIR) && $(PY) -m pelican.server 80 $(SERVER)
|
||||
else
|
||||
cd $(OUTPUTDIR) && $(PY) -m pelican.server 80 0.0.0.0
|
||||
endif
|
||||
|
||||
|
||||
devserver:
|
||||
ifdef PORT
|
||||
$(BASEDIR)/develop_server.sh restart $(PORT)
|
||||
else
|
||||
$(BASEDIR)/develop_server.sh restart
|
||||
endif
|
||||
|
||||
stopserver:
|
||||
$(BASEDIR)/develop_server.sh stop
|
||||
@echo 'Stopped Pelican and SimpleHTTPServer processes running in background.'
|
||||
|
||||
publish:
|
||||
$(PELICAN) $(INPUTDIR) -o $(OUTPUTDIR) -s $(PUBLISHCONF) $(PELICANOPTS)
|
||||
|
||||
ssh_upload: publish
|
||||
scp -P $(SSH_PORT) -r $(OUTPUTDIR)/* $(SSH_USER)@$(SSH_HOST):$(SSH_TARGET_DIR)
|
||||
|
||||
rsync_upload: publish
|
||||
rsync -e "ssh -p $(SSH_PORT)" -P -rvzc --delete $(OUTPUTDIR)/ $(SSH_USER)@$(SSH_HOST):$(SSH_TARGET_DIR) --cvs-exclude
|
||||
|
||||
dropbox_upload: publish
|
||||
cp -r $(OUTPUTDIR)/* $(DROPBOX_DIR)
|
||||
|
||||
ftp_upload: publish
|
||||
lftp ftp://$(FTP_USER)@$(FTP_HOST) -e "mirror -R $(OUTPUTDIR) $(FTP_TARGET_DIR) ; quit"
|
||||
|
||||
s3_upload: publish
|
||||
s3cmd sync $(OUTPUTDIR)/ s3://$(S3_BUCKET) --acl-public --delete-removed --guess-mime-type
|
||||
|
||||
cf_upload: publish
|
||||
cd $(OUTPUTDIR) && swift -v -A https://auth.api.rackspacecloud.com/v1.0 -U $(CLOUDFILES_USERNAME) -K $(CLOUDFILES_API_KEY) upload -c $(CLOUDFILES_CONTAINER) .
|
||||
|
||||
github: publish
|
||||
ghp-import -m "Generate Pelican site" -b $(GITHUB_PAGES_BRANCH) $(OUTPUTDIR)
|
||||
git push origin $(GITHUB_PAGES_BRANCH)
|
||||
|
||||
.PHONY: html help clean regenerate serve serve-global devserver publish ssh_upload rsync_upload dropbox_upload ftp_upload s3_upload cf_upload github
|
199
_nb_header.html
Normal file
199
_nb_header.html
Normal file
File diff suppressed because one or more lines are too long
58
content/articles/2015-11-14-welcome.md
Normal file
58
content/articles/2015-11-14-welcome.md
Normal file
@ -0,0 +1,58 @@
|
||||
Title: Welcome, and an algorithm
|
||||
Date: 2015-11-19
|
||||
Tags: introduction, trading
|
||||
Modified: 2015-12-05
|
||||
Category: Blog
|
||||
|
||||
<script type="text/x-mathjax-config">
|
||||
MathJax.Hub.Config({tex2jax: {inlineMath: [['$','$'], ['\(','\)']]}});
|
||||
</script>
|
||||
<script async src='https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_CHTML'></script>
|
||||
|
||||
Hello! Glad to meet you. I'm currently a student at Columbia University
|
||||
studying Financial Engineering, and want to give an overview of the projects
|
||||
I'm working on!
|
||||
|
||||
To start things off, Columbia has been hosting a trading competition that
|
||||
myself and another partner are competing in. I'm including a notebook of the
|
||||
algorithm that we're using, just to give a simple overview of a miniature
|
||||
algorithm.
|
||||
|
||||
The competition is scored in 3 areas:
|
||||
|
||||
- Total return
|
||||
- [Sharpe ratio](1)
|
||||
- Maximum drawdown
|
||||
|
||||
Our algorithm uses a basic momentum strategy: in the given list of potential
|
||||
portfolios, pick the stocks that have been performing well in the past 30
|
||||
days. Then, optimize for return subject to the drawdown being below a specific
|
||||
level. We didn't include the Sharpe ratio as a constraint, mostly because
|
||||
we were a bit late entering the competition.
|
||||
|
||||
I'll be updating this post with the results of our algorithm as they come along!
|
||||
|
||||
---
|
||||
|
||||
**UPDATE 12/5/2015**: Now that the competition has ended, I wanted to update
|
||||
how the algorithm performed. Unfortunately, it didn't do very well. I'm planning
|
||||
to make some tweaks over the coming weeks, and do another forward test in January.
|
||||
|
||||
- After week 1: Down .1%
|
||||
- After week 2: Down 1.4%
|
||||
- After week 3: Flat
|
||||
|
||||
And some statistics for all teams participating in the competition:
|
||||
|
||||
| | |
|
||||
|--------------------|--------|
|
||||
| Max Return | 74.1% |
|
||||
| Min Return | -97.4% |
|
||||
| Average Return | -.1% |
|
||||
| Std Dev of Returns | 19.6% |
|
||||
|
||||
---
|
||||
|
||||
{% notebook 2015-11-14-welcome.ipynb %}
|
||||
|
||||
[1]: https://en.wikipedia.org/wiki/Sharpe_ratio
|
20
content/articles/2015-11-27-autocallable.md
Normal file
20
content/articles/2015-11-27-autocallable.md
Normal file
@ -0,0 +1,20 @@
|
||||
Title: Autocallable Bonds
|
||||
Date: 2015-11-27
|
||||
Category: Blog
|
||||
Tags: finance, simulation, monte carlo
|
||||
Authors: Bradlee Speice
|
||||
Summary: For a final project, my group was tasked with understanding three exotic derivatives: The Athena, Phoenix without memory, and Phoenix with memory autocallable products.
|
||||
[//]: <> "Modified:"
|
||||
|
||||
<script type="text/x-mathjax-config">
|
||||
MathJax.Hub.Config({tex2jax: {inlineMath: [['$','$'], ['\(','\)']]}});
|
||||
</script>
|
||||
<script async src='https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_CHTML'></script>
|
||||
|
||||
My only non-core class this semester has been in Structure Products. We've been surveying a wide variety of products, and the final project was to pick one to report on.
|
||||
Because these are all very similar, we decided to demonstrate all 3 products at once.
|
||||
|
||||
What follows below is a notebook demonstrating the usage of [Julia](http://julialang.com) for Monte-Carlo simulation of some exotic products.
|
||||
|
||||
{% notebook 2015-11-27-autocallable.ipynb language[julia] %}
|
||||
|
14
content/articles/2015-12-26-testing_cramer.md
Normal file
14
content/articles/2015-12-26-testing_cramer.md
Normal file
@ -0,0 +1,14 @@
|
||||
Title: Testing Cramer
|
||||
Date: 2015-12-26
|
||||
Category: Blog
|
||||
Tags: futures, data science
|
||||
Authors: Bradlee Speice
|
||||
Summary:
|
||||
[//]: <> "Modified: "
|
||||
|
||||
<script type="text/x-mathjax-config">
|
||||
MathJax.Hub.Config({tex2jax: {inlineMath: [['$','$'], ['\(','\)']]}});
|
||||
</script>
|
||||
<script async src='https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_CHTML'></script>
|
||||
|
||||
{% notebook 2015-12-26-testing_cramer.ipynb %}
|
14
content/articles/2016-1-1-complaining-about-weather.md
Normal file
14
content/articles/2016-1-1-complaining-about-weather.md
Normal file
@ -0,0 +1,14 @@
|
||||
Title: Complaining about the Weather
|
||||
Date: 2016-01-01
|
||||
Category: Blog
|
||||
Tags: weather
|
||||
Authors: Bradlee Speice
|
||||
Summary: Figuring out whether people should be complaining about the recent weather in NC.
|
||||
[//]: <> "Modified: "
|
||||
|
||||
<script type="text/x-mathjax-config">
|
||||
MathJax.Hub.Config({tex2jax: {inlineMath: [['$','$'], ['\(','\)']]}});
|
||||
</script>
|
||||
<script async src='https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_CHTML'></script>
|
||||
|
||||
{% notebook 2016-1-1-complaining-about-weather.ipynb %}
|
14
content/articles/2016-1-23-cloudy-in-seattle.md
Normal file
14
content/articles/2016-1-23-cloudy-in-seattle.md
Normal file
@ -0,0 +1,14 @@
|
||||
Title: Cloudy in Seattle
|
||||
Date: 2016-01-23
|
||||
Category: Blog
|
||||
Tags: weather, data science
|
||||
Authors: Bradlee Speice
|
||||
Summary: Building on prior analysis, is Seattle's reputation as a depressing city actually well-earned?
|
||||
[//]: <> "Modified: "
|
||||
|
||||
<script type="text/x-mathjax-config">
|
||||
MathJax.Hub.Config({tex2jax: {inlineMath: [['$','$'], ['\(','\)']]}});
|
||||
</script>
|
||||
<script async src='https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_CHTML'></script>
|
||||
|
||||
{% notebook 2016-1-23-cloudy-in-seattle.ipynb %}
|
@ -0,0 +1,14 @@
|
||||
Title: Profitability using the Investment Formula
|
||||
Date: 2016-02-26
|
||||
Category: Blog
|
||||
Tags: algorithmic-trading, python
|
||||
Authors: Bradlee Speice
|
||||
Summary: After developing a formula to guide our investing, how do we actually evaluate its performance in the real world?
|
||||
[//]: <> "Modified: "
|
||||
|
||||
<script type="text/x-mathjax-config">
|
||||
MathJax.Hub.Config({tex2jax: {inlineMath: [['$','$'], ['\\(','\\)']]}});
|
||||
</script>
|
||||
<script async src='https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_CHTML'></script>
|
||||
|
||||
{% notebook 2016-2-26-profitability-using-the-investment-formula.ipynb %}
|
14
content/articles/2016-2-3-guaranteed-money-maker.md
Normal file
14
content/articles/2016-2-3-guaranteed-money-maker.md
Normal file
@ -0,0 +1,14 @@
|
||||
Title: Guaranteed Money Maker
|
||||
Date: 2016-02-03
|
||||
Category: Blog
|
||||
Tags: martingale, strategy
|
||||
Authors: Bradlee Speice
|
||||
Summary: Developing an investment strategy based on the Martingale betting strategy
|
||||
[//]: <> "Modified: "
|
||||
|
||||
<script type="text/x-mathjax-config">
|
||||
MathJax.Hub.Config({tex2jax: {inlineMath: [['$','$'], ['\(','\)']]}});
|
||||
</script>
|
||||
<script async src='https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_CHTML'></script>
|
||||
|
||||
{% notebook 2016-2-3-guaranteed-money-maker.ipynb %}
|
246
content/images/logo.svg
Normal file
246
content/images/logo.svg
Normal file
@ -0,0 +1,246 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<svg
|
||||
xmlns:dc="http://purl.org/dc/elements/1.1/"
|
||||
xmlns:cc="http://creativecommons.org/ns#"
|
||||
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
|
||||
xmlns:svg="http://www.w3.org/2000/svg"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
|
||||
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
|
||||
width="40"
|
||||
height="40"
|
||||
id="svg2"
|
||||
version="1.1"
|
||||
inkscape:version="0.91 r13725"
|
||||
sodipodi:docname="logo.svg">
|
||||
<metadata
|
||||
id="metadata22">
|
||||
<rdf:RDF>
|
||||
<cc:Work
|
||||
rdf:about="">
|
||||
<dc:format>image/svg+xml</dc:format>
|
||||
<dc:type
|
||||
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
|
||||
<dc:title></dc:title>
|
||||
</cc:Work>
|
||||
</rdf:RDF>
|
||||
</metadata>
|
||||
<defs
|
||||
id="defs20" />
|
||||
<sodipodi:namedview
|
||||
pagecolor="#000000"
|
||||
bordercolor="#666666"
|
||||
borderopacity="1"
|
||||
objecttolerance="10"
|
||||
gridtolerance="10"
|
||||
guidetolerance="10"
|
||||
inkscape:pageopacity="0"
|
||||
inkscape:pageshadow="2"
|
||||
inkscape:window-width="1280"
|
||||
inkscape:window-height="666"
|
||||
id="namedview18"
|
||||
showgrid="false"
|
||||
fit-margin-top="0"
|
||||
fit-margin-left="0"
|
||||
fit-margin-right="0"
|
||||
fit-margin-bottom="0"
|
||||
inkscape:zoom="2.38396"
|
||||
inkscape:cx="-45.314237"
|
||||
inkscape:cy="4.59551"
|
||||
inkscape:window-x="-8"
|
||||
inkscape:window-y="-8"
|
||||
inkscape:window-maximized="1"
|
||||
inkscape:current-layer="g4" />
|
||||
<!-- Created with SVG-edit - http://svg-edit.googlecode.com/ -->
|
||||
<g
|
||||
id="g4"
|
||||
transform="translate(-49.932101,-49.948405)">
|
||||
<title
|
||||
id="title6">Layer 1</title>
|
||||
<rect
|
||||
id="svg_4"
|
||||
height="37.140415"
|
||||
width="37.140415"
|
||||
y="51.378197"
|
||||
x="51.361893"
|
||||
style="fill:#000000;fill-opacity:0.01000001;stroke:#ffffff;stroke-width:2.85958362" />
|
||||
<text
|
||||
transform="matrix(0,0.99987451,-1.0001255,0,0,0)"
|
||||
xml:space="preserve"
|
||||
font-size="24"
|
||||
id="svg_5"
|
||||
y="-58.652771"
|
||||
x="63.576546"
|
||||
stroke-linecap="null"
|
||||
stroke-linejoin="null"
|
||||
style="font-size:35.19306946px;font-family:serif;text-anchor:middle;fill:#000000;fill-opacity:0.01000001;stroke:#ffffff;stroke-width:1.46637785;stroke-dasharray:none">$</text>
|
||||
<rect
|
||||
style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3350"
|
||||
width="2.8599427"
|
||||
height="2.8592248"
|
||||
x="53.935837"
|
||||
y="75.109764" />
|
||||
<rect
|
||||
style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3350-4"
|
||||
width="2.8599427"
|
||||
height="2.8592248"
|
||||
x="53.935837"
|
||||
y="79.112679" />
|
||||
<rect
|
||||
style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3350-4-8"
|
||||
width="2.8599427"
|
||||
height="2.8592248"
|
||||
x="53.935837"
|
||||
y="83.115593" />
|
||||
<rect
|
||||
style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3350-6"
|
||||
width="2.8599427"
|
||||
height="2.8592248"
|
||||
x="57.939758"
|
||||
y="75.109764" />
|
||||
<rect
|
||||
style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3350-4-87"
|
||||
width="2.8599427"
|
||||
height="2.8592248"
|
||||
x="57.939758"
|
||||
y="79.112679" />
|
||||
<rect
|
||||
style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3350-4-8-8"
|
||||
width="2.8599427"
|
||||
height="2.8592248"
|
||||
x="57.939758"
|
||||
y="83.115593" />
|
||||
<rect
|
||||
style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3350-9"
|
||||
width="2.8599427"
|
||||
height="2.8592248"
|
||||
x="61.94368"
|
||||
y="75.109764" />
|
||||
<rect
|
||||
style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3350-4-3"
|
||||
width="2.8599427"
|
||||
height="2.8592248"
|
||||
x="61.94368"
|
||||
y="79.112679" />
|
||||
<rect
|
||||
style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3350-4-8-4"
|
||||
width="2.8599427"
|
||||
height="2.8592248"
|
||||
x="61.94368"
|
||||
y="83.115593" />
|
||||
<rect
|
||||
style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3350-6-6"
|
||||
width="2.8599427"
|
||||
height="2.8592248"
|
||||
x="65.947601"
|
||||
y="75.109764" />
|
||||
<rect
|
||||
style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3350-4-87-9"
|
||||
width="2.8599427"
|
||||
height="2.8592248"
|
||||
x="65.947601"
|
||||
y="79.112679" />
|
||||
<rect
|
||||
style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3350-4-8-8-3"
|
||||
width="2.8599427"
|
||||
height="2.8592248"
|
||||
x="65.947601"
|
||||
y="83.115593" />
|
||||
<rect
|
||||
style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3350-1"
|
||||
width="2.8599427"
|
||||
height="2.8592248"
|
||||
x="71.09549"
|
||||
y="75.109764" />
|
||||
<rect
|
||||
style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3350-4-83"
|
||||
width="2.8599427"
|
||||
height="2.8592248"
|
||||
x="71.09549"
|
||||
y="79.112663" />
|
||||
<rect
|
||||
style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3350-4-8-48"
|
||||
width="2.8599427"
|
||||
height="2.8592248"
|
||||
x="71.09549"
|
||||
y="83.115593" />
|
||||
<rect
|
||||
style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3350-6-2"
|
||||
width="2.8599427"
|
||||
height="2.8592248"
|
||||
x="75.099411"
|
||||
y="75.109764" />
|
||||
<rect
|
||||
style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3350-4-87-3"
|
||||
width="2.8599427"
|
||||
height="2.8592248"
|
||||
x="75.099411"
|
||||
y="79.112663" />
|
||||
<rect
|
||||
style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3350-4-8-8-9"
|
||||
width="2.8599427"
|
||||
height="2.8592248"
|
||||
x="75.099411"
|
||||
y="83.115593" />
|
||||
<rect
|
||||
style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3350-9-2"
|
||||
width="2.8599427"
|
||||
height="2.8592248"
|
||||
x="79.103333"
|
||||
y="75.109764" />
|
||||
<rect
|
||||
style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3350-4-3-2"
|
||||
width="2.8599427"
|
||||
height="2.8592248"
|
||||
x="79.103333"
|
||||
y="79.112663" />
|
||||
<rect
|
||||
style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3350-4-8-4-3"
|
||||
width="2.8599427"
|
||||
height="2.8592248"
|
||||
x="79.103333"
|
||||
y="83.115593" />
|
||||
<rect
|
||||
style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3350-6-6-2"
|
||||
width="2.8599427"
|
||||
height="2.8592248"
|
||||
x="83.107254"
|
||||
y="75.109764" />
|
||||
<rect
|
||||
style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3350-4-87-9-0"
|
||||
width="2.8599427"
|
||||
height="2.8592248"
|
||||
x="83.107254"
|
||||
y="79.112663" />
|
||||
<rect
|
||||
style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
|
||||
id="rect3350-4-8-8-3-3"
|
||||
width="2.8599427"
|
||||
height="2.8592248"
|
||||
x="83.107254"
|
||||
y="83.115593" />
|
||||
</g>
|
||||
</svg>
|
After Width: | Height: | Size: 8.9 KiB |
293
content/notebooks/2015-11-14-welcome.ipynb
Normal file
293
content/notebooks/2015-11-14-welcome.ipynb
Normal file
@ -0,0 +1,293 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Trading Competition Optimization\n",
|
||||
"\n",
|
||||
"### Goal: Max return given maximum Sharpe and Drawdown"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from IPython.display import display\n",
|
||||
"import Quandl\n",
|
||||
"from datetime import datetime, timedelta\n",
|
||||
"\n",
|
||||
"tickers = ['XOM', 'CVX', 'CLB', 'OXY', 'SLB']\n",
|
||||
"market_ticker = 'GOOG/NYSE_VOO'\n",
|
||||
"lookback = 30\n",
|
||||
"d_col = 'Close'\n",
|
||||
"\n",
|
||||
"data = {tick: Quandl.get('YAHOO/{}'.format(tick))[-lookback:] for tick in tickers}\n",
|
||||
"market = Quandl.get(market_ticker)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Calculating the Return\n",
|
||||
"We first want to know how much each ticker returned over the prior period."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'CLB': -0.0016320202164526894,\n",
|
||||
" 'CVX': 0.0010319531629488911,\n",
|
||||
" 'OXY': 0.00093418904454400551,\n",
|
||||
" 'SLB': 0.00098431254720448159,\n",
|
||||
" 'XOM': 0.00044165797556096868}"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"returns = {tick: data[tick][d_col].pct_change() for tick in tickers}\n",
|
||||
"\n",
|
||||
"display({tick: returns[tick].mean() for tick in tickers})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Calculating the Sharpe ratio\n",
|
||||
"Sharpe: ${R - R_M \\over \\sigma}$\n",
|
||||
"\n",
|
||||
"We use the average return over the lookback period, minus the market average return, over the ticker standard deviation to calculate the Sharpe. Shorting a stock turns a negative Sharpe positive."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'CLB': -0.10578734457846127,\n",
|
||||
" 'CVX': 0.027303529817677398,\n",
|
||||
" 'OXY': 0.022622210057414487,\n",
|
||||
" 'SLB': 0.026950946344858676,\n",
|
||||
" 'XOM': -0.0053519259698605499}"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"market_returns = market.pct_change()\n",
|
||||
"\n",
|
||||
"sharpe = lambda ret: (ret.mean() - market_returns[d_col].mean()) / ret.std()\n",
|
||||
"sharpes = {tick: sharpe(returns[tick]) for tick in tickers}\n",
|
||||
"\n",
|
||||
"display(sharpes)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Calculating the drawdown\n",
|
||||
"This one is easy - what is the maximum daily change over the lookback period? That is, because we will allow short positions, we are not concerned strictly with maximum downturn, but in general, what is the largest 1-day change?"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'CLB': 0.043551495607375035,\n",
|
||||
" 'CVX': 0.044894389686214398,\n",
|
||||
" 'OXY': 0.051424517867144637,\n",
|
||||
" 'SLB': 0.034774627850375328,\n",
|
||||
" 'XOM': 0.035851524605672758}"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"drawdown = lambda ret: ret.abs().max()\n",
|
||||
"drawdowns = {tick: drawdown(returns[tick]) for tick in tickers}\n",
|
||||
"\n",
|
||||
"display(drawdowns)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Performing the optimization\n",
|
||||
"\n",
|
||||
"$\\begin{align}\n",
|
||||
"max\\ \\ & \\mu \\cdot \\omega\\\\\n",
|
||||
"s.t.\\ \\ & \\vec{1} \\omega = 1\\\\\n",
|
||||
"& \\vec{S} \\omega \\ge s\\\\\n",
|
||||
"& \\vec{D} \\cdot | \\omega | \\le d\\\\\n",
|
||||
"& \\left|\\omega\\right| \\le l\\\\\n",
|
||||
"\\end{align}$\n",
|
||||
"\n",
|
||||
"We want to maximize average return subject to having a full portfolio, Sharpe above a specific level, drawdown below a level, and leverage not too high - that is, don't have huge long/short positions."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Optimization terminated successfully.'"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Holdings: [('XOM', 5.8337945679814904), ('CVX', 42.935064321851307), ('CLB', -124.5), ('OXY', 36.790387773552119), ('SLB', 39.940753336615096)]\""
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Expected Return: 32.375%'"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Expected Max Drawdown: 4.34%'"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"from scipy.optimize import minimize\n",
|
||||
"\n",
|
||||
"#sharpe_limit = .1\n",
|
||||
"drawdown_limit = .05\n",
|
||||
"leverage = 250\n",
|
||||
"\n",
|
||||
"# Use the map so we can guarantee we maintain the correct order\n",
|
||||
"# sharpe_a = np.array(list(map(lambda tick: sharpes[tick], tickers))) * -1 # So we can write as upper-bound\n",
|
||||
"dd_a = np.array(list(map(lambda tick: drawdowns[tick], tickers)))\n",
|
||||
"returns_a = np.array(list(map(lambda tick: returns[tick].mean(), tickers))) # Because minimizing\n",
|
||||
"\n",
|
||||
"meets_sharpe = lambda x: sum(abs(x) * sharpe_a) - sharpe_limit\n",
|
||||
"def meets_dd(x):\n",
|
||||
" portfolio = sum(abs(x))\n",
|
||||
" if portfolio < .1:\n",
|
||||
" # If there are no stocks in the portfolio,\n",
|
||||
" # we can accidentally induce division by 0,\n",
|
||||
" # or division by something small enough to cause infinity\n",
|
||||
" return 0\n",
|
||||
" \n",
|
||||
" return drawdown_limit - sum(abs(x) * dd_a) / sum(abs(x))\n",
|
||||
"\n",
|
||||
"is_portfolio = lambda x: sum(x) - 1\n",
|
||||
"\n",
|
||||
"def within_leverage(x):\n",
|
||||
" return leverage - sum(abs(x))\n",
|
||||
"\n",
|
||||
"objective = lambda x: sum(x * returns_a) * -1 # Because we're minimizing\n",
|
||||
"bounds = ((None, None),) * len(tickers)\n",
|
||||
"x = np.zeros(len(tickers))\n",
|
||||
"\n",
|
||||
"constraints = [\n",
|
||||
" {\n",
|
||||
" 'type': 'eq',\n",
|
||||
" 'fun': is_portfolio\n",
|
||||
" }, {\n",
|
||||
" 'type': 'ineq',\n",
|
||||
" 'fun': within_leverage\n",
|
||||
" #}, {\n",
|
||||
" # 'type': 'ineq',\n",
|
||||
" # 'fun': meets_sharpe\n",
|
||||
" }, {\n",
|
||||
" 'type': 'ineq',\n",
|
||||
" 'fun': meets_dd\n",
|
||||
" }\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"optimal = minimize(objective, x, bounds=bounds, constraints=constraints,\n",
|
||||
" options={'maxiter': 500})\n",
|
||||
"\n",
|
||||
"# Optimization time!\n",
|
||||
"display(optimal.message)\n",
|
||||
"\n",
|
||||
"display(\"Holdings: {}\".format(list(zip(tickers, optimal.x))))\n",
|
||||
"\n",
|
||||
"expected_return = optimal.fun * -100 # multiply by -100 to scale, and compensate for minimizing\n",
|
||||
"display(\"Expected Return: {:.3f}%\".format(expected_return))\n",
|
||||
"\n",
|
||||
"expected_drawdown = sum(abs(optimal.x) * dd_a) / sum(abs(optimal.x)) * 100\n",
|
||||
"display(\"Expected Max Drawdown: {0:.2f}%\".format(expected_drawdown))\n",
|
||||
"\n",
|
||||
"# TODO: Calculate expected Sharpe"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.5.0"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
3974
content/notebooks/2015-11-27-autocallable.ipynb
Normal file
3974
content/notebooks/2015-11-27-autocallable.ipynb
Normal file
File diff suppressed because one or more lines are too long
428
content/notebooks/2015-12-26-testing_cramer.ipynb
Normal file
428
content/notebooks/2015-12-26-testing_cramer.ipynb
Normal file
@ -0,0 +1,428 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import requests\n",
|
||||
"import pandas as pd\n",
|
||||
"import numpy as np\n",
|
||||
"from dateutil import parser as dtparser\n",
|
||||
"from dateutil.relativedelta import relativedelta\n",
|
||||
"from datetime import datetime\n",
|
||||
"from html.parser import HTMLParser\n",
|
||||
"from copy import copy\n",
|
||||
"import Quandl"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Testing Cramer\n",
|
||||
"\n",
|
||||
"Pursuant to attending a graduate school studying Financial Engineering, I've been a fan of the [Mad Money][1] TV show featuring the bombastic Jim Cramer. One of the things that he's said is that you shouldn't use the futures to predict where the stock market is going to go. But he says it often enough, I've begun to wonder - who is he trying to convince?\n",
|
||||
"\n",
|
||||
"It makes sense that because futures on things like the S&P 500 are traded continuously, they would price in market information before the stock market opens. So is Cramer right to be convinced that strategies based on the futures are a poor idea? I wanted to test it out.\n",
|
||||
"\n",
|
||||
"The first question is where to get the futures data. I've been part of [Seeking Alpha][2] for a bit, and they publish the [Wall Street Breakfast][3] newsletter which contains daily futures returns as of 6:20 AM EST. I'd be interested in using that data to see if we can actually make some money.\n",
|
||||
"\n",
|
||||
"First though, let's get the data:\n",
|
||||
"\n",
|
||||
"# Downloading Futures data from Seeking Alpha\n",
|
||||
"\n",
|
||||
"We're going to define two HTML parsing classes - one to get the article URL's from a page, and one to get the actual data from each article.\n",
|
||||
"\n",
|
||||
"[1]: http://www.cnbc.com/mad-money/\n",
|
||||
"[2]: http://seekingalpha.com/\n",
|
||||
"[3]: http://seekingalpha.com/author/wall-street-breakfast?s=wall-street-breakfast"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class ArticleListParser(HTMLParser):\n",
|
||||
" \"\"\"Given a web page with articles on it, parse out the article links\"\"\"\n",
|
||||
" \n",
|
||||
" articles = []\n",
|
||||
" \n",
|
||||
" def handle_starttag(self, tag, attrs):\n",
|
||||
" #if tag == 'div' and (\"id\", \"author_articles_wrapper\") in attrs:\n",
|
||||
" # self.fetch_links = True\n",
|
||||
" if tag == 'a' and ('class', 'dashboard_article_link') in attrs:\n",
|
||||
" href = list(filter(lambda x: x[0] == 'href', attrs))[0][1]\n",
|
||||
" self.articles.append(href)\n",
|
||||
" \n",
|
||||
"base_url = \"http://seekingalpha.com/author/wall-street-breakfast/articles\"\n",
|
||||
"article_page_urls = [base_url] + [base_url + '/{}'.format(i) for i in range(2, 20)]\n",
|
||||
"\n",
|
||||
"global_articles = []\n",
|
||||
"for page in article_page_urls:\n",
|
||||
" # We need to switch the user agent, as SA blocks the standard requests agent\n",
|
||||
" articles_html = requests.get(page,\n",
|
||||
" headers={\"User-Agent\": \"Wget/1.13.4\"})\n",
|
||||
" parser = ArticleListParser()\n",
|
||||
" parser.feed(articles_html.text)\n",
|
||||
" global_articles += (parser.articles)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"collapsed": false,
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class ArticleReturnParser(HTMLParser):\n",
|
||||
" \"Given an article, parse out the futures returns in it\"\n",
|
||||
" \n",
|
||||
" record_font_tags = False\n",
|
||||
" in_font_tag = False\n",
|
||||
" counter = 0\n",
|
||||
" # data = {} # See __init__\n",
|
||||
" \n",
|
||||
" def __init__(self, *args, **kwargs):\n",
|
||||
" super().__init__(*args, **kwargs)\n",
|
||||
" self.data = {}\n",
|
||||
" \n",
|
||||
" def handle_starttag(self, tag, attrs):\n",
|
||||
" if tag == 'span' and ('itemprop', 'datePublished') in attrs:\n",
|
||||
" date_string = list(filter(lambda x: x[0] == 'content', attrs))[0][1]\n",
|
||||
" date = dtparser.parse(date_string)\n",
|
||||
" self.data['date'] = date\n",
|
||||
" \n",
|
||||
" self.in_font_tag = tag == 'font'\n",
|
||||
" \n",
|
||||
" def safe_float(self, string):\n",
|
||||
" try:\n",
|
||||
" return float(string[:-1]) / 100\n",
|
||||
" except ValueError:\n",
|
||||
" return np.NaN\n",
|
||||
" \n",
|
||||
" def handle_data(self, content):\n",
|
||||
" if not self.record_font_tags and \"Futures at 6\" in content:\n",
|
||||
" self.record_font_tags = True\n",
|
||||
" \n",
|
||||
" if self.record_font_tags and self.in_font_tag:\n",
|
||||
" if self.counter == 0:\n",
|
||||
" self.data['DOW'] = self.safe_float(content)\n",
|
||||
" elif self.counter == 1:\n",
|
||||
" self.data['S&P'] = self.safe_float(content)\n",
|
||||
" elif self.counter == 2:\n",
|
||||
" self.data['NASDAQ'] = self.safe_float(content)\n",
|
||||
" elif self.counter == 3:\n",
|
||||
" self.data['Crude'] = self.safe_float(content)\n",
|
||||
" elif self.counter == 4:\n",
|
||||
" self.data['Gold'] = self.safe_float(content)\n",
|
||||
" \n",
|
||||
" self.counter += 1\n",
|
||||
" \n",
|
||||
" def handle_endtag(self, tag):\n",
|
||||
" self.in_font_tag = False\n",
|
||||
"\n",
|
||||
"def retrieve_data(url):\n",
|
||||
" sa = \"http://seekingalpha.com\"\n",
|
||||
" article_html = requests.get(sa + url,\n",
|
||||
" headers={\"User-Agent\": \"Wget/1.13.4\"})\n",
|
||||
" parser = ArticleReturnParser()\n",
|
||||
" parser.feed(article_html.text)\n",
|
||||
" parser.data.update({\"url\": url})\n",
|
||||
" parser.data.update({\"text\": article_html.text})\n",
|
||||
" return parser.data\n",
|
||||
"\n",
|
||||
"# This copy **MUST** be in place. I'm not sure why,\n",
|
||||
"# as you'd think that the data being returned would already\n",
|
||||
"# represent a different memory location. Even so, it blows up\n",
|
||||
"# if you don't do this.\n",
|
||||
"article_list = list(set(global_articles))\n",
|
||||
"article_data = [copy(retrieve_data(url)) for url in article_list]\n",
|
||||
"# If there's an issue downloading the article, drop it.\n",
|
||||
"article_df = pd.DataFrame.from_dict(article_data).dropna()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Fetching the Returns data\n",
|
||||
"\n",
|
||||
"Now that we have the futures data, we're going to compare across 4 different indices - the S&P 500 index, Dow Jones Industrial, Russell 2000, and NASDAQ 100. Let's get the data off of Quandl to make things easier!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# article_df is sorted by date, so we get the first row.\n",
|
||||
"start_date = article_df.sort_values(by='date').iloc[0]['date'] - relativedelta(days=1)\n",
|
||||
"SPY = Quandl.get(\"GOOG/NYSE_SPY\", trim_start=start_date)\n",
|
||||
"DJIA = Quandl.get(\"GOOG/AMS_DIA\", trim_start=start_date)\n",
|
||||
"RUSS = Quandl.get(\"GOOG/AMEX_IWM\", trim_start=start_date)\n",
|
||||
"NASDAQ = Quandl.get(\"GOOG/EPA_QQQ\", trim_start=start_date)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Running the Comparison\n",
|
||||
"\n",
|
||||
"There are two types of tests I want to determine: How accurate each futures category is at predicting the index's opening change over the close before, and predicting the index's daily return.\n",
|
||||
"\n",
|
||||
"Let's first calculate how good each future is at predicting the opening return over the previous day. I expect that the futures will be more than 50% accurate, since the information is recorded 3 hours before the markets open."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Articles Checked: \n",
|
||||
" DJIA NASDAQ RUSS SPY\n",
|
||||
"Crude 268 268 271 271\n",
|
||||
"DOW 268 268 271 271\n",
|
||||
"Gold 268 268 271 271\n",
|
||||
"NASDAQ 268 268 271 271\n",
|
||||
"S&P 268 268 271 271\n",
|
||||
"\n",
|
||||
"Prediction Accuracy:\n",
|
||||
" DJIA NASDAQ RUSS SPY\n",
|
||||
"Crude 0.544776 0.522388 0.601476 0.590406\n",
|
||||
"DOW 0.611940 0.604478 0.804428 0.841328\n",
|
||||
"Gold 0.462687 0.455224 0.464945 0.476015\n",
|
||||
"NASDAQ 0.615672 0.608209 0.797048 0.830258\n",
|
||||
"S&P 0.604478 0.597015 0.811808 0.848708\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"def calculate_opening_ret(frame):\n",
|
||||
" # I'm not a huge fan of the appending for loop,\n",
|
||||
" # but it's a bit verbose for a comprehension\n",
|
||||
" data = {}\n",
|
||||
" for i in range(1, len(frame)):\n",
|
||||
" date = frame.iloc[i].name\n",
|
||||
" prior_close = frame.iloc[i-1]['Close']\n",
|
||||
" open_val = frame.iloc[i]['Open']\n",
|
||||
" data[date] = (open_val - prior_close) / prior_close\n",
|
||||
" \n",
|
||||
" return data\n",
|
||||
"\n",
|
||||
"SPY_open_ret = calculate_opening_ret(SPY)\n",
|
||||
"DJIA_open_ret = calculate_opening_ret(DJIA)\n",
|
||||
"RUSS_open_ret = calculate_opening_ret(RUSS)\n",
|
||||
"NASDAQ_open_ret = calculate_opening_ret(NASDAQ)\n",
|
||||
"\n",
|
||||
"def signs_match(list_1, list_2):\n",
|
||||
" # This is a surprisingly difficult task - we have to match\n",
|
||||
" # up the dates in order to check if opening returns actually match\n",
|
||||
" index_dict_dt = {key.to_datetime(): list_2[key] for key in list_2.keys()}\n",
|
||||
" \n",
|
||||
" matches = []\n",
|
||||
" for row in list_1.iterrows():\n",
|
||||
" row_dt = row[1][1]\n",
|
||||
" row_value = row[1][0]\n",
|
||||
" index_dt = datetime(row_dt.year, row_dt.month, row_dt.day)\n",
|
||||
" if index_dt in list_2:\n",
|
||||
" index_value = list_2[index_dt]\n",
|
||||
" if (row_value > 0 and index_value > 0) or \\\n",
|
||||
" (row_value < 0 and index_value < 0) or \\\n",
|
||||
" (row_value == 0 and index_value == 0):\n",
|
||||
" matches += [1]\n",
|
||||
" else:\n",
|
||||
" matches += [0]\n",
|
||||
" #print(\"{}\".format(list_2[index_dt]))\n",
|
||||
" return matches\n",
|
||||
" \n",
|
||||
" \n",
|
||||
"prediction_dict = {}\n",
|
||||
"matches_dict = {}\n",
|
||||
"count_dict = {}\n",
|
||||
"index_dict = {\"SPY\": SPY_open_ret, \"DJIA\": DJIA_open_ret, \"RUSS\": RUSS_open_ret, \"NASDAQ\": NASDAQ_open_ret}\n",
|
||||
"indices = [\"SPY\", \"DJIA\", \"RUSS\", \"NASDAQ\"]\n",
|
||||
"futures = [\"Crude\", \"Gold\", \"DOW\", \"NASDAQ\", \"S&P\"]\n",
|
||||
"for index in indices:\n",
|
||||
" matches_dict[index] = {future: signs_match(article_df[[future, 'date']],\n",
|
||||
" index_dict[index]) for future in futures}\n",
|
||||
" count_dict[index] = {future: len(matches_dict[index][future]) for future in futures}\n",
|
||||
" prediction_dict[index] = {future: np.mean(matches_dict[index][future])\n",
|
||||
" for future in futures}\n",
|
||||
"print(\"Articles Checked: \")\n",
|
||||
"print(pd.DataFrame.from_dict(count_dict))\n",
|
||||
"print()\n",
|
||||
"print(\"Prediction Accuracy:\")\n",
|
||||
"print(pd.DataFrame.from_dict(prediction_dict))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This data is very interesting. Some insights:\n",
|
||||
"\n",
|
||||
"- Both DOW and NASDAQ futures are pretty bad at predicting their actual market openings\n",
|
||||
"- NASDAQ and Dow are fairly unpredictable; Russell 2000 and S&P are very predictable\n",
|
||||
"- Gold is a poor predictor in general - intuitively Gold should move inverse to the market, but it appears to be about as accurate as a coin flip.\n",
|
||||
"\n",
|
||||
"All said though it appears that futures data is important for determining market direction for both the S&P 500 and Russell 2000. Cramer is half-right: futures data isn't very helpful for the Dow and NASDAQ indices, but is great for the S&P and Russell indices.\n",
|
||||
"\n",
|
||||
"# The next step - Predicting the close\n",
|
||||
"\n",
|
||||
"Given the code we currently have, I'd like to predict the close of the market as well. We can re-use most of the code, so let's see what happens:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Articles Checked:\n",
|
||||
" DJIA NASDAQ RUSS SPY\n",
|
||||
"Crude 268 268 271 271\n",
|
||||
"DOW 268 268 271 271\n",
|
||||
"Gold 268 268 271 271\n",
|
||||
"NASDAQ 268 268 271 271\n",
|
||||
"S&P 268 268 271 271\n",
|
||||
"\n",
|
||||
"Prediction Accuracy:\n",
|
||||
" DJIA NASDAQ RUSS SPY\n",
|
||||
"Crude 0.533582 0.529851 0.501845 0.542435\n",
|
||||
"DOW 0.589552 0.608209 0.535055 0.535055\n",
|
||||
"Gold 0.455224 0.451493 0.483395 0.512915\n",
|
||||
"NASDAQ 0.582090 0.626866 0.531365 0.538745\n",
|
||||
"S&P 0.585821 0.608209 0.535055 0.535055\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"def calculate_closing_ret(frame):\n",
|
||||
" # I'm not a huge fan of the appending for loop,\n",
|
||||
" # but it's a bit verbose for a comprehension\n",
|
||||
" data = {}\n",
|
||||
" for i in range(0, len(frame)):\n",
|
||||
" date = frame.iloc[i].name\n",
|
||||
" open_val = frame.iloc[i]['Open']\n",
|
||||
" close_val = frame.iloc[i]['Close']\n",
|
||||
" data[date] = (close_val - open_val) / open_val\n",
|
||||
" \n",
|
||||
" return data\n",
|
||||
"\n",
|
||||
"SPY_close_ret = calculate_closing_ret(SPY)\n",
|
||||
"DJIA_close_ret = calculate_closing_ret(DJIA)\n",
|
||||
"RUSS_close_ret = calculate_closing_ret(RUSS)\n",
|
||||
"NASDAQ_close_ret = calculate_closing_ret(NASDAQ)\n",
|
||||
"\n",
|
||||
"def signs_match(list_1, list_2):\n",
|
||||
" # This is a surprisingly difficult task - we have to match\n",
|
||||
" # up the dates in order to check if opening returns actually match\n",
|
||||
" index_dict_dt = {key.to_datetime(): list_2[key] for key in list_2.keys()}\n",
|
||||
" \n",
|
||||
" matches = []\n",
|
||||
" for row in list_1.iterrows():\n",
|
||||
" row_dt = row[1][1]\n",
|
||||
" row_value = row[1][0]\n",
|
||||
" index_dt = datetime(row_dt.year, row_dt.month, row_dt.day)\n",
|
||||
" if index_dt in list_2:\n",
|
||||
" index_value = list_2[index_dt]\n",
|
||||
" if (row_value > 0 and index_value > 0) or \\\n",
|
||||
" (row_value < 0 and index_value < 0) or \\\n",
|
||||
" (row_value == 0 and index_value == 0):\n",
|
||||
" matches += [1]\n",
|
||||
" else:\n",
|
||||
" matches += [0]\n",
|
||||
" #print(\"{}\".format(list_2[index_dt]))\n",
|
||||
" return matches\n",
|
||||
" \n",
|
||||
" \n",
|
||||
"matches_dict = {}\n",
|
||||
"count_dict = {}\n",
|
||||
"prediction_dict = {}\n",
|
||||
"index_dict = {\"SPY\": SPY_close_ret, \"DJIA\": DJIA_close_ret,\n",
|
||||
" \"RUSS\": RUSS_close_ret, \"NASDAQ\": NASDAQ_close_ret}\n",
|
||||
"indices = [\"SPY\", \"DJIA\", \"RUSS\", \"NASDAQ\"]\n",
|
||||
"futures = [\"Crude\", \"Gold\", \"DOW\", \"NASDAQ\", \"S&P\"]\n",
|
||||
"for index in indices:\n",
|
||||
" matches_dict[index] = {future: signs_match(article_df[[future, 'date']],\n",
|
||||
" index_dict[index]) for future in futures}\n",
|
||||
" count_dict[index] = {future: len(matches_dict[index][future]) for future in futures}\n",
|
||||
" prediction_dict[index] = {future: np.mean(matches_dict[index][future])\n",
|
||||
" for future in futures}\n",
|
||||
" \n",
|
||||
"print(\"Articles Checked:\")\n",
|
||||
"print(pd.DataFrame.from_dict(count_dict))\n",
|
||||
"print()\n",
|
||||
"print(\"Prediction Accuracy:\")\n",
|
||||
"print(pd.DataFrame.from_dict(prediction_dict))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Well, it appears that the futures data is terrible at predicting market close. NASDAQ predicting NASDAQ is the most interesting data point, but 63% accuracy isn't accurate enough to make money consistently.\n",
|
||||
"\n",
|
||||
"# Final sentiments\n",
|
||||
"\n",
|
||||
"The data bears out very close to what I expected would happen:\n",
|
||||
"\n",
|
||||
"- Futures data is more accurate than a coin flip for predicting openings, which makes sense since it is recorded only 3 hours before the actual opening\n",
|
||||
"- Futures data is about as accurate as a coin flip for predicting closings, which means there is no money to be made in trying to predict the market direction for the day given the futures data.\n",
|
||||
"\n",
|
||||
"In summary:\n",
|
||||
"\n",
|
||||
"- Cramer is half right: Futures data is not good for predicting the market open of the Dow and NASDAQ indices. Contrary to Cramer though, it is very good for predicting the S&P and Russell indices - we can achieve an accuracy slightly over 80% for each. \n",
|
||||
"- Making money in the market is hard. We can't just go to the futures and treat them as an oracle for where the market will close.\n",
|
||||
"\n",
|
||||
"I hope you've enjoyed this, I quite enjoyed taking a deep dive in the analytics this way. I'll be posting more soon!"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.5.0"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
734
content/notebooks/2016-1-1-complaining-about-weather.ipynb
Normal file
734
content/notebooks/2016-1-1-complaining-about-weather.ipynb
Normal file
File diff suppressed because one or more lines are too long
721
content/notebooks/2016-1-23-cloudy-in-seattle.ipynb
Normal file
721
content/notebooks/2016-1-23-cloudy-in-seattle.ipynb
Normal file
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
260
content/notebooks/2016-2-3-guaranteed-money-maker.ipynb
Normal file
260
content/notebooks/2016-2-3-guaranteed-money-maker.ipynb
Normal file
@ -0,0 +1,260 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### If you can see into the future, that is.\n",
|
||||
"\n",
|
||||
"My previous class in Stochastic Calculus covered a lot of interesting topics, and the important one for today\n",
|
||||
"is the [Gambler's Ruin][1] problem. If you're interested in some of the theory behind it, also make sure to check out\n",
|
||||
"[random walks][2]. The important bit is that we studied the [Martingale Betting Strategy][3], which describes for us\n",
|
||||
"a **guaranteed way** to <span style='font-size: x-small'>eventually</span> make money.\n",
|
||||
"\n",
|
||||
"The strategy goes like this: You are going to toss a fair coin with a friend. If you guess heads or tails correctly, you get back double the money you bet. If you guess incorrectly, you lose money. How should you bet?\n",
|
||||
"\n",
|
||||
"The correct answer is that you should double your bet each time you lose. Then when you finally win, you'll be guaranteed to make back everything you lost and then $1 extra! Consider the scenario:\n",
|
||||
"\n",
|
||||
"1. You bet $1, and guess incorrectly. You're 1 dollar in the hole.\n",
|
||||
"2. You bet $2, and guess incorrectly. You're 3 dollars in the hole now.\n",
|
||||
"3. You bet $4, and guess incorrectly. You're 7 dollars in the hole.\n",
|
||||
"4. You bet $8, and guess correctly! You now get back those 8 dollars you bet, plus 8 extra for winning, for a **total profit of one dollar**!\n",
|
||||
"\n",
|
||||
"Mathematically, we can prove that as long as you have unlimited money to bet, you are guaranteed to make money.\n",
|
||||
"\n",
|
||||
"# Applying the Martingale Strategy\n",
|
||||
"\n",
|
||||
"But we're all realistic people, and once you start talking about \"unlimited money\" eyebrows should be raised. Even still, this is an interesting strategy to investigate, and I want to apply it to the stock market. As long as we can guarantee there's a single day in which the stock goes up, we should be able to make money right? The question is just how much we have to invest to guarantee this.\n",
|
||||
"\n",
|
||||
"Now it's time for the math. We'll use the following definitions:\n",
|
||||
"\n",
|
||||
"- $o_i$ = the share price at the opening of day $i$\n",
|
||||
"- $c_i$ = the share price at the close of day $i$\n",
|
||||
"- $d_i$ = the amount of money we want to invest at the beginning of day $i$\n",
|
||||
"\n",
|
||||
"With those definitions in place, I'd like to present the formula that is **guaranteed to make you money**. I call it *Bradlee's Investment Formula*:\n",
|
||||
"\n",
|
||||
"$c_n \\sum_{i=1}^n \\frac{d_i}{o_i} > \\sum_{i=1}^{n} d_i$\n",
|
||||
"\n",
|
||||
"It might not look like much, but if you can manage to make it so that this formula holds true, you will be guaranteed to make money. The intuition behind the formula is this: The closing share price times the number of shares you have purchased ends up greater than the amount of money you invested.\n",
|
||||
"\n",
|
||||
"That is, on day $n$, <span style='font-size: x-small'>if you know what the closing price will be</span> you can set up the amount of money you invest that day to **guarantee you make money**. I'll even teach you to figure out how much money that is! Take a look:\n",
|
||||
"\n",
|
||||
"$\n",
|
||||
"\\begin{align}\n",
|
||||
"c_n \\sum_{i=1}^{n-1} \\frac{d_i}{o_i} + \\frac{c_nd_n}{o_n} &> \\sum_{i=1}^{n-1}d_i + d_n\\\\\n",
|
||||
"\\frac{c_nd_n}{o_n} - d_n &> \\sum_{i=1}^{n-1}(d_i - \\frac{c_nd_i}{o_i})\\\\\n",
|
||||
"d_n (\\frac{c_n - o_n}{o_n}) &> \\sum_{i=1}^{n-1} d_i(1 - \\frac{c_n}{o_i})\\\\\n",
|
||||
"d_n &> \\frac{o_n}{c_n - o_n} \\sum_{i=1}^{n-1} d_i(1 - \\frac{1}{o_i})\n",
|
||||
"\\end{align}$\n",
|
||||
"\n",
|
||||
"If you invest exactly $d_n$ that day, you'll break even. But if you can make sure the money you invest is greater than that quantity on the right <span style='font-size: x-small'>(which requires that you have a crystal ball tell you the stock's closing price)</span> you are **guaranteed to make money!**\n",
|
||||
"\n",
|
||||
"# Interesting Implications\n",
|
||||
"\n",
|
||||
"On a more serious note though, the formula above tells us a couple of interesting things:\n",
|
||||
"\n",
|
||||
"1. It's impossible to make money without the closing price at some point being greater than the opening price (or vice-versa if you are short selling) - there is no amount of money you can invest that will turn things in your favor.\n",
|
||||
"2. Close prices of the past aren't important if you're concerned about the bottom line. While chart technicians use price history to make judgment calls, in the end, the closing price on anything other than the last day is irrelevant.\n",
|
||||
"3. It's possible to make money as long as there is a single day where the closing price is greater than the opening price! You might have to invest a lot to do so, but it's possible.\n",
|
||||
"4. You must make a prediction about where the stock will close at if you want to know how much to invest. That is, we can set up our investment for the day to make money if the stock goes up 1%, but if it only goes up .5% we'll still lose money.\n",
|
||||
"5. It's possible the winning move is to scale back your position. Consider the scenario:\n",
|
||||
" - You invest money and the stock closes down the day .5%\n",
|
||||
" - You invest tomorrow expecting the stock to go up 1%\n",
|
||||
" - The winning investment to break even (assuming a 1% increase) is to scale back the position, since the shares you purchased at the beginning would then be profitable\n",
|
||||
"\n",
|
||||
"# Running the simulation\n",
|
||||
"\n",
|
||||
"So now that we've defined our investment formula, we need to tweak a couple things in order to make an investment strategy we can actually work with. There are two issues we need to address:\n",
|
||||
"\n",
|
||||
"1. The formula only tells us how much to invest if we want to break even ($d_n$). If we actually want to turn a profit, we need to invest more than that, which we will refer to as the **bias**.\n",
|
||||
"2. The formula assumes we know what the closing price will be on any given day. If we don't know this, we can still invest assuming the stock price will close at a level we choose. If the price doesn't meet this objective, we try again tomorrow! This predetermined closing price will be referred to as the **expectation**.\n",
|
||||
"\n",
|
||||
"Now that we've defined our *bias* and *expectation*, we can actually build a strategy we can simulate. Much like the martingale strategy told you to bet twice your previous bet in order to make money, we've designed a system that tells us how much to bet in order to make money as well.\n",
|
||||
"\n",
|
||||
"Now, let's get to the code!\n",
|
||||
"\n",
|
||||
"[1]: https://en.wikipedia.org/wiki/Gambler's_ruin\n",
|
||||
"[2]: https://en.wikipedia.org/wiki/Random_walk\n",
|
||||
"[3]: https://en.wikipedia.org/wiki/Martingale_%28betting_system%29"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"using Quandl\n",
|
||||
"api_key = \"\"\n",
|
||||
"daily_investment = function(current_open, current_close, purchase_history, open_history)\n",
|
||||
" # We're not going to safeguard against divide by 0 - that's the user's responsibility\n",
|
||||
" t1 = current_close / current_open - 1\n",
|
||||
" t2 = sum(purchase_history - purchase_history*current_close ./ open_history)\n",
|
||||
" return t2 / t1\n",
|
||||
"end;"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And let's code a way to run simulations quickly:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"is_profitable = function(current_price, purchase_history, open_history)\n",
|
||||
" shares = sum(purchase_history ./ open_history)\n",
|
||||
" return current_price*shares > sum(purchase_history)\n",
|
||||
"end\n",
|
||||
"\n",
|
||||
"simulate = function(name, start, init, expected, bias)\n",
|
||||
" ticker_info = quandlget(name, from=start, api_key=api_key)\n",
|
||||
" open_vals = ticker_info[\"Open\"].values\n",
|
||||
" close_vals = ticker_info[\"Close\"].values\n",
|
||||
" invested = [init]\n",
|
||||
" \n",
|
||||
" # The simulation stops once we've made a profit\n",
|
||||
" day = 1\n",
|
||||
" profitable = is_profitable(close_vals[day], invested, open_vals[1:length(invested)]) ||\n",
|
||||
" is_profitable(open_vals[day+1], invested, open_vals[1:length(invested)])\n",
|
||||
" while !profitable\n",
|
||||
" expected_close = open_vals[day+1] * expected\n",
|
||||
" todays_purchase = daily_investment(open_vals[day+1], expected_close, invested, open_vals[1:day])\n",
|
||||
" invested = [invested; todays_purchase + bias]\n",
|
||||
" # expected_profit = expected_close * sum(invested ./ open_vals[1:length(invested)]) - sum(invested)\n",
|
||||
" day += 1\n",
|
||||
" profitable = is_profitable(close_vals[day], invested, open_vals[1:length(invested)]) ||\n",
|
||||
" is_profitable(open_vals[day+1], invested, open_vals[1:length(invested)])\n",
|
||||
" end\n",
|
||||
" \n",
|
||||
" shares = sum(invested ./ open_vals[1:length(invested)])\n",
|
||||
" max_profit = max(close_vals[day], open_vals[day+1])\n",
|
||||
" profit = shares * max_profit - sum(invested)\n",
|
||||
" return (invested, profit)\n",
|
||||
"end\n",
|
||||
"\n",
|
||||
"sim_summary = function(investments, profit)\n",
|
||||
" leverages = [sum(investments[1:i]) for i=1:length(investments)]\n",
|
||||
" max_leverage = maximum(leverages) / investments[1]\n",
|
||||
" println(\"Max leverage: $(max_leverage)\")\n",
|
||||
" println(\"Days invested: $(length(investments))\")\n",
|
||||
" println(\"Profit: $profit\")\n",
|
||||
"end;"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now, let's get some data and run a simulation! Our first test:\n",
|
||||
"\n",
|
||||
"- We'll invest 100 dollars in LMT, and expect that the stock will close up 1% every day. We'll invest $d_n$ + 10 dollars every day that we haven't turned a profit, and end the simulation once we've made a profit."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Max leverage: 5.590373200042106\n",
|
||||
"Days invested: 5\n",
|
||||
"Profit: 0.6894803101560001\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"investments, profit = simulate(\"YAHOO/LMT\", Date(2015, 11, 29), 100, 1.01, 10)\n",
|
||||
"sim_summary(investments, profit)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The result: We need to invest 5.6x our initial position over a period of 5 days to make approximately 69¢\n",
|
||||
"\n",
|
||||
"- Now let's try the same thing, but we'll assume the stock closes up 2% instead."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Max leverage: 1.854949900247809\n",
|
||||
"Days invested: 25\n",
|
||||
"Profit: 0.08304813163696423\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"investments, profit = simulate(\"YAHOO/LMT\", Date(2015, 11, 29), 100, 1.02, 10)\n",
|
||||
"sim_summary(investments, profit)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"In this example, we only get up to a 1.85x leveraged position, but it takes 25 days to turn a profit of 8¢"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Summary\n",
|
||||
"\n",
|
||||
"We've defined an investment strategy that can tell us how much to invest when we know what the closing position of a stock will be. We can tweak the strategy to actually make money, but plenty of work needs to be done so that we can optimize the money invested.\n",
|
||||
"\n",
|
||||
"In the next post I'm going to post more information about some backtests and strategy tests on this strategy (unless of course this experiment actually produces a significant profit potential, and then I'm keeping it for myself)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Side note and disclaimer\n",
|
||||
"\n",
|
||||
"The claims made in this presentation about being able to guarantee making money are intended as a joke and do not constitute investment advice of any sort."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Julia 0.4.2",
|
||||
"language": "julia",
|
||||
"name": "julia-0.4"
|
||||
},
|
||||
"language_info": {
|
||||
"file_extension": ".jl",
|
||||
"mimetype": "application/julia",
|
||||
"name": "julia",
|
||||
"version": "0.4.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
103
develop_server.sh
Executable file
103
develop_server.sh
Executable file
@ -0,0 +1,103 @@
|
||||
#!/usr/bin/env bash
##
# This section should match your Makefile
##
PY="${PY:-python}"
PELICAN="${PELICAN:-pelican}"
PELICANOPTS=

BASEDIR="$(pwd)"
INPUTDIR="$BASEDIR/content"
OUTPUTDIR="$BASEDIR/output"
CONFFILE="$BASEDIR/pelicanconf.py"

###
# Don't change stuff below here unless you are sure
###

# PID files tracking the background HTTP server and the Pelican watcher.
SRV_PID="$BASEDIR/srv.pid"
PELICAN_PID="$BASEDIR/pelican.pid"
|
||||
|
||||
# Print usage information and exit with status 3.
function usage(){
    cat <<EOF
usage: $0 (stop) (start) (restart) [port]
This starts Pelican in debug and reload mode and then launches
an HTTP server to help site development. It doesn't read
your Pelican settings, so if you edit any paths in your Makefile
you will need to edit your settings as well.
EOF
    exit 3
}
|
||||
|
||||
# Succeed iff a process with PID $1 is currently running (signal 0 probe).
function alive() {
    kill -0 "$1" >/dev/null 2>&1
}
|
||||
|
||||
# Stop one background process recorded in a PID file.
#   $1 - path to the PID file
#   $2 - message printed when killing a live process
#   $3 - message printed when the PID file is missing
# Removes the PID file afterwards; reports a stale PID when the process
# is already gone. (Original duplicated this logic twice and let `cat`
# write its own error to stderr when the file was absent.)
function stop_from_pidfile(){
    local pidfile=$1 kill_msg=$2 missing_msg=$3
    local pid
    if pid=$(cat "$pidfile" 2>/dev/null); then
        if alive "$pid"; then
            echo "$kill_msg"
            kill "$pid"
        else
            echo "Stale PID, deleting"
        fi
        rm "$pidfile"
    else
        echo "$missing_msg"
    fi
}

# Shut down both the development HTTP server and the Pelican
# auto-reload process, cleaning up their PID files.
function shut_down(){
    stop_from_pidfile "$SRV_PID" "Stopping HTTP server" "HTTP server PIDFile not found"
    stop_from_pidfile "$PELICAN_PID" "Killing Pelican" "Pelican PIDFile not found"
}
|
||||
|
||||
# Launch Pelican in debug+autoreload mode and the development HTTP server.
#   $1 - optional port for pelican.server (its default when empty)
# Records both PIDs in $PELICAN_PID / $SRV_PID; returns 1 if either
# process failed to start. Path variables are quoted so paths with
# spaces work; the original unchecked `cd` could start the server in
# the wrong directory.
function start_up(){
    local port=$1
    echo "Starting up Pelican and HTTP server"
    # $PELICANOPTS stays unquoted on purpose: it may hold several options.
    $PELICAN --debug --autoreload -r "$INPUTDIR" -o "$OUTPUTDIR" -s "$CONFFILE" $PELICANOPTS &
    pelican_pid=$!
    echo $pelican_pid > "$PELICAN_PID"
    cd "$OUTPUTDIR" || return 1
    # $port stays unquoted: when empty it must vanish, not become "".
    $PY -m pelican.server $port &
    srv_pid=$!
    echo $srv_pid > "$SRV_PID"
    cd "$BASEDIR"
    # Give both processes a moment to fail fast (bad config, busy port).
    sleep 1
    if ! alive $pelican_pid ; then
        echo "Pelican didn't start. Is the Pelican package installed?"
        return 1
    elif ! alive $srv_pid ; then
        echo "The HTTP server didn't start. Is there another service using port" $port "?"
        return 1
    fi
    echo 'Pelican and HTTP server processes now running in background.'
}
|
||||
|
||||
###
# MAIN
###
# Require one action argument plus an optional port.
[[ ($# -eq 0) || ($# -gt 2) ]] && usage
port=''
[[ $# -eq 2 ]] && port=$2

case "$1" in
    stop)
        shut_down
        ;;
    restart)
        shut_down
        start_up $port
        ;;
    start)
        # If only half the pair came up, tear everything back down.
        if ! start_up $port; then
            shut_down
        fi
        ;;
    *)
        usage
        ;;
esac
|
94
fabfile.py
vendored
Normal file
94
fabfile.py
vendored
Normal file
@ -0,0 +1,94 @@
|
||||
from fabric.api import *
|
||||
import fabric.contrib.project as project
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import SocketServer
|
||||
|
||||
from pelican.server import ComplexHTTPRequestHandler
|
||||
|
||||
# --- Local path configuration (can be absolute or relative to fabfile) ---
env.deploy_path = 'output'
DEPLOY_PATH = env.deploy_path

# --- Remote server configuration -----------------------------------------
production = 'root@localhost:22'
dest_path = '/var/www'

# --- Rackspace Cloud Files configuration settings ------------------------
env.cloudfiles_username = 'my_rackspace_username'
env.cloudfiles_api_key = 'my_rackspace_api_key'
env.cloudfiles_container = 'my_cloudfiles_container'

# --- Github Pages configuration ------------------------------------------
env.github_pages_branch = "gh-pages"

# Port used by the `serve` task's local HTTP server.
PORT = 8000
|
||||
|
||||
def clean():
    """Remove generated files.

    Deletes the deploy directory (if present) and then recreates it
    empty, so later tasks always start from a fresh ``output`` dir.
    """
    if os.path.isdir(DEPLOY_PATH):
        shutil.rmtree(DEPLOY_PATH)
    # Recreate unconditionally: the original only recreated the directory
    # when it had just been deleted, leaving `clean` a no-op (and no
    # output dir) on a fresh checkout.
    os.makedirs(DEPLOY_PATH)
|
||||
|
||||
def build():
    """Build the local (development) version of the site."""
    local('pelican -s pelicanconf.py')
|
||||
|
||||
def rebuild():
    """`clean` then `build`: regenerate the whole site from scratch."""
    clean()
    build()
|
||||
|
||||
def regenerate():
    """Watch for file modifications and regenerate the site automatically."""
    local('pelican -r -s pelicanconf.py')
|
||||
|
||||
def serve():
    """Serve site at http://localhost:8000/"""
    os.chdir(env.deploy_path)

    class AddressReuseTCPServer(SocketServer.TCPServer):
        # Allow quick restarts without "Address already in use" errors.
        allow_reuse_address = True

    httpd = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler)

    sys.stderr.write('Serving on port {0} ...\n'.format(PORT))
    httpd.serve_forever()
|
||||
|
||||
def reserve():
    """`build`, then `serve`: preview freshly generated output."""
    build()
    serve()
|
||||
|
||||
def preview():
    """Build the production version of the site (publishconf.py settings)."""
    local('pelican -s publishconf.py')
|
||||
|
||||
def cf_upload():
    """Publish the site to Rackspace Cloud Files via the swift CLI."""
    rebuild()
    # Credentials/container come from the env.cloudfiles_* settings above.
    with lcd(DEPLOY_PATH):
        local(
            'swift -v -A https://auth.api.rackspacecloud.com/v1.0 '
            '-U {cloudfiles_username} '
            '-K {cloudfiles_api_key} '
            'upload -c {cloudfiles_container} .'.format(**env)
        )
|
||||
|
||||
@hosts(production)
def publish():
    """Publish the production build to the remote host via rsync."""
    local('pelican -s publishconf.py')
    project.rsync_project(
        remote_dir=dest_path,
        exclude=".DS_Store",
        local_dir=DEPLOY_PATH.rstrip('/') + '/',
        delete=True,
        extra_opts='-c',  # rsync: compare by checksum, not mtime/size
    )
|
||||
|
||||
def gh_pages():
    """Publish the site to GitHub Pages using ghp-import."""
    rebuild()
    # Commit output/ onto the configured pages branch, then push it.
    local("ghp-import -b {github_pages_branch} {deploy_path}".format(**env))
    local("git push origin {github_pages_branch}".format(**env))
|
1
nest
Submodule
1
nest
Submodule
@ -0,0 +1 @@
|
||||
Subproject commit 2cd60edff81da1f848b34bfc6209435c352c84a9
|
1
pelican-plugins
Submodule
1
pelican-plugins
Submodule
@ -0,0 +1 @@
|
||||
Subproject commit db9127f60fd65d3f1756bcb5ad477b89348b1c7f
|
44
pelicanconf.py
Normal file
44
pelicanconf.py
Normal file
@ -0,0 +1,44 @@
|
||||
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals

# --- Site identity -------------------------------------------------------
AUTHOR = u'Bradlee Speice'
SITENAME = u'Bradlee Speice'
SITEURL = ''  # empty for local development; overridden in publishconf.py

PATH = 'content'

TIMEZONE = 'America/New_York'

DEFAULT_LANG = u'en'

# --- Feeds ---------------------------------------------------------------
# Feed generation is usually not desired when developing.
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None

# Blogroll
# LINKS = (('Pelican', 'http://getpelican.com/'),
#          ('Python.org', 'http://python.org/'),
#          ('Jinja2', 'http://jinja.pocoo.org/'),
#          ('You can modify those links in your config file', '#'),)

# Social widget
SOCIAL = (('Github', 'https://github.com/bspeice'),
          ('LinkedIn', 'https://www.linkedin.com/in/bradleespeice'),)

DEFAULT_PAGINATION = 10

# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True

# --- Plugins -------------------------------------------------------------
PLUGIN_PATHS = ['pelican-plugins/']
PLUGINS = ['liquid_tags.notebook']  # render {% notebook %} tags
NOTEBOOK_DIR = 'notebooks'

# --- Theme (nest) --------------------------------------------------------
THEME = 'nest'
#NEST_INDEX_HEADER_TITLE="Bradlee Speice"
NEST_INDEX_HEADER_SUBTITLE = "Exploring the intersection of Computer Science and Financial Engineering"
NEST_HEADER_LOGO = "images/logo.svg"
|
24
publishconf.py
Normal file
24
publishconf.py
Normal file
@ -0,0 +1,24 @@
|
||||
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals

# This file is only used if you use `make publish` or
# explicitly specify it as your config file.

import os
import sys

# Make the development settings importable, then layer the production
# overrides below on top of them.
sys.path.append(os.curdir)
from pelicanconf import *

SITEURL = 'https://bspeice.github.io'
RELATIVE_URLS = False

FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'

DELETE_OUTPUT_DIRECTORY = True

# Following items are often useful when publishing

#DISQUS_SITENAME = ""
#GOOGLE_ANALYTICS = ""
|
Loading…
Reference in New Issue
Block a user