Skip to content

Commit f922fa8

Browse files
Swap from representative tickers to asset IDs. /assets route.
1 parent b58b283 commit f922fa8

File tree

6 files changed

+178
-113
lines changed

6 files changed

+178
-113
lines changed

lib/efficient_frontier.py

Lines changed: 17 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -10,29 +10,34 @@ def upper_bounds(length):
1010
return np.ones(length).reshape(length, 1)
1111

1212
def format_mean_returns_dataframe(df, length):
    """Return the mean-returns Series as a `length` x 1 column vector (ndarray).

    The index is assumed to be pre-sorted upstream; the earlier
    `sort_index()` call was intentionally dropped in this revision.
    """
    return df.values.reshape(length, 1)
1416

1517
def format_covars_dataframe(df):
    """Return the covariance DataFrame as a plain 2-D ndarray.

    Rows and columns are assumed to be pre-sorted upstream; the earlier
    axis sorts were intentionally dropped in this revision.
    """
    return df.values
1721

18-
def format_resulting_weights(weights, tickers):
22+
def format_resulting_weights(weights, asset_ids):
    """Map each asset id to its portfolio weight.

    `weights` is a column vector (a sequence of one-element rows, e.g. an
    Nx1 array); the single value in row i becomes the allocation for
    asset_ids[i].
    """
    return {asset_ids[i]: row[0] for i, row in enumerate(weights)}
2630

2731
#######
2832

29-
def efficient_frontier(tickers, mean_returns, covariance_matrix):
33+
def efficient_frontier(asset_ids, mean_returns, covariance_matrix):
3034
# Format data
31-
number_of_tickers = mean_returns.size
32-
means = format_mean_returns_dataframe(mean_returns, number_of_tickers)
35+
36+
number_of_asset_ids = mean_returns.size
37+
means = format_mean_returns_dataframe(mean_returns, number_of_asset_ids)
3338
covars = format_covars_dataframe(covariance_matrix)
34-
lB = lower_bounds(number_of_tickers)
35-
uB = upper_bounds(number_of_tickers)
39+
lB = lower_bounds(number_of_asset_ids)
40+
uB = upper_bounds(number_of_asset_ids)
3641

3742
# Solve critical line algorithm
3843
cla = CLA(means, covars, lB, uB)
@@ -56,15 +61,15 @@ def efficient_frontier(tickers, mean_returns, covariance_matrix):
5661

5762
allocations = {}
5863
for i, weight in enumerate(allocation):
59-
allocations[tickers[i]] = weight
64+
allocations[asset_ids[i]] = weight
6065

6166
obj['allocations'] = allocations
6267

6368
portfolios.append(obj)
6469

6570
# Add the minimum variance portfolio
6671
var, weights = cla.getMinVar()
67-
allocations = format_resulting_weights(weights, tickers)
72+
allocations = format_resulting_weights(weights, asset_ids)
6873

6974
min_var_port = {
7075
"mu": np.dot(weights.T, means)[0,0],
@@ -74,16 +79,14 @@ def efficient_frontier(tickers, mean_returns, covariance_matrix):
7479

7580
# Add the maximum sharpe ratio portfolio
7681
sr, weights = cla.getMaxSR()
77-
allocations = format_resulting_weights(weights, tickers)
82+
allocations = format_resulting_weights(weights, asset_ids)
7883

7984
max_sr_port = {
8085
"mu": np.dot(weights.T, means)[0,0],
8186
"sigma": np.dot(weights.T, np.dot(covars, weights))[0,0]**0.5,
8287
"allocations": allocations
8388
}
8489

85-
# import pdb; pdb.set_trace()
86-
8790
# Return results
8891
return {
8992
"portfolios": portfolios,

lib/save_data.py

Lines changed: 0 additions & 59 deletions
This file was deleted.

old/save_csv_data.py

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
import os
2+
import redis
3+
import pandas
4+
5+
def write_csv_files(df, returns, correlation_matrix, covariance_matrix):
    """Dump all market-data frames as flat CSV files.

    Each run overwrites any previously written output in the `csv`
    directory next to this package.
    """
    csv_dir = os.path.join(os.path.dirname(__file__), os.pardir, 'csv')

    # Price/return histories carry a date index; label it explicitly.
    for filename, frame in (('prices.csv', df), ('returns.csv', returns)):
        frame.to_csv(os.path.join(csv_dir, filename), header=True, index=True,
                     index_label='Date', sep=',')

    # The square matrices keep their asset-id index without a label.
    for filename, frame in (('correlation_matrix.csv', correlation_matrix),
                            ('covariance_matrix.csv', covariance_matrix)):
        frame.to_csv(os.path.join(csv_dir, filename), header=True, index=True, sep=',')
File renamed without changes.

server.py

Lines changed: 38 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
###########
44

55
import os
6+
import json
67
import redis
78
import pandas as pd
89

@@ -27,49 +28,65 @@
2728
if not app.config['DEBUG']:
2829
sslify = SSLify(app)
2930

30-
redis_url = os.environ['REDIS_URL']
31-
redis_conn = redis.StrictRedis.from_url(redis_url)
31+
redis_conn = redis.StrictRedis.from_url(os.environ['REDIS_URL'])
3232

3333

3434
###################
3535
# UTILITY METHODS #
3636
###################
3737

3838
def check_for_authorization():
    """Abort with 403 unless the request carries the expected auth token.

    The token may arrive in the ``Authorization`` header or as an
    ``auth_token`` query parameter.  Returns True when authorized;
    otherwise ``abort(403)`` raises and control never reaches the caller.
    """
    import hmac  # stdlib; local import keeps the top-of-file imports untouched

    auth_token = os.environ['AUTH_TOKEN']
    provided_token = request.headers.get('Authorization') or request.args.get('auth_token')
    # compare_digest is constant-time: a plain == on an attacker-supplied
    # token can leak its contents through timing differences.
    if provided_token and hmac.compare_digest(provided_token, auth_token):
        return True
    return abort(403)
45+
46+
def covariance_matrix(asset_ids):
    """Load the cached covariance matrix and restrict it to `asset_ids`.

    The full matrix is stored in redis as JSON.  Rows and columns for any
    asset id not requested are dropped.  Covariance matrix is a *DataFrame*.
    """
    # Renamed from `json` so the local no longer shadows the stdlib json
    # module imported at the top of this file.
    raw = redis_conn.get('covariance_matrix')
    df = pd.read_json(raw)  # public alias of pd.io.json.read_json

    asset_ids_to_eliminate = list(set(df.index.values) - set(asset_ids))

    return df.drop(asset_ids_to_eliminate, axis=0).drop(asset_ids_to_eliminate, axis=1)
5656

57-
def mean_returns(tickers):
57+
def mean_returns(asset_ids):
    """Load the cached mean returns and restrict them to `asset_ids`.

    Stored in redis as JSON.  Mean returns is a *Series*; entries whose
    asset id was not requested are dropped.
    """
    # Renamed from `json` so the local no longer shadows the stdlib json
    # module imported at the top of this file.
    raw = redis_conn.get('mean_returns')
    series = pd.read_json(raw, typ='series')  # public alias of pd.io.json.read_json

    asset_ids_to_eliminate = list(set(series.index.values) - set(asset_ids))

    return series.drop(asset_ids_to_eliminate)
67+
68+
def build_efficient_frontier_for(asset_ids):
    """Compute the efficient frontier for `asset_ids` from the cached stats."""
    return efficient_frontier(
        asset_ids,
        mean_returns(asset_ids),
        covariance_matrix(asset_ids),
    )
6772

6873

6974
##########
7075
# ROUTES #
7176
##########
7277

78+
@app.route('/assets', methods=['GET'])
def assets_route():
    """Return the cached asset list to an authorized caller as JSON."""
    check_for_authorization()
    asset_list = json.loads(redis_conn.get('asset_list'))
    return jsonify(asset_list)
82+
83+
@app.route('/calc', methods=["POST"])
def cla_calc_route():
    """Run the CLA efficient-frontier calculation for the posted asset ids."""
    check_for_authorization()
    body = request.json
    asset_ids = body['asset_ids']
    app.logger.info("Received CLA calc request for: %s" % asset_ids)
    return jsonify(build_efficient_frontier_for(asset_ids))
89+
7390
@app.route('/')
def root():
    # Minimal landing response so the service answers at "/".
    return 'Hello World!'
@@ -78,20 +95,6 @@ def root():
7895
def health():
    # Health-check endpoint: unconditionally reports OK with HTTP 200.
    return "OK", 200
8097

81-
@app.route('/calc', methods=["POST"])
82-
def cla_calc_route():
83-
check_for_authorization()
84-
85-
j = request.json
86-
tickers = j['tickers']
87-
88-
app.logger.info("Received CLA calc request for tickers: %s" % tickers)
89-
90-
means = mean_returns(tickers)
91-
covars = covariance_matrix(tickers)
92-
93-
return jsonify( efficient_frontier(tickers, means, covars) )
94-
9598

9699
##########
97100
# LOADER #

0 commit comments

Comments
 (0)