From 708f352cc9f4a09af157d1c1356c0db0fbffdc52 Mon Sep 17 00:00:00 2001 From: Wentao-Xu Date: Sun, 10 Apr 2022 15:27:31 +0800 Subject: [PATCH 1/9] Commit the code of HIST and IGMTF on Alpha360 --- examples/benchmarks/HIST/README.md | 3 + examples/benchmarks/HIST/requirements.txt | 4 + .../HIST/workflow_config_hist_Alpha360.yaml | 92 +++ examples/benchmarks/IGMTF/README.md | 4 + examples/benchmarks/IGMTF/requirements.txt | 4 + .../IGMTF/workflow_config_igmtf_Alpha360.yaml | 89 +++ examples/benchmarks/README.md | 3 + qlib/contrib/model/pytorch_hist.py | 522 ++++++++++++++++++ qlib/contrib/model/pytorch_igmtf.py | 439 +++++++++++++++ 9 files changed, 1160 insertions(+) create mode 100644 examples/benchmarks/HIST/README.md create mode 100644 examples/benchmarks/HIST/requirements.txt create mode 100644 examples/benchmarks/HIST/workflow_config_hist_Alpha360.yaml create mode 100644 examples/benchmarks/IGMTF/README.md create mode 100644 examples/benchmarks/IGMTF/requirements.txt create mode 100644 examples/benchmarks/IGMTF/workflow_config_igmtf_Alpha360.yaml create mode 100644 qlib/contrib/model/pytorch_hist.py create mode 100644 qlib/contrib/model/pytorch_igmtf.py diff --git a/examples/benchmarks/HIST/README.md b/examples/benchmarks/HIST/README.md new file mode 100644 index 00000000000..87f9a35c9b8 --- /dev/null +++ b/examples/benchmarks/HIST/README.md @@ -0,0 +1,3 @@ +# HIST +* Code: [https://github.com/Wentao-Xu/HIST](https://github.com/Wentao-Xu/HIST) +* Paper: [HIST: A Graph-based Framework for Stock Trend Forecasting via Mining Concept-Oriented Shared Information](https://arxiv.org/abs/2110.13716). 
\ No newline at end of file diff --git a/examples/benchmarks/HIST/requirements.txt b/examples/benchmarks/HIST/requirements.txt new file mode 100644 index 00000000000..d2f37de61af --- /dev/null +++ b/examples/benchmarks/HIST/requirements.txt @@ -0,0 +1,4 @@ +pandas==1.1.2 +numpy==1.21.0 +scikit_learn==0.23.2 +torch==1.7.0 \ No newline at end of file diff --git a/examples/benchmarks/HIST/workflow_config_hist_Alpha360.yaml b/examples/benchmarks/HIST/workflow_config_hist_Alpha360.yaml new file mode 100644 index 00000000000..b3e96f48544 --- /dev/null +++ b/examples/benchmarks/HIST/workflow_config_hist_Alpha360.yaml @@ -0,0 +1,92 @@ +qlib_init: + provider_uri: "~/.qlib/qlib_data/cn_data" + region: cn +market: &market csi300 +benchmark: &benchmark SH000300 +data_handler_config: &data_handler_config + start_time: 2008-01-01 + end_time: 2020-08-01 + fit_start_time: 2008-01-01 + fit_end_time: 2014-12-31 + instruments: *market + infer_processors: + - class: RobustZScoreNorm + kwargs: + fields_group: feature + clip_outlier: true + - class: Fillna + kwargs: + fields_group: feature + learn_processors: + - class: DropnaLabel + - class: CSRankNorm + kwargs: + fields_group: label + label: ["Ref($close, -2) / Ref($close, -1) - 1"] +port_analysis_config: &port_analysis_config + strategy: + class: TopkDropoutStrategy + module_path: qlib.contrib.strategy + kwargs: + signal: + - + - + topk: 50 + n_drop: 5 + backtest: + start_time: 2017-01-01 + end_time: 2020-08-01 + account: 100000000 + benchmark: *benchmark + exchange_kwargs: + limit_threshold: 0.095 + deal_price: close + open_cost: 0.0005 + close_cost: 0.0015 + min_cost: 5 +task: + model: + class: HIST + module_path: qlib.contrib.model.pytorch_hist + kwargs: + d_feat: 6 + hidden_size: 64 + num_layers: 2 + dropout: 0 + n_epochs: 200 + lr: 1e-4 + early_stop: 20 + metric: ic + loss: mse + base_model: LSTM + model_path: "benchmarks/LSTM/model_lstm_csi300.pkl" + stock2concept: "benchmarks/HIST/qlib_csi300_stock2concept.npy" + stock_index: 
"benchmarks/HIST/qlib_csi300_stock_index.npy" + GPU: 0 + dataset: + class: DatasetH + module_path: qlib.data.dataset + kwargs: + handler: + class: Alpha360 + module_path: qlib.contrib.data.handler + kwargs: *data_handler_config + segments: + train: [2008-01-01, 2014-12-31] + valid: [2015-01-01, 2016-12-31] + test: [2017-01-01, 2020-08-01] + record: + - class: SignalRecord + module_path: qlib.workflow.record_temp + kwargs: + model: + dataset: + - class: SigAnaRecord + module_path: qlib.workflow.record_temp + kwargs: + ana_long_short: False + ann_scaler: 252 + - class: PortAnaRecord + module_path: qlib.workflow.record_temp + kwargs: + config: *port_analysis_config \ No newline at end of file diff --git a/examples/benchmarks/IGMTF/README.md b/examples/benchmarks/IGMTF/README.md new file mode 100644 index 00000000000..dbda8e6b4a3 --- /dev/null +++ b/examples/benchmarks/IGMTF/README.md @@ -0,0 +1,4 @@ +# IGMTF +* Code: [https://github.com/Wentao-Xu/IGMTF](https://github.com/Wentao-Xu/IGMTF) +* Paper: [IGMTF: An Instance-wise Graph-based Framework for +Multivariate Time Series Forecasting](https://arxiv.org/abs/2109.06489). 
\ No newline at end of file diff --git a/examples/benchmarks/IGMTF/requirements.txt b/examples/benchmarks/IGMTF/requirements.txt new file mode 100644 index 00000000000..bfdf94156e3 --- /dev/null +++ b/examples/benchmarks/IGMTF/requirements.txt @@ -0,0 +1,4 @@ +pandas==1.1.2 +numpy==1.21.0 +scikit_learn==0.23.2 +torch==1.7.0 diff --git a/examples/benchmarks/IGMTF/workflow_config_igmtf_Alpha360.yaml b/examples/benchmarks/IGMTF/workflow_config_igmtf_Alpha360.yaml new file mode 100644 index 00000000000..1fc908ea9ae --- /dev/null +++ b/examples/benchmarks/IGMTF/workflow_config_igmtf_Alpha360.yaml @@ -0,0 +1,89 @@ +qlib_init: + provider_uri: "~/.qlib/qlib_data/cn_data" + region: cn +market: &market csi300 +benchmark: &benchmark SH000300 +data_handler_config: &data_handler_config + start_time: 2008-01-01 + end_time: 2020-08-01 + fit_start_time: 2008-01-01 + fit_end_time: 2014-12-31 + instruments: *market + infer_processors: + - class: RobustZScoreNorm + kwargs: + fields_group: feature + clip_outlier: true + - class: Fillna + kwargs: + fields_group: feature + learn_processors: + - class: DropnaLabel + - class: CSRankNorm + kwargs: + fields_group: label + label: ["Ref($close, -2) / Ref($close, -1) - 1"] +port_analysis_config: &port_analysis_config + strategy: + class: TopkDropoutStrategy + module_path: qlib.contrib.strategy + kwargs: + model: + dataset: + topk: 50 + n_drop: 5 + backtest: + start_time: 2017-01-01 + end_time: 2020-08-01 + account: 100000000 + benchmark: *benchmark + exchange_kwargs: + limit_threshold: 0.095 + deal_price: close + open_cost: 0.0005 + close_cost: 0.0015 + min_cost: 5 +task: + model: + class: IGMTF + module_path: qlib.contrib.model.pytorch_igmtf + kwargs: + d_feat: 6 + hidden_size: 64 + num_layers: 2 + dropout: 0 + n_epochs: 200 + lr: 1e-4 + early_stop: 20 + metric: ic + loss: mse + base_model: LSTM + model_path: "benchmarks/LSTM/model_lstm_csi300.pkl" + GPU: 0 + dataset: + class: DatasetH + module_path: qlib.data.dataset + kwargs: + handler: + 
class: Alpha360 + module_path: qlib.contrib.data.handler + kwargs: *data_handler_config + segments: + train: [2008-01-01, 2014-12-31] + valid: [2015-01-01, 2016-12-31] + test: [2017-01-01, 2020-08-01] + record: + - class: SignalRecord + module_path: qlib.workflow.record_temp + kwargs: + model: + dataset: + - class: SigAnaRecord + module_path: qlib.workflow.record_temp + kwargs: + ana_long_short: False + ann_scaler: 252 + - class: PortAnaRecord + module_path: qlib.workflow.record_temp + kwargs: + config: *port_analysis_config diff --git a/examples/benchmarks/README.md b/examples/benchmarks/README.md index 5a6a08ccfa8..07c0402c08a 100644 --- a/examples/benchmarks/README.md +++ b/examples/benchmarks/README.md @@ -65,6 +65,9 @@ The numbers shown below demonstrate the performance of the entire `workflow` of | GATs (Petar Velickovic, et al.) | Alpha360 | 0.0476±0.00 | 0.3508±0.02 | 0.0598±0.00 | 0.4604±0.01 | 0.0824±0.02 | 1.1079±0.26 | -0.0894±0.03 | | TCTS(Xueqing Wu, et al.) | Alpha360 | 0.0508±0.00 | 0.3931±0.04 | 0.0599±0.00 | 0.4756±0.03 | 0.0893±0.03 | 1.2256±0.36 | -0.0857±0.02 | | TRA(Hengxu Lin, et al.) | Alpha360 | 0.0485±0.00 | 0.3787±0.03 | 0.0587±0.00 | 0.4756±0.03 | 0.0920±0.03 | 1.2789±0.42 | -0.0834±0.02 | +| IGMTF(Wentao Xu, et al.) | Alpha360 | 0.0480±0.00 | 0.3589±0.02 | 0.0606±0.00 | 0.4773±0.01 | 0.0946±0.02 | 1.3509±0.25 | -0.0716±0.02 | +| HIST(Wentao Xu, et al.) | Alpha360 | 0.0522±0.00 | 0.3530±0.01 | 0.0667±0.00 | 0.4576±0.01 | 0.0987±0.02 | 1.3726±0.27 | -0.0681±0.01 | + - The selected 20 features are based on the feature importance of a lightgbm-based model. - The base model of DoubleEnsemble is LGBM. diff --git a/qlib/contrib/model/pytorch_hist.py b/qlib/contrib/model/pytorch_hist.py new file mode 100644 index 00000000000..623d5331064 --- /dev/null +++ b/qlib/contrib/model/pytorch_hist.py @@ -0,0 +1,522 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ + +from __future__ import division +from __future__ import print_function + +import os +import numpy as np +import pandas as pd +from typing import Text, Union +import urllib.request +import copy +from ...utils import get_or_create_path +from ...log import get_module_logger +import torch +import torch.nn as nn +import torch.optim as optim +import collections +from .pytorch_utils import count_parameters +from ...model.base import Model +from ...data.dataset import DatasetH +from ...data.dataset.handler import DataHandlerLP +from ...contrib.model.pytorch_lstm import LSTMModel +from ...contrib.model.pytorch_gru import GRUModel + +class HIST(Model): + """HIST Model + + Parameters + ---------- + lr : float + learning rate + d_feat : int + input dimensions for each time step + metric : str + the evaluate metric used in early stop + optimizer : str + optimizer name + GPU : str + the GPU ID(s) used for training + """ + + def __init__( + self, + d_feat=6, + hidden_size=64, + num_layers=2, + dropout=0.0, + n_epochs=200, + lr=0.001, + metric="", + early_stop=20, + loss="mse", + base_model="GRU", + model_path=None, + stock2concept = None, + stock_index = None, + optimizer="adam", + GPU=0, + seed=None, + **kwargs + ): + # Set logger. + self.logger = get_module_logger("HIST") + self.logger.info("HIST pytorch version...") + + # set hyper-parameters. 
+ self.d_feat = d_feat + self.hidden_size = hidden_size + self.num_layers = num_layers + self.dropout = dropout + self.n_epochs = n_epochs + self.lr = lr + self.metric = metric + self.early_stop = early_stop + self.optimizer = optimizer.lower() + self.loss = loss + self.base_model = base_model + self.model_path = model_path + self.stock2concept = stock2concept + self.stock_index = stock_index + self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu") + self.seed = seed + + self.logger.info( + "HIST parameters setting:" + "\nd_feat : {}" + "\nhidden_size : {}" + "\nnum_layers : {}" + "\ndropout : {}" + "\nn_epochs : {}" + "\nlr : {}" + "\nmetric : {}" + "\nearly_stop : {}" + "\noptimizer : {}" + "\nloss_type : {}" + "\nbase_model : {}" + "\nstock2concept : {}" + "\nstock_index : {}" + "\nuse_GPU : {}" + "\nseed : {}".format( + d_feat, + hidden_size, + num_layers, + dropout, + n_epochs, + lr, + metric, + early_stop, + optimizer.lower(), + loss, + base_model, + model_path, + stock2concept, + stock_index, + GPU, + seed, + ) + ) + + if self.seed is not None: + np.random.seed(self.seed) + torch.manual_seed(self.seed) + + self.HIST_model = HISTModel( + d_feat=self.d_feat, + hidden_size=self.hidden_size, + num_layers=self.num_layers, + dropout=self.dropout, + base_model=self.base_model, + ) + self.logger.info("model:\n{:}".format(self.HIST_model)) + self.logger.info("model size: {:.4f} MB".format(count_parameters(self.HIST_model))) + if optimizer.lower() == "adam": + self.train_optimizer = optim.Adam(self.HIST_model.parameters(), lr=self.lr) + elif optimizer.lower() == "gd": + self.train_optimizer = optim.SGD(self.HIST_model.parameters(), lr=self.lr) + else: + raise NotImplementedError("optimizer {} is not supported!".format(optimizer)) + + self.fitted = False + self.HIST_model.to(self.device) + + @property + def use_gpu(self): + return self.device != torch.device("cpu") + + def mse(self, pred, label): + loss = (pred - label) ** 2 
+ return torch.mean(loss) + + def loss_fn(self, pred, label): + mask = ~torch.isnan(label) + + if self.loss == "mse": + return self.mse(pred[mask], label[mask]) + + raise ValueError("unknown loss `%s`" % self.loss) + + def metric_fn(self, pred, label): + + mask = torch.isfinite(label) + + if self.metric == "ic": + x = pred[mask] + y = label[mask] + + vx = x - torch.mean(x) + vy = y - torch.mean(y) + return torch.sum(vx * vy) / (torch.sqrt(torch.sum(vx ** 2)) * torch.sqrt(torch.sum(vy ** 2))) + + if self.metric == "" or self.metric == "loss": + return -self.loss_fn(pred[mask], label[mask]) + + raise ValueError("unknown metric `%s`" % self.metric) + + def get_daily_inter(self, df, shuffle=False): + # organize the train data into daily batches + daily_count = df.groupby(level=0).size().values + daily_index = np.roll(np.cumsum(daily_count), 1) + daily_index[0] = 0 + if shuffle: + # shuffle data + daily_shuffle = list(zip(daily_index, daily_count)) + np.random.shuffle(daily_shuffle) + daily_index, daily_count = zip(*daily_shuffle) + return daily_index, daily_count + + def train_epoch(self, x_train, y_train, stock_index): + + stock2concept_matrix = np.load(self.stock2concept) + x_train_values = x_train.values + y_train_values = np.squeeze(y_train.values) + stock_index = stock_index.values + stock_index[np.isnan(stock_index)] = 733 + self.HIST_model.train() + + # organize the train data into daily batches + daily_index, daily_count = self.get_daily_inter(x_train, shuffle=True) + + for idx, count in zip(daily_index, daily_count): + batch = slice(idx, idx + count) + feature = torch.from_numpy(x_train_values[batch]).float().to(self.device) + concept_matrix = torch.from_numpy(stock2concept_matrix[stock_index[batch]]).float().to(self.device) + label = torch.from_numpy(y_train_values[batch]).float().to(self.device) + pred = self.HIST_model(feature, concept_matrix) + loss = self.loss_fn(pred, label) + + self.train_optimizer.zero_grad() + loss.backward() + 
torch.nn.utils.clip_grad_value_(self.HIST_model.parameters(), 3.0) + self.train_optimizer.step() + + def test_epoch(self, data_x, data_y, stock_index): + + # prepare training data + stock2concept_matrix = np.load(self.stock2concept) + x_values = data_x.values + y_values = np.squeeze(data_y.values) + stock_index = stock_index.values + stock_index[np.isnan(stock_index)] = 733 + self.HIST_model.eval() + + scores = [] + losses = [] + + # organize the test data into daily batches + daily_index, daily_count = self.get_daily_inter(data_x, shuffle=False) + + for idx, count in zip(daily_index, daily_count): + batch = slice(idx, idx + count) + feature = torch.from_numpy(x_values[batch]).float().to(self.device) + concept_matrix = torch.from_numpy(stock2concept_matrix[stock_index[batch]]).float().to(self.device) + label = torch.from_numpy(y_values[batch]).float().to(self.device) + with torch.no_grad(): + pred = self.HIST_model(feature, concept_matrix) + loss = self.loss_fn(pred, label) + losses.append(loss.item()) + + score = self.metric_fn(pred, label) + scores.append(score.item()) + + return np.mean(losses), np.mean(scores) + + def fit( + self, + dataset: DatasetH, + evals_result=dict(), + save_path=None, + ): + df_train, df_valid, df_test = dataset.prepare( + ["train", "valid", "test"], + col_set=["feature", "label"], + data_key=DataHandlerLP.DK_L, + ) + if df_train.empty or df_valid.empty: + raise ValueError("Empty data from dataset, please check your dataset config.") + + if not os.path.exists(self.stock2concept): + url = 'http://fintech.msra.cn/stock_data/downloads/qlib_csi300_stock2concept.npy' + urllib.request.urlretrieve(url, self.stock2concept) + + + stock_index = np.load(self.stock_index, allow_pickle=True).item() + df_train['stock_index'] = 733 + df_train['stock_index'] = df_train.index.get_level_values('instrument').map(stock_index) + df_valid['stock_index'] = 733 + df_valid['stock_index'] = df_valid.index.get_level_values('instrument').map(stock_index) + + 
x_train, y_train, stock_index_train = df_train["feature"], df_train["label"], df_train["stock_index"] + x_valid, y_valid, stock_index_valid = df_valid["feature"], df_valid["label"], df_valid["stock_index"] + + save_path = get_or_create_path(save_path) + + stop_steps = 0 + best_score = -np.inf + best_epoch = 0 + evals_result["train"] = [] + evals_result["valid"] = [] + + # load pretrained base_model + if self.base_model == "LSTM": + pretrained_model = LSTMModel() + elif self.base_model == "GRU": + pretrained_model = GRUModel() + else: + raise ValueError("unknown base model name `%s`" % self.base_model) + + if self.model_path is not None: + self.logger.info("Loading pretrained model...") + pretrained_model.load_state_dict(torch.load(self.model_path)) + + model_dict = self.HIST_model.state_dict() + pretrained_dict = {k: v for k, v in pretrained_model.state_dict().items() if k in model_dict} + model_dict.update(pretrained_dict) + self.HIST_model.load_state_dict(model_dict) + self.logger.info("Loading pretrained model Done...") + + # train + self.logger.info("training...") + self.fitted = True + + for step in range(self.n_epochs): + self.logger.info("Epoch%d:", step) + self.logger.info("training...") + self.train_epoch(x_train, y_train, stock_index_train) + + self.logger.info("evaluating...") + train_loss, train_score = self.test_epoch(x_train, y_train, stock_index_train) + val_loss, val_score = self.test_epoch(x_valid, y_valid, stock_index_valid) + self.logger.info("train %.6f, valid %.6f" % (train_score, val_score)) + evals_result["train"].append(train_score) + evals_result["valid"].append(val_score) + + if val_score > best_score: + best_score = val_score + stop_steps = 0 + best_epoch = step + best_param = copy.deepcopy(self.HIST_model.state_dict()) + else: + stop_steps += 1 + if stop_steps >= self.early_stop: + self.logger.info("early stop") + break + + self.logger.info("best score: %.6lf @ %d" % (best_score, best_epoch)) + self.HIST_model.load_state_dict(best_param) 
+ torch.save(best_param, save_path) + + + def predict(self, dataset: DatasetH, segment: Union[Text, slice] = "test"): + if not self.fitted: + raise ValueError("model is not fitted yet!") + + stock2concept_matrix = np.load(self.stock2concept) + stock_index = np.load(self.stock_index, allow_pickle=True).item() + df_test = dataset.prepare(segment, col_set="feature", data_key=DataHandlerLP.DK_I) + df_test['stock_index'] = 733 + df_test['stock_index'] = df_test.index.get_level_values('instrument').map(stock_index) + stock_index_test = df_test['stock_index'].values + stock_index_test[np.isnan(stock_index_test)]=733 + stock_index_test = stock_index_test.astype('int') + df_test = df_test.drop(['stock_index'], axis=1) + index = df_test.index + + self.HIST_model.eval() + x_values = df_test.values + preds = [] + + # organize the data into daily batches + daily_index, daily_count = self.get_daily_inter(df_test, shuffle=False) + + for idx, count in zip(daily_index, daily_count): + batch = slice(idx, idx + count) + x_batch = torch.from_numpy(x_values[batch]).float().to(self.device) + concept_matrix = torch.from_numpy(stock2concept_matrix[stock_index_test[batch]]).float().to(self.device) + + with torch.no_grad(): + pred = self.HIST_model(x_batch, concept_matrix).detach().cpu().numpy() + + preds.append(pred) + + return pd.Series(np.concatenate(preds), index=index) + + +class HISTModel(nn.Module): + def __init__(self, d_feat=6, hidden_size=64, num_layers=2, dropout=0.0, base_model="GRU"): + super().__init__() + + self.d_feat = d_feat + self.hidden_size = hidden_size + + if base_model == "GRU": + self.rnn = nn.GRU( + input_size=d_feat, + hidden_size=hidden_size, + num_layers=num_layers, + batch_first=True, + dropout=dropout, + ) + elif base_model == "LSTM": + self.rnn = nn.LSTM( + input_size=d_feat, + hidden_size=hidden_size, + num_layers=num_layers, + batch_first=True, + dropout=dropout, + ) + else: + raise ValueError("unknown base model name `%s`" % base_model) + + self.fc_es = 
nn.Linear(hidden_size, hidden_size) + torch.nn.init.xavier_uniform_(self.fc_es.weight) + self.fc_is = nn.Linear(hidden_size, hidden_size) + torch.nn.init.xavier_uniform_(self.fc_is.weight) + + self.fc_es_middle = nn.Linear(hidden_size, hidden_size) + torch.nn.init.xavier_uniform_(self.fc_es_middle.weight) + self.fc_is_middle = nn.Linear(hidden_size, hidden_size) + torch.nn.init.xavier_uniform_(self.fc_is_middle.weight) + + self.fc_es_fore = nn.Linear(hidden_size, hidden_size) + torch.nn.init.xavier_uniform_(self.fc_es_fore.weight) + self.fc_is_fore = nn.Linear(hidden_size, hidden_size) + torch.nn.init.xavier_uniform_(self.fc_is_fore.weight) + self.fc_indi_fore = nn.Linear(hidden_size, hidden_size) + torch.nn.init.xavier_uniform_(self.fc_indi_fore.weight) + + self.fc_es_back = nn.Linear(hidden_size, hidden_size) + torch.nn.init.xavier_uniform_(self.fc_es_back.weight) + self.fc_is_back = nn.Linear(hidden_size, hidden_size) + torch.nn.init.xavier_uniform_(self.fc_is_back.weight) + self.fc_indi = nn.Linear(hidden_size, hidden_size) + torch.nn.init.xavier_uniform_(self.fc_indi.weight) + + self.leaky_relu = nn.LeakyReLU() + self.softmax_s2t = torch.nn.Softmax(dim = 0) + self.softmax_t2s = torch.nn.Softmax(dim = 1) + + self.fc_out_es = nn.Linear(hidden_size, 1) + self.fc_out_is = nn.Linear(hidden_size, 1) + self.fc_out_indi = nn.Linear(hidden_size, 1) + self.fc_out = nn.Linear(hidden_size, 1) + + def cal_cos_similarity(self, x, y): # the 2nd dimension of x and y are the same + xy = x.mm(torch.t(y)) + x_norm = torch.sqrt(torch.sum(x*x, dim =1)).reshape(-1, 1) + y_norm = torch.sqrt(torch.sum(y*y, dim =1)).reshape(-1, 1) + cos_similarity = xy/x_norm.mm(torch.t(y_norm)) + cos_similarity[cos_similarity != cos_similarity] = 0 + return cos_similarity + + def forward(self, x, concept_matrix): + device = torch.device(torch.get_device(x)) + + x_hidden = x.reshape(len(x), self.d_feat, -1) # [N, F, T] + x_hidden = x_hidden.permute(0, 2, 1) # [N, T, F] + x_hidden, _ = 
self.rnn(x_hidden) + x_hidden = x_hidden[:, -1, :] + + # Predefined Concept Module + + stock_to_concept = concept_matrix + + stock_to_concept_sum = torch.sum(stock_to_concept, 0).reshape(1, -1).repeat(stock_to_concept.shape[0], 1) + stock_to_concept_sum = stock_to_concept_sum.mul(concept_matrix) + + stock_to_concept_sum = stock_to_concept_sum + (torch.ones(stock_to_concept.shape[0], stock_to_concept.shape[1]).to(device)) + stock_to_concept = stock_to_concept / stock_to_concept_sum #股票到tag的权重 + hidden = torch.t(stock_to_concept).mm(x_hidden) # + + hidden = hidden[hidden.sum(1)!=0] + + concept_to_stock = self.cal_cos_similarity(x_hidden, hidden) + concept_to_stock = self.softmax_t2s(concept_to_stock) + + e_shared_info = concept_to_stock.mm(hidden) + e_shared_info = self.fc_es(e_shared_info) + + e_shared_back = self.fc_es_back(e_shared_info) + output_es = self.fc_es_fore(e_shared_info) + output_es = self.leaky_relu(output_es) + + + # Hidden Concept Module + i_shared_info = x_hidden - e_shared_back + hidden = i_shared_info #每个股票都有一个hidden的tag,所以有280个hidden tags。 + i_stock_to_concept = self.cal_cos_similarity(i_shared_info, hidden) + dim = i_stock_to_concept.shape[0] + diag = i_stock_to_concept.diagonal(0) + i_stock_to_concept = i_stock_to_concept * (torch.ones(dim, dim) - torch.eye(dim)).to(device) + row = torch.linspace(0,dim-1,dim).to(device).long() + column =i_stock_to_concept.max(1)[1].long() + value = i_stock_to_concept.max(1)[0] + i_stock_to_concept[row, column] = 10 + i_stock_to_concept[i_stock_to_concept!=10]=0 + i_stock_to_concept[row, column] = value + i_stock_to_concept = i_stock_to_concept + torch.diag_embed((i_stock_to_concept.sum(0)!=0).float()*diag) + hidden = torch.t(i_shared_info).mm(i_stock_to_concept).t() + hidden = hidden[hidden.sum(1)!=0] + + i_concept_to_stock = self.cal_cos_similarity(i_shared_info, hidden) + i_concept_to_stock = self.softmax_t2s(i_concept_to_stock) + i_shared_info = i_concept_to_stock.mm(hidden) + i_shared_info = 
self.fc_is(i_shared_info) + + i_shared_back = self.fc_is_back(i_shared_info) + output_is = self.fc_is_fore(i_shared_info) + output_is = self.leaky_relu(output_is) + + # Individual Information Module + individual_info = x_hidden - e_shared_back - i_shared_back + output_indi = individual_info + output_indi = self.fc_indi(output_indi) + output_indi = self.leaky_relu(output_indi) + pred_indi = self.fc_out_indi(output_indi).squeeze() + + # Stock Trend Prediction + all_info = output_es + output_is + output_indi + pred_all = self.fc_out(all_info).squeeze() + + return pred_all + +def average_params(params_list): + assert isinstance(params_list, (tuple, list, collections.deque)) + n = len(params_list) + if n == 1: + return params_list[0] + new_params = collections.OrderedDict() + keys = None + for i, params in enumerate(params_list): + if keys is None: + keys = params.keys() + for k, v in params.items(): + if k not in keys: + raise ValueError("the %d-th model has different params" % i) + if k not in new_params: + new_params[k] = v / float(n) + else: + new_params[k] += v / float(n) + return new_params \ No newline at end of file diff --git a/qlib/contrib/model/pytorch_igmtf.py b/qlib/contrib/model/pytorch_igmtf.py new file mode 100644 index 00000000000..e499c6bb873 --- /dev/null +++ b/qlib/contrib/model/pytorch_igmtf.py @@ -0,0 +1,439 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ + +from __future__ import division +from __future__ import print_function + +import os +import numpy as np +import pandas as pd +from typing import Text, Union +import copy +from ...utils import get_or_create_path +from ...log import get_module_logger + +import torch +import torch.nn as nn +import torch.optim as optim + +from .pytorch_utils import count_parameters +from ...model.base import Model +from ...data.dataset import DatasetH +from ...data.dataset.handler import DataHandlerLP +from ...contrib.model.pytorch_lstm import LSTMModel +from ...contrib.model.pytorch_gru import GRUModel + +class IGMTF(Model): + """IGMTF Model + + Parameters + ---------- + d_feat : int + input dimension for each time step + metric: str + the evaluate metric used in early stop + optimizer : str + optimizer name + GPU : str + the GPU ID(s) used for training + """ + + def __init__( + self, + d_feat=6, + hidden_size=64, + num_layers=2, + dropout=0.0, + n_epochs=200, + lr=0.001, + metric="", + early_stop=20, + loss="mse", + base_model="GRU", + model_path=None, + optimizer="adam", + GPU=0, + seed=None, + **kwargs + ): + # Set logger. + self.logger = get_module_logger("IGMTF") + self.logger.info("IMGTF pytorch version...") + + # set hyper-parameters. 
+ self.d_feat = d_feat + self.hidden_size = hidden_size + self.num_layers = num_layers + self.dropout = dropout + self.n_epochs = n_epochs + self.lr = lr + self.metric = metric + self.early_stop = early_stop + self.optimizer = optimizer.lower() + self.loss = loss + self.base_model = base_model + self.model_path = model_path + self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu") + self.seed = seed + + self.logger.info( + "IGMTF parameters setting:" + "\nd_feat : {}" + "\nhidden_size : {}" + "\nnum_layers : {}" + "\ndropout : {}" + "\nn_epochs : {}" + "\nlr : {}" + "\nmetric : {}" + "\nearly_stop : {}" + "\noptimizer : {}" + "\nloss_type : {}" + "\nbase_model : {}" + "\nmodel_path : {}" + "\nvisible_GPU : {}" + "\nuse_GPU : {}" + "\nseed : {}".format( + d_feat, + hidden_size, + num_layers, + dropout, + n_epochs, + lr, + metric, + early_stop, + optimizer.lower(), + loss, + base_model, + model_path, + GPU, + self.use_gpu, + seed, + ) + ) + + if self.seed is not None: + np.random.seed(self.seed) + torch.manual_seed(self.seed) + + self.igmtf_model = IGMTFModel( + d_feat=self.d_feat, + hidden_size=self.hidden_size, + num_layers=self.num_layers, + dropout=self.dropout, + base_model=self.base_model, + ) + self.logger.info("model:\n{:}".format(self.igmtf_model)) + self.logger.info("model size: {:.4f} MB".format(count_parameters(self.igmtf_model))) + + if optimizer.lower() == "adam": + self.train_optimizer = optim.Adam(self.igmtf_model.parameters(), lr=self.lr) + elif optimizer.lower() == "gd": + self.train_optimizer = optim.SGD(self.igmtf_model.parameters(), lr=self.lr) + else: + raise NotImplementedError("optimizer {} is not supported!".format(optimizer)) + + self.fitted = False + self.igmtf_model.to(self.device) + + @property + def use_gpu(self): + return self.device != torch.device("cpu") + + def mse(self, pred, label): + loss = (pred - label) ** 2 + return torch.mean(loss) + + def loss_fn(self, pred, label): + mask = 
~torch.isnan(label) + + if self.loss == "mse": + return self.mse(pred[mask], label[mask]) + + raise ValueError("unknown loss `%s`" % self.loss) + + def metric_fn(self, pred, label): + + mask = torch.isfinite(label) + + if self.metric == "ic": + x = pred[mask] + y = label[mask] + + vx = x - torch.mean(x) + vy = y - torch.mean(y) + return torch.sum(vx * vy) / (torch.sqrt(torch.sum(vx ** 2)) * torch.sqrt(torch.sum(vy ** 2))) + + if self.metric == "" or self.metric == "loss": + return -self.loss_fn(pred[mask], label[mask]) + + raise ValueError("unknown metric `%s`" % self.metric) + + def get_daily_inter(self, df, shuffle=False): + # organize the train data into daily batches + daily_count = df.groupby(level=0).size().values + daily_index = np.roll(np.cumsum(daily_count), 1) + daily_index[0] = 0 + if shuffle: + # shuffle data + daily_shuffle = list(zip(daily_index, daily_count)) + np.random.shuffle(daily_shuffle) + daily_index, daily_count = zip(*daily_shuffle) + return daily_index, daily_count + + + def get_train_hidden(self, x_train): + x_train_values = x_train.values + daily_index, daily_count = self.get_daily_inter(x_train, shuffle=True) + self.igmtf_model.eval() + train_hidden = [] + train_hidden_day = [] + + for idx, count in zip(daily_index, daily_count): + batch = slice(idx, idx + count) + feature = torch.from_numpy(x_train_values[batch]).float().to(self.device) + out = self.igmtf_model(feature, get_hidden=True) + train_hidden.append(out.detach().cpu()) + train_hidden_day.append(out.detach().cpu().mean(dim=0).unsqueeze(dim=0)) + + train_hidden = np.asarray(train_hidden, dtype = object) + train_hidden_day = torch.cat(train_hidden_day) + + return train_hidden, train_hidden_day + + def train_epoch(self, x_train, y_train, train_hidden, train_hidden_day): + + x_train_values = x_train.values + y_train_values = np.squeeze(y_train.values) + + self.igmtf_model.train() + + daily_index, daily_count = self.get_daily_inter(x_train, shuffle=True) + + for idx, count in 
zip(daily_index, daily_count): + batch = slice(idx, idx + count) + feature = torch.from_numpy(x_train_values[batch]).float().to(self.device) + label = torch.from_numpy(y_train_values[batch]).float().to(self.device) + pred = self.igmtf_model(feature, train_hidden = train_hidden, train_hidden_day = train_hidden_day) + loss = self.loss_fn(pred, label) + + self.train_optimizer.zero_grad() + loss.backward() + torch.nn.utils.clip_grad_value_(self.igmtf_model.parameters(), 3.0) + self.train_optimizer.step() + + def test_epoch(self, data_x, data_y, train_hidden, train_hidden_day): + + # prepare training data + x_values = data_x.values + y_values = np.squeeze(data_y.values) + + self.igmtf_model.eval() + + scores = [] + losses = [] + + daily_index, daily_count = self.get_daily_inter(data_x, shuffle=False) + + for idx, count in zip(daily_index, daily_count): + batch = slice(idx, idx + count) + feature = torch.from_numpy(x_values[batch]).float().to(self.device) + label = torch.from_numpy(y_values[batch]).float().to(self.device) + + pred = self.igmtf_model(feature, train_hidden = train_hidden, train_hidden_day = train_hidden_day) + loss = self.loss_fn(pred, label) + losses.append(loss.item()) + + score = self.metric_fn(pred, label) + scores.append(score.item()) + + return np.mean(losses), np.mean(scores) + + + def fit( + self, + dataset: DatasetH, + evals_result=dict(), + save_path=None, + ): + + df_train, df_valid = dataset.prepare( + ["train", "valid"], + col_set=["feature", "label"], + data_key=DataHandlerLP.DK_L, + ) + if df_train.empty or df_valid.empty: + raise ValueError("Empty data from dataset, please check your dataset config.") + + x_train, y_train = df_train["feature"], df_train["label"] + x_valid, y_valid = df_valid["feature"], df_valid["label"] + + save_path = get_or_create_path(save_path) + stop_steps = 0 + train_loss = 0 + best_score = -np.inf + best_epoch = 0 + evals_result["train"] = [] + evals_result["valid"] = [] + + # load pretrained base_model + if 
self.base_model == "LSTM": + pretrained_model = LSTMModel() + elif self.base_model == "GRU": + pretrained_model = GRUModel() + else: + raise ValueError("unknown base model name `%s`" % self.base_model) + + if self.model_path is not None: + self.logger.info("Loading pretrained model...") + pretrained_model.load_state_dict(torch.load(self.model_path, map_location=self.device)) + + model_dict = self.igmtf_model.state_dict() + pretrained_dict = { + k: v for k, v in pretrained_model.state_dict().items() if k in model_dict # pylint: disable=E1135 + } + model_dict.update(pretrained_dict) + self.igmtf_model.load_state_dict(model_dict) + self.logger.info("Loading pretrained model Done...") + + + # train + self.logger.info("training...") + self.fitted = True + + for step in range(self.n_epochs): + self.logger.info("Epoch%d:", step) + self.logger.info("training...") + train_hidden, train_hidden_day = self.get_train_hidden(x_train) + self.train_epoch(x_train, y_train, train_hidden, train_hidden_day) + self.logger.info("evaluating...") + train_loss, train_score = self.test_epoch(x_train, y_train, train_hidden, train_hidden_day) + val_loss, val_score = self.test_epoch(x_valid, y_valid, train_hidden, train_hidden_day) + self.logger.info("train %.6f, valid %.6f" % (train_score, val_score)) + evals_result["train"].append(train_score) + evals_result["valid"].append(val_score) + + if val_score > best_score: + best_score = val_score + stop_steps = 0 + best_epoch = step + best_param = copy.deepcopy(self.igmtf_model.state_dict()) + else: + stop_steps += 1 + if stop_steps >= self.early_stop: + self.logger.info("early stop") + break + + self.logger.info("best score: %.6lf @ %d" % (best_score, best_epoch)) + self.igmtf_model.load_state_dict(best_param) + torch.save(best_param, save_path) + + if self.use_gpu: + torch.cuda.empty_cache() + + def predict(self, dataset: DatasetH, segment: Union[Text, slice] = "test"): + if not self.fitted: + raise ValueError("model is not fitted yet!") + 
x_train = dataset.prepare("train", col_set="feature", data_key=DataHandlerLP.DK_L) + train_hidden, train_hidden_day = self.get_train_hidden(x_train) + x_test = dataset.prepare(segment, col_set="feature", data_key=DataHandlerLP.DK_I) + index = x_test.index + self.igmtf_model.eval() + x_values = x_test.values + sample_num = x_values.shape[0] + preds = [] + + daily_index, daily_count = self.get_daily_inter(x_test, shuffle=False) + + for idx, count in zip(daily_index, daily_count): + batch = slice(idx, idx + count) + x_batch = torch.from_numpy(x_values[batch]).float().to(self.device) + + with torch.no_grad(): + pred = self.igmtf_model(x_batch, train_hidden = train_hidden, train_hidden_day = train_hidden_day).detach().cpu().numpy() + + preds.append(pred) + + return pd.Series(np.concatenate(preds), index=index) + + + +class IGMTFModel(nn.Module): + def __init__(self, d_feat=6, hidden_size=64, num_layers=2, dropout=0.0, base_model="GRU"): + super().__init__() + + if base_model == "GRU": + self.rnn = nn.GRU( + input_size=d_feat, + hidden_size=hidden_size, + num_layers=num_layers, + batch_first=True, + dropout=dropout, + ) + elif base_model == "LSTM": + self.rnn = nn.LSTM( + input_size=d_feat, + hidden_size=hidden_size, + num_layers=num_layers, + batch_first=True, + dropout=dropout, + ) + else: + raise ValueError("unknown base model name `%s`" % base_model) + self.lins = nn.Sequential() + for i in range(2): + self.lins.add_module("linear"+str(i), nn.Linear(hidden_size, hidden_size)) + self.lins.add_module("leakyrelu"+str(i), nn.LeakyReLU()) + self.fc_output = nn.Linear(hidden_size*2, hidden_size*2) + self.project1 = nn.Linear(hidden_size, hidden_size, bias=False) + self.project2 = nn.Linear(hidden_size, hidden_size, bias=False) + self.fc_out_pred = nn.Linear(hidden_size*2, 1) + + self.leaky_relu = nn.LeakyReLU() + self.d_feat = d_feat + + def cal_cos_similarity(self, x, y): # the 2nd dimension of x and y are the same + xy = x.mm(torch.t(y)) + x_norm = 
torch.sqrt(torch.sum(x*x, dim =1)).reshape(-1, 1) + y_norm = torch.sqrt(torch.sum(y*y, dim =1)).reshape(-1, 1) + cos_similarity = xy/x_norm.mm(torch.t(y_norm)) + cos_similarity[cos_similarity != cos_similarity] = 0 + return cos_similarity + + def sparse_dense_mul(self, s, d): + i = s._indices() + v = s._values() + dv = d[i[0,:], i[1,:]] # get values from relevant entries of dense matrix + return torch.sparse.FloatTensor(i, v * dv, s.size()) + + + def forward(self, x, get_hidden=False, train_hidden=None, train_hidden_day = None, k_day = 10, n_neighbor=10): + # x: [N, F*T] + device = x.device + x = x.reshape(len(x), self.d_feat, -1) # [N, F, T] + x = x.permute(0, 2, 1) # [N, T, F] + out, _ = self.rnn(x) + out = out[:, -1, :] + out = self.lins(out) + mini_batch_out = out + if get_hidden is True: + return mini_batch_out + + mini_batch_out_day = torch.mean(mini_batch_out, dim=0).unsqueeze(0) + day_similarity = self.cal_cos_similarity(mini_batch_out_day, train_hidden_day.to(device)) + day_index = torch.topk(day_similarity, k_day, dim=1)[1] + sample_train_hidden = train_hidden[day_index.long().cpu()].squeeze() + sample_train_hidden = torch.cat(list(sample_train_hidden)).to(device) + sample_train_hidden = self.lins(sample_train_hidden) + cos_similarity = self.cal_cos_similarity(self.project1(mini_batch_out), self.project2(sample_train_hidden)) + + + row = torch.linspace(0,x.shape[0]-1, x.shape[0]).reshape([-1, 1]).repeat(1, n_neighbor).reshape(1, -1).to(device) + column = torch.topk(cos_similarity, n_neighbor, dim = 1)[1].reshape(1, -1) + mask = torch.sparse_coo_tensor(torch.cat([row, column]), torch.ones([row.shape[1]]).to(device)/n_neighbor, (x.shape[0], sample_train_hidden.shape[0])) + cos_similarity = self.sparse_dense_mul(mask, cos_similarity) + + agg_out = torch.sparse.mm(cos_similarity, self.project2(sample_train_hidden)) + # out = self.fc_out(out).squeeze() + out = self.fc_out_pred(torch.cat([mini_batch_out, agg_out], axis=1)).squeeze() + return out \ No newline at 
end of file From c321a682068d0eae93cfbc028ecf2cf953f1cd8a Mon Sep 17 00:00:00 2001 From: Wentao-Xu Date: Sun, 10 Apr 2022 18:09:23 +0800 Subject: [PATCH 2/9] add stock index --- .../benchmarks/HIST/qlib_csi300_stock_index.npy | Bin 0 -> 14738 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 examples/benchmarks/HIST/qlib_csi300_stock_index.npy diff --git a/examples/benchmarks/HIST/qlib_csi300_stock_index.npy b/examples/benchmarks/HIST/qlib_csi300_stock_index.npy new file mode 100644 index 0000000000000000000000000000000000000000..36db027df6209b4b0b7449e949468b712f51ae2a GIT binary patch literal 14738 zcmbu_WxQ5J*9LIHKvN;ooDv%+^tjR zF_S`b7%_49q%gV(+-t=6DNW{6#*UcWWdGfzYqw6bI{p9tI?irCYU0!hlc#qdH7PW8 zpD=ZN^SBWqjF{eWcxW0mY2uXTFm+UOM{V=pt2#~`{onu9w$f&fw(Hq;q_&@{3k}qT zyGYxv)r&?qPoLb>Q5PA~u1%XZ{hcm4f8Jc}Fi;okBJGE?HJhm&`}OPB=U@A4`G;)P ztc#DFKTMaHrAu}h(!pLbsCNg?5tV7)o77mS=FfuB^b|bv;>;AdPf=S&1Rm4P<48R5z4W7;^0_ zt1=YYMOI@dwX3X7kU8B*)*#3u-B{LSaN14QA}Au=MAl|Vw7Ya-D0EXk^cy z#JH)wY$$t64~AHGl+6grNc+g<46%xA!BFZ>vL%5_wXbZ&;IyA?P2eN# zFWWE_IzYB%aJsYftRb<18YtT_dbpc>Ms z)RD3agVRwmn8E94*_9wpv`KbjD0GYrA+U(;BfAq=fX2#DhD6879t^4OD|-@HLD^4+ z5m;J|m%SKDogl*ridZMg-VBLOk`V-Dsgq?Sf#sOWs2VaG{UD_ zs_a8xQ*4@yC9u)oU&ayGd^te&B`6b}F8dKUuLsI_0$=J3nZV$5rcA6Mx73~`lNg*H zB$EkZuLp|~SpAqSfgsKG5Sc=dm3pW&GdMj=rZRXvT&5A&K+Tc;2@0=A$N>Zv^|>;g zA=i0wAcNESGK0X!dZf%`DD)_q#ZclW8~l(tOe0yWi~^s$H^fKi5@S962yg` zAcqkorJg8~*J$vFg8 zPp_798N6O2=MlI_ua)x|3cXG)U?}x^xsbr;dV^fVQ0k3xaSbkVdXrqj;Pqy?lpv1v z7P*We(Oczm0xPh$$rS`va&DI^88W>?u42gbPPv-F>s@jUK_2VfaxFoT>OFEDgVTHE zdV(_6`{V|OOz)Q)8FGCNN8~n!QXiGu34ErH$sG*2 zJ}!6G;BD?dA$Jkj=sziU6Ig+LO78hTu$C^?r{!LTLZ6ZQYKU#1o|XG+NUbk?P9CVC zu;ul6d9Vg=6X*qbr~=>er!UIG|H06fO;W5c$s-JjzATS2czs14Bd`_uRe7Ai>iTQ) z1Vg5;%aaVHz9COBczsizCa^m8mOR5y=-cuvgV%TDIfC5lyYf7Nm7MqF1%_DPmlqjQ z{Xkw~DD*>lnZfBt@(P33kL6VYi|Hrw8iUtQ<#ht@^)q>+2K4Kn%bNswsb9!jHDvaR zFXe59Sih2Y7!v(j-X*Z!;Tw66AW8IFd7mNG@8ko9T)&qO3G7~ekdJD}qf~#Cj|pt< z|0JInroeR?FbNTG-3Ig_2Cm0gMj-*N&*;*S0ykjvWypXV)@b{B7jj@{#v}^`uuB8x 
zp#*kiw0Y!!8#Q1OJaFR*?G!UE5*K;^yAj4k5jF#ELYT%bY!2*RW0Xg6*aEm|jj`=a z!j`~@&|6X23K$cn_U&5(lNzmQNW(V3l+o(Sw!n-qu_*Nf=8Ulq+W`y0+`edgU`bfG zJoExOLYJ0d2cT!nedrDBLFlan?FihgMq7!K&`7SIG#m)rj?qV95O8}!m-w&?uoq)l zgu%ca2)#vVS77fNlgLWYZonNGt^DG!y>A~zn;rJBp-tb`GjzN7B(m8t6u1+;GLDL{ z2e2<=9EClB{VGIyM;7E^!2X0TFT-BI0X0UcEsn#1I}^r{4|@X#GRAQj0UXqTc^C=Y zh0$vADBxhi)Kj(1-ni!wBQTh4H|>8qkLcz~O|p2MiN|dlPz_hLeCJDnu%#cJY&eBMI{?4GJ8^ zXz3k*qX|nZK~sQDgx+dhGjL3eNo*_ZRNy`h=)yGMSjH>~`vb?-7^PO$4gl_3As+a| zmdVqB`w^x#5e@{7Cv;I3W&kJD7^gP5W&$S?T8f5Qz)6J8ioij@$&6M%4+bisw}8wB z21aWS4gpT75Ros6G8_tQX0(RqFyK_e(n{swz-f$m9_9e|XS4}>1n__wlh~@wT;Oy@ zd;2`#frN>b%K5+<6(Y!qjpmWSnS@zVgrk772#X>MM*|OHEVFP7@L&4g zE@2WE;S}IJMtkL{!1;u!edcMvBN?rRoDMvSFt^V<19)@;x^O1&7)F%Tb04mjlmi#g-yage!n&5hga{ zt^}UVXcOTo;5me;ebLpxa|v_nZLR^HM_6PoTnjv((N?bOfEUzgYm^Vy11}_uqcYq8 zyoj){a&aT@Vn)m4n}C;8*fK_G9Bu|)N?6(wc?1w*jxH5K}v~ zw(xe~m4q1gJAhZ!Xa`YcxD$9aqXqdc;5Ce;4|fBvB~0uy?*U#%SXd`;FYtQ8((pdu z4TR2S@%_LX8Ot&}0KAFO-u@u)WNeXaUR|PKE-I=#hbvVD@1zxG7fJ6pJB9B z`)%N}HCi}bcnA0#qrLK7;PZsGN`?1;FA%1dithtoB+RYd`T+P6qYcN0z?TW_2}bw` z_(}u1@G95@wcQUjW}~Ko`CQ zzD;Nk0K-?ncPd1Xovkuo1K%Y~Y&HA__+A70@GbCt!qmFT?|>f=me$665B!kO*&Y1= z{HVql3-ph`j~OjWKLI~sw8;Mq{FKmJ(*6SctU`-tf@1Y6@N>d6%ffHKFBq-K{~h=x zVP=K;58zjXxlM#WfnO6ARs{Y6ep6vfO}6y@8~AOFacV>S5AZv}+*-JQf!{OQ`rgKV zNY(cT!omX57WgBh&G&Y|p9sAbmiEA(8Ew&D2>45dEk(dCeqrFRHQMgq&OL#@5hht2 z76txJnC5Bd0Q`e6wx_EX^4R9)@UD@gc!IUqt&_uxIUphg$gNf14dgWGT?>{ScDwdnbEGZ0Cr)tF)D#w z8EtAi;6@FY2M^r1!j{O}Ebam9Mwr?>+6=e}qb<;z1G^Ju)?I7?+>{W7WlLbxfOh@~ zjBB*xPdonvCKXzclh`t38(`XsNNy|C+XAx&EJIIV&Sla z7rz706UO$Ly@5R#EfseJZpLW+XCL6^gxJ&zaEn&N4cXeU6L3qy(uSiiaI03ty;}wA z2i&?s3|wk$On=}ugxIPN0B&1jY!MAR1A7vBTaN|;x2q88UD~1OAmH{jCZ(0iU4Xq> zvE`DIFc`Q4BhEj8y$NG$@^=I7SRpPRC#CoSa^F5RT3c9z-GTIz;W)Oe8w%WsUYXdz z;2yxfgoT~L?FsBh=q*2o0sGgOI>Wty11kJ)YUg1%aA(5Y7QVfK18YoTJ7*XH97I^! 
z?rbD*msYeW#n#4*0uCljEt5wBcWp&`p||4S1l+B{mbqyM>|=mK80~~-AK>nUnPuHr z;84QCHcjJzdoWto?F-zKu(UDS4>+ts1jJkU9S_{AMmv!x!vx@P!q`^riNL)HGn+D# zfFlTV+X_tvj%-Ebk`0FfM-k%mH2_Cfh=3$^7(NBqM3~tTO*3!|VUcHHDsZ0~zCL#}VS>`2gU)6(S&bCN&+nA0gVp1A*fS3;UuOzzK}DKbi@gNa$_8&jL=W zF^MuC4gyYQv~fQesEii6*}y;;+fsfAa0;Wn{ZL>tAEP44lho@jL}Mug27#CWcdi^9h~R`O|<$R%rQYX_JN1fkzSIr1T8n(S(_$ z_nE+B2y;vCvw+7kT6&)iJgxyOGaLPlG)Zvb9YVatMI-S3USiwRR(xo!ep!e|T1&A>|uGb>iN054;7cK!*x zyhb}QbKy4N6@;lp>2~0igwDq34&YUc_CAe= zhkJn65f*mu_X4jc^oI8VZ)in}XJ!|FKk!CIONIx4H&uvu<~EBT1l~+o+I2nzyro8a zU|WQTfwvOIcAJj?Z>!O|gCaZ%yqz$ye(5pb9faPV;5-hzvqI!2F8&GNU4*%H)K3EM zCM>MGcnWw=jY*Uy;c4K#gmDsuXMp!HTF3G%@P5L?*0|?@4-lqSZJq}{NLbny;|1VD zt%yvv2No{^A0|xf^x!4nBNZa@r47f+z(;GeLY;(HfR7O-)3l-wtojn!XUjg4~H+4jF L_}cz;WcU98$U&FG literal 0 HcmV?d00001 From d08089d50fa7571d32482559009bec80f0ca8650 Mon Sep 17 00:00:00 2001 From: Wentao Xu Date: Sun, 10 Apr 2022 21:02:13 +0800 Subject: [PATCH 3/9] Update README.md --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 9ae767771a8..f6e2dc36923 100644 --- a/README.md +++ b/README.md @@ -11,6 +11,7 @@ Recent released features | Feature | Status | | -- | ------ | +| HIST and IGMTF models | :chart_with_upwards_trend: [Released](https://github.com/microsoft/qlib/pull/1040) on Apr 10, 2022 | | Ibovespa index data | :rice: [Released](https://github.com/microsoft/qlib/pull/990) on Apr 6, 2022 | | Point-in-Time database | :hammer: [Released](https://github.com/microsoft/qlib/pull/343) on Mar 10, 2022 | | Arctic Provider Backend & Orderbook data example | :hammer: [Released](https://github.com/microsoft/qlib/pull/744) on Jan 17, 2022 | @@ -338,6 +339,8 @@ Here is a list of models built on `Qlib`. - [TCN based on pytorch (Shaojie Bai, et al. 2018)](examples/benchmarks/TCN/) - [ADARNN based on pytorch (YunTao Du, et al. 
2021)](examples/benchmarks/ADARNN/) - [ADD based on pytorch (Hongshun Tang, et al.2020)](examples/benchmarks/ADD/) +- [IGMTF based on pytorch (Wentao Xu, et al.2021)](examples/benchmarks/IGMTF/) +- [HIST based on pytorch (Wentao Xu, et al.2021)](examples/benchmarks/HIST/) Your PR of new Quant models is highly welcomed. From 985204af3d5f5c6968c4453ae4e136806f28ec3f Mon Sep 17 00:00:00 2001 From: Wentao-Xu Date: Sun, 10 Apr 2022 21:12:26 +0800 Subject: [PATCH 4/9] delete useless code --- qlib/contrib/model/pytorch_hist.py | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) diff --git a/qlib/contrib/model/pytorch_hist.py b/qlib/contrib/model/pytorch_hist.py index 623d5331064..dce5a6033a9 100644 --- a/qlib/contrib/model/pytorch_hist.py +++ b/qlib/contrib/model/pytorch_hist.py @@ -500,23 +500,4 @@ def forward(self, x, concept_matrix): all_info = output_es + output_is + output_indi pred_all = self.fc_out(all_info).squeeze() - return pred_all - -def average_params(params_list): - assert isinstance(params_list, (tuple, list, collections.deque)) - n = len(params_list) - if n == 1: - return params_list[0] - new_params = collections.OrderedDict() - keys = None - for i, params in enumerate(params_list): - if keys is None: - keys = params.keys() - for k, v in params.items(): - if k not in keys: - raise ValueError("the %d-th model has different params" % i) - if k not in new_params: - new_params[k] = v / float(n) - else: - new_params[k] += v / float(n) - return new_params \ No newline at end of file + return pred_all \ No newline at end of file From 70f937d9fafd4fd1b90c24e378f452fcdc374480 Mon Sep 17 00:00:00 2001 From: Wentao-Xu Date: Mon, 11 Apr 2022 15:58:38 +0800 Subject: [PATCH 5/9] fix the bug of code format with black --- qlib/contrib/model/pytorch_hist.py | 114 ++++++++++++++-------------- qlib/contrib/model/pytorch_igmtf.py | 70 +++++++++-------- 2 files changed, 97 insertions(+), 87 deletions(-) diff --git a/qlib/contrib/model/pytorch_hist.py 
b/qlib/contrib/model/pytorch_hist.py index dce5a6033a9..b0b413f5138 100644 --- a/qlib/contrib/model/pytorch_hist.py +++ b/qlib/contrib/model/pytorch_hist.py @@ -11,7 +11,7 @@ from typing import Text, Union import urllib.request import copy -from ...utils import get_or_create_path +from ...utils import get_or_create_path from ...log import get_module_logger import torch import torch.nn as nn @@ -24,6 +24,7 @@ from ...contrib.model.pytorch_lstm import LSTMModel from ...contrib.model.pytorch_gru import GRUModel + class HIST(Model): """HIST Model @@ -54,8 +55,8 @@ def __init__( loss="mse", base_model="GRU", model_path=None, - stock2concept = None, - stock_index = None, + stock2concept=None, + stock_index=None, optimizer="adam", GPU=0, seed=None, @@ -130,7 +131,7 @@ def __init__( dropout=self.dropout, base_model=self.base_model, ) - self.logger.info("model:\n{:}".format(self.HIST_model)) + self.logger.info("model:\n{:}".format(self.HIST_model)) self.logger.info("model size: {:.4f} MB".format(count_parameters(self.HIST_model))) if optimizer.lower() == "adam": self.train_optimizer = optim.Adam(self.HIST_model.parameters(), lr=self.lr) @@ -141,9 +142,9 @@ def __init__( self.fitted = False self.HIST_model.to(self.device) - - @property - def use_gpu(self): + + @property + def use_gpu(self): return self.device != torch.device("cpu") def mse(self, pred, label): @@ -168,7 +169,7 @@ def metric_fn(self, pred, label): vx = x - torch.mean(x) vy = y - torch.mean(y) - return torch.sum(vx * vy) / (torch.sqrt(torch.sum(vx ** 2)) * torch.sqrt(torch.sum(vy ** 2))) + return torch.sum(vx * vy) / (torch.sqrt(torch.sum(vx**2)) * torch.sqrt(torch.sum(vy**2))) if self.metric == "" or self.metric == "loss": return -self.loss_fn(pred[mask], label[mask]) @@ -189,7 +190,7 @@ def get_daily_inter(self, df, shuffle=False): def train_epoch(self, x_train, y_train, stock_index): - stock2concept_matrix = np.load(self.stock2concept) + stock2concept_matrix = np.load(self.stock2concept) x_train_values = 
x_train.values y_train_values = np.squeeze(y_train.values) stock_index = stock_index.values @@ -198,11 +199,11 @@ def train_epoch(self, x_train, y_train, stock_index): # organize the train data into daily batches daily_index, daily_count = self.get_daily_inter(x_train, shuffle=True) - + for idx, count in zip(daily_index, daily_count): batch = slice(idx, idx + count) feature = torch.from_numpy(x_train_values[batch]).float().to(self.device) - concept_matrix = torch.from_numpy(stock2concept_matrix[stock_index[batch]]).float().to(self.device) + concept_matrix = torch.from_numpy(stock2concept_matrix[stock_index[batch]]).float().to(self.device) label = torch.from_numpy(y_train_values[batch]).float().to(self.device) pred = self.HIST_model(feature, concept_matrix) loss = self.loss_fn(pred, label) @@ -231,7 +232,7 @@ def test_epoch(self, data_x, data_y, stock_index): for idx, count in zip(daily_index, daily_count): batch = slice(idx, idx + count) feature = torch.from_numpy(x_values[batch]).float().to(self.device) - concept_matrix = torch.from_numpy(stock2concept_matrix[stock_index[batch]]).float().to(self.device) + concept_matrix = torch.from_numpy(stock2concept_matrix[stock_index[batch]]).float().to(self.device) label = torch.from_numpy(y_values[batch]).float().to(self.device) with torch.no_grad(): pred = self.HIST_model(feature, concept_matrix) @@ -254,19 +255,18 @@ def fit( col_set=["feature", "label"], data_key=DataHandlerLP.DK_L, ) - if df_train.empty or df_valid.empty: + if df_train.empty or df_valid.empty: raise ValueError("Empty data from dataset, please check your dataset config.") if not os.path.exists(self.stock2concept): - url = 'http://fintech.msra.cn/stock_data/downloads/qlib_csi300_stock2concept.npy' + url = "http://fintech.msra.cn/stock_data/downloads/qlib_csi300_stock2concept.npy" urllib.request.urlretrieve(url, self.stock2concept) - stock_index = np.load(self.stock_index, allow_pickle=True).item() - df_train['stock_index'] = 733 - df_train['stock_index'] = 
df_train.index.get_level_values('instrument').map(stock_index) - df_valid['stock_index'] = 733 - df_valid['stock_index'] = df_valid.index.get_level_values('instrument').map(stock_index) + df_train["stock_index"] = 733 + df_train["stock_index"] = df_train.index.get_level_values("instrument").map(stock_index) + df_valid["stock_index"] = 733 + df_valid["stock_index"] = df_valid.index.get_level_values("instrument").map(stock_index) x_train, y_train, stock_index_train = df_train["feature"], df_train["label"], df_train["stock_index"] x_valid, y_valid, stock_index_valid = df_valid["feature"], df_valid["label"], df_valid["stock_index"] @@ -300,7 +300,7 @@ def fit( # train self.logger.info("training...") self.fitted = True - + for step in range(self.n_epochs): self.logger.info("Epoch%d:", step) self.logger.info("training...") @@ -328,7 +328,6 @@ def fit( self.HIST_model.load_state_dict(best_param) torch.save(best_param, save_path) - def predict(self, dataset: DatasetH, segment: Union[Text, slice] = "test"): if not self.fitted: raise ValueError("model is not fitted yet!") @@ -336,12 +335,12 @@ def predict(self, dataset: DatasetH, segment: Union[Text, slice] = "test"): stock2concept_matrix = np.load(self.stock2concept) stock_index = np.load(self.stock_index, allow_pickle=True).item() df_test = dataset.prepare(segment, col_set="feature", data_key=DataHandlerLP.DK_I) - df_test['stock_index'] = 733 - df_test['stock_index'] = df_test.index.get_level_values('instrument').map(stock_index) - stock_index_test = df_test['stock_index'].values - stock_index_test[np.isnan(stock_index_test)]=733 - stock_index_test = stock_index_test.astype('int') - df_test = df_test.drop(['stock_index'], axis=1) + df_test["stock_index"] = 733 + df_test["stock_index"] = df_test.index.get_level_values("instrument").map(stock_index) + stock_index_test = df_test["stock_index"].values + stock_index_test[np.isnan(stock_index_test)] = 733 + stock_index_test = stock_index_test.astype("int") + df_test = 
df_test.drop(["stock_index"], axis=1) index = df_test.index self.HIST_model.eval() @@ -354,7 +353,7 @@ def predict(self, dataset: DatasetH, segment: Union[Text, slice] = "test"): for idx, count in zip(daily_index, daily_count): batch = slice(idx, idx + count) x_batch = torch.from_numpy(x_values[batch]).float().to(self.device) - concept_matrix = torch.from_numpy(stock2concept_matrix[stock_index_test[batch]]).float().to(self.device) + concept_matrix = torch.from_numpy(stock2concept_matrix[stock_index_test[batch]]).float().to(self.device) with torch.no_grad(): pred = self.HIST_model(x_batch, concept_matrix).detach().cpu().numpy() @@ -415,44 +414,46 @@ def __init__(self, d_feat=6, hidden_size=64, num_layers=2, dropout=0.0, base_mod torch.nn.init.xavier_uniform_(self.fc_indi.weight) self.leaky_relu = nn.LeakyReLU() - self.softmax_s2t = torch.nn.Softmax(dim = 0) - self.softmax_t2s = torch.nn.Softmax(dim = 1) - + self.softmax_s2t = torch.nn.Softmax(dim=0) + self.softmax_t2s = torch.nn.Softmax(dim=1) + self.fc_out_es = nn.Linear(hidden_size, 1) self.fc_out_is = nn.Linear(hidden_size, 1) self.fc_out_indi = nn.Linear(hidden_size, 1) self.fc_out = nn.Linear(hidden_size, 1) - def cal_cos_similarity(self, x, y): # the 2nd dimension of x and y are the same + def cal_cos_similarity(self, x, y): # the 2nd dimension of x and y are the same xy = x.mm(torch.t(y)) - x_norm = torch.sqrt(torch.sum(x*x, dim =1)).reshape(-1, 1) - y_norm = torch.sqrt(torch.sum(y*y, dim =1)).reshape(-1, 1) - cos_similarity = xy/x_norm.mm(torch.t(y_norm)) + x_norm = torch.sqrt(torch.sum(x * x, dim=1)).reshape(-1, 1) + y_norm = torch.sqrt(torch.sum(y * y, dim=1)).reshape(-1, 1) + cos_similarity = xy / x_norm.mm(torch.t(y_norm)) cos_similarity[cos_similarity != cos_similarity] = 0 return cos_similarity def forward(self, x, concept_matrix): device = torch.device(torch.get_device(x)) - x_hidden = x.reshape(len(x), self.d_feat, -1) # [N, F, T] - x_hidden = x_hidden.permute(0, 2, 1) # [N, T, F] + x_hidden = 
x.reshape(len(x), self.d_feat, -1) # [N, F, T] + x_hidden = x_hidden.permute(0, 2, 1) # [N, T, F] x_hidden, _ = self.rnn(x_hidden) x_hidden = x_hidden[:, -1, :] # Predefined Concept Module - - stock_to_concept = concept_matrix - + + stock_to_concept = concept_matrix + stock_to_concept_sum = torch.sum(stock_to_concept, 0).reshape(1, -1).repeat(stock_to_concept.shape[0], 1) stock_to_concept_sum = stock_to_concept_sum.mul(concept_matrix) - stock_to_concept_sum = stock_to_concept_sum + (torch.ones(stock_to_concept.shape[0], stock_to_concept.shape[1]).to(device)) - stock_to_concept = stock_to_concept / stock_to_concept_sum #股票到tag的权重 - hidden = torch.t(stock_to_concept).mm(x_hidden) # - - hidden = hidden[hidden.sum(1)!=0] - - concept_to_stock = self.cal_cos_similarity(x_hidden, hidden) + stock_to_concept_sum = stock_to_concept_sum + ( + torch.ones(stock_to_concept.shape[0], stock_to_concept.shape[1]).to(device) + ) + stock_to_concept = stock_to_concept / stock_to_concept_sum # 股票到tag的权重 + hidden = torch.t(stock_to_concept).mm(x_hidden) # + + hidden = hidden[hidden.sum(1) != 0] + + concept_to_stock = self.cal_cos_similarity(x_hidden, hidden) concept_to_stock = self.softmax_t2s(concept_to_stock) e_shared_info = concept_to_stock.mm(hidden) @@ -462,23 +463,22 @@ def forward(self, x, concept_matrix): output_es = self.fc_es_fore(e_shared_info) output_es = self.leaky_relu(output_es) - # Hidden Concept Module i_shared_info = x_hidden - e_shared_back - hidden = i_shared_info #每个股票都有一个hidden的tag,所以有280个hidden tags。 - i_stock_to_concept = self.cal_cos_similarity(i_shared_info, hidden) + hidden = i_shared_info # 每个股票都有一个hidden的tag,所以有280个hidden tags。 + i_stock_to_concept = self.cal_cos_similarity(i_shared_info, hidden) dim = i_stock_to_concept.shape[0] diag = i_stock_to_concept.diagonal(0) i_stock_to_concept = i_stock_to_concept * (torch.ones(dim, dim) - torch.eye(dim)).to(device) - row = torch.linspace(0,dim-1,dim).to(device).long() - column =i_stock_to_concept.max(1)[1].long() + 
row = torch.linspace(0, dim - 1, dim).to(device).long() + column = i_stock_to_concept.max(1)[1].long() value = i_stock_to_concept.max(1)[0] i_stock_to_concept[row, column] = 10 - i_stock_to_concept[i_stock_to_concept!=10]=0 + i_stock_to_concept[i_stock_to_concept != 10] = 0 i_stock_to_concept[row, column] = value - i_stock_to_concept = i_stock_to_concept + torch.diag_embed((i_stock_to_concept.sum(0)!=0).float()*diag) + i_stock_to_concept = i_stock_to_concept + torch.diag_embed((i_stock_to_concept.sum(0) != 0).float() * diag) hidden = torch.t(i_shared_info).mm(i_stock_to_concept).t() - hidden = hidden[hidden.sum(1)!=0] + hidden = hidden[hidden.sum(1) != 0] i_concept_to_stock = self.cal_cos_similarity(i_shared_info, hidden) i_concept_to_stock = self.softmax_t2s(i_concept_to_stock) @@ -490,7 +490,7 @@ def forward(self, x, concept_matrix): output_is = self.leaky_relu(output_is) # Individual Information Module - individual_info = x_hidden - e_shared_back - i_shared_back + individual_info = x_hidden - e_shared_back - i_shared_back output_indi = individual_info output_indi = self.fc_indi(output_indi) output_indi = self.leaky_relu(output_indi) @@ -500,4 +500,4 @@ def forward(self, x, concept_matrix): all_info = output_es + output_is + output_indi pred_all = self.fc_out(all_info).squeeze() - return pred_all \ No newline at end of file + return pred_all diff --git a/qlib/contrib/model/pytorch_igmtf.py b/qlib/contrib/model/pytorch_igmtf.py index e499c6bb873..2bf8911f895 100644 --- a/qlib/contrib/model/pytorch_igmtf.py +++ b/qlib/contrib/model/pytorch_igmtf.py @@ -24,6 +24,7 @@ from ...contrib.model.pytorch_lstm import LSTMModel from ...contrib.model.pytorch_gru import GRUModel + class IGMTF(Model): """IGMTF Model @@ -51,7 +52,7 @@ def __init__( early_stop=20, loss="mse", base_model="GRU", - model_path=None, + model_path=None, optimizer="adam", GPU=0, seed=None, @@ -162,7 +163,7 @@ def metric_fn(self, pred, label): vx = x - torch.mean(x) vy = y - torch.mean(y) - return 
torch.sum(vx * vy) / (torch.sqrt(torch.sum(vx ** 2)) * torch.sqrt(torch.sum(vy ** 2))) + return torch.sum(vx * vy) / (torch.sqrt(torch.sum(vx**2)) * torch.sqrt(torch.sum(vy**2))) if self.metric == "" or self.metric == "loss": return -self.loss_fn(pred[mask], label[mask]) @@ -181,7 +182,6 @@ def get_daily_inter(self, df, shuffle=False): daily_index, daily_count = zip(*daily_shuffle) return daily_index, daily_count - def get_train_hidden(self, x_train): x_train_values = x_train.values daily_index, daily_count = self.get_daily_inter(x_train, shuffle=True) @@ -195,8 +195,8 @@ def get_train_hidden(self, x_train): out = self.igmtf_model(feature, get_hidden=True) train_hidden.append(out.detach().cpu()) train_hidden_day.append(out.detach().cpu().mean(dim=0).unsqueeze(dim=0)) - - train_hidden = np.asarray(train_hidden, dtype = object) + + train_hidden = np.asarray(train_hidden, dtype=object) train_hidden_day = torch.cat(train_hidden_day) return train_hidden, train_hidden_day @@ -209,12 +209,12 @@ def train_epoch(self, x_train, y_train, train_hidden, train_hidden_day): self.igmtf_model.train() daily_index, daily_count = self.get_daily_inter(x_train, shuffle=True) - + for idx, count in zip(daily_index, daily_count): batch = slice(idx, idx + count) feature = torch.from_numpy(x_train_values[batch]).float().to(self.device) label = torch.from_numpy(y_train_values[batch]).float().to(self.device) - pred = self.igmtf_model(feature, train_hidden = train_hidden, train_hidden_day = train_hidden_day) + pred = self.igmtf_model(feature, train_hidden=train_hidden, train_hidden_day=train_hidden_day) loss = self.loss_fn(pred, label) self.train_optimizer.zero_grad() @@ -240,7 +240,7 @@ def test_epoch(self, data_x, data_y, train_hidden, train_hidden_day): feature = torch.from_numpy(x_values[batch]).float().to(self.device) label = torch.from_numpy(y_values[batch]).float().to(self.device) - pred = self.igmtf_model(feature, train_hidden = train_hidden, train_hidden_day = train_hidden_day) + pred 
= self.igmtf_model(feature, train_hidden=train_hidden, train_hidden_day=train_hidden_day) loss = self.loss_fn(pred, label) losses.append(loss.item()) @@ -249,7 +249,6 @@ def test_epoch(self, data_x, data_y, train_hidden, train_hidden_day): return np.mean(losses), np.mean(scores) - def fit( self, dataset: DatasetH, @@ -296,7 +295,6 @@ def fit( self.igmtf_model.load_state_dict(model_dict) self.logger.info("Loading pretrained model Done...") - # train self.logger.info("training...") self.fitted = True @@ -350,14 +348,18 @@ def predict(self, dataset: DatasetH, segment: Union[Text, slice] = "test"): x_batch = torch.from_numpy(x_values[batch]).float().to(self.device) with torch.no_grad(): - pred = self.igmtf_model(x_batch, train_hidden = train_hidden, train_hidden_day = train_hidden_day).detach().cpu().numpy() + pred = ( + self.igmtf_model(x_batch, train_hidden=train_hidden, train_hidden_day=train_hidden_day) + .detach() + .cpu() + .numpy() + ) preds.append(pred) return pd.Series(np.concatenate(preds), index=index) - class IGMTFModel(nn.Module): def __init__(self, d_feat=6, hidden_size=64, num_layers=2, dropout=0.0, base_model="GRU"): super().__init__() @@ -382,32 +384,31 @@ def __init__(self, d_feat=6, hidden_size=64, num_layers=2, dropout=0.0, base_mod raise ValueError("unknown base model name `%s`" % base_model) self.lins = nn.Sequential() for i in range(2): - self.lins.add_module("linear"+str(i), nn.Linear(hidden_size, hidden_size)) - self.lins.add_module("leakyrelu"+str(i), nn.LeakyReLU()) - self.fc_output = nn.Linear(hidden_size*2, hidden_size*2) + self.lins.add_module("linear" + str(i), nn.Linear(hidden_size, hidden_size)) + self.lins.add_module("leakyrelu" + str(i), nn.LeakyReLU()) + self.fc_output = nn.Linear(hidden_size * 2, hidden_size * 2) self.project1 = nn.Linear(hidden_size, hidden_size, bias=False) self.project2 = nn.Linear(hidden_size, hidden_size, bias=False) - self.fc_out_pred = nn.Linear(hidden_size*2, 1) + self.fc_out_pred = nn.Linear(hidden_size * 
2, 1) self.leaky_relu = nn.LeakyReLU() self.d_feat = d_feat - def cal_cos_similarity(self, x, y): # the 2nd dimension of x and y are the same + def cal_cos_similarity(self, x, y): # the 2nd dimension of x and y are the same xy = x.mm(torch.t(y)) - x_norm = torch.sqrt(torch.sum(x*x, dim =1)).reshape(-1, 1) - y_norm = torch.sqrt(torch.sum(y*y, dim =1)).reshape(-1, 1) - cos_similarity = xy/x_norm.mm(torch.t(y_norm)) + x_norm = torch.sqrt(torch.sum(x * x, dim=1)).reshape(-1, 1) + y_norm = torch.sqrt(torch.sum(y * y, dim=1)).reshape(-1, 1) + cos_similarity = xy / x_norm.mm(torch.t(y_norm)) cos_similarity[cos_similarity != cos_similarity] = 0 return cos_similarity def sparse_dense_mul(self, s, d): i = s._indices() v = s._values() - dv = d[i[0,:], i[1,:]] # get values from relevant entries of dense matrix + dv = d[i[0, :], i[1, :]] # get values from relevant entries of dense matrix return torch.sparse.FloatTensor(i, v * dv, s.size()) - - def forward(self, x, get_hidden=False, train_hidden=None, train_hidden_day = None, k_day = 10, n_neighbor=10): + def forward(self, x, get_hidden=False, train_hidden=None, train_hidden_day=None, k_day=10, n_neighbor=10): # x: [N, F*T] device = x.device x = x.reshape(len(x), self.d_feat, -1) # [N, F, T] @@ -418,7 +419,7 @@ def forward(self, x, get_hidden=False, train_hidden=None, train_hidden_day = Non mini_batch_out = out if get_hidden is True: return mini_batch_out - + mini_batch_out_day = torch.mean(mini_batch_out, dim=0).unsqueeze(0) day_similarity = self.cal_cos_similarity(mini_batch_out_day, train_hidden_day.to(device)) day_index = torch.topk(day_similarity, k_day, dim=1)[1] @@ -426,14 +427,23 @@ def forward(self, x, get_hidden=False, train_hidden=None, train_hidden_day = Non sample_train_hidden = torch.cat(list(sample_train_hidden)).to(device) sample_train_hidden = self.lins(sample_train_hidden) cos_similarity = self.cal_cos_similarity(self.project1(mini_batch_out), self.project2(sample_train_hidden)) - - - row = 
torch.linspace(0,x.shape[0]-1, x.shape[0]).reshape([-1, 1]).repeat(1, n_neighbor).reshape(1, -1).to(device) - column = torch.topk(cos_similarity, n_neighbor, dim = 1)[1].reshape(1, -1) - mask = torch.sparse_coo_tensor(torch.cat([row, column]), torch.ones([row.shape[1]]).to(device)/n_neighbor, (x.shape[0], sample_train_hidden.shape[0])) + + row = ( + torch.linspace(0, x.shape[0] - 1, x.shape[0]) + .reshape([-1, 1]) + .repeat(1, n_neighbor) + .reshape(1, -1) + .to(device) + ) + column = torch.topk(cos_similarity, n_neighbor, dim=1)[1].reshape(1, -1) + mask = torch.sparse_coo_tensor( + torch.cat([row, column]), + torch.ones([row.shape[1]]).to(device) / n_neighbor, + (x.shape[0], sample_train_hidden.shape[0]), + ) cos_similarity = self.sparse_dense_mul(mask, cos_similarity) agg_out = torch.sparse.mm(cos_similarity, self.project2(sample_train_hidden)) # out = self.fc_out(out).squeeze() out = self.fc_out_pred(torch.cat([mini_batch_out, agg_out], axis=1)).squeeze() - return out \ No newline at end of file + return out From 15c584a83a8346677fe1629b8ecaded767ce09fb Mon Sep 17 00:00:00 2001 From: Wentao-Xu Date: Tue, 12 Apr 2022 11:28:44 +0800 Subject: [PATCH 6/9] fix pylint bugs --- qlib/contrib/model/pytorch_hist.py | 9 ++++----- qlib/contrib/model/pytorch_igmtf.py | 3 +-- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/qlib/contrib/model/pytorch_hist.py b/qlib/contrib/model/pytorch_hist.py index b0b413f5138..764d24dbf75 100644 --- a/qlib/contrib/model/pytorch_hist.py +++ b/qlib/contrib/model/pytorch_hist.py @@ -16,7 +16,6 @@ import torch import torch.nn as nn import torch.optim as optim -import collections from .pytorch_utils import count_parameters from ...model.base import Model from ...data.dataset import DatasetH @@ -171,7 +170,7 @@ def metric_fn(self, pred, label): vy = y - torch.mean(y) return torch.sum(vx * vy) / (torch.sqrt(torch.sum(vx**2)) * torch.sqrt(torch.sum(vy**2))) - if self.metric == "" or self.metric == "loss": + if self.metric == ("", 
"loss"): return -self.loss_fn(pred[mask], label[mask]) raise ValueError("unknown metric `%s`" % self.metric) @@ -448,8 +447,8 @@ def forward(self, x, concept_matrix): stock_to_concept_sum = stock_to_concept_sum + ( torch.ones(stock_to_concept.shape[0], stock_to_concept.shape[1]).to(device) ) - stock_to_concept = stock_to_concept / stock_to_concept_sum # 股票到tag的权重 - hidden = torch.t(stock_to_concept).mm(x_hidden) # + stock_to_concept = stock_to_concept / stock_to_concept_sum + hidden = torch.t(stock_to_concept).mm(x_hidden) hidden = hidden[hidden.sum(1) != 0] @@ -465,7 +464,7 @@ def forward(self, x, concept_matrix): # Hidden Concept Module i_shared_info = x_hidden - e_shared_back - hidden = i_shared_info # 每个股票都有一个hidden的tag,所以有280个hidden tags。 + hidden = i_shared_info i_stock_to_concept = self.cal_cos_similarity(i_shared_info, hidden) dim = i_stock_to_concept.shape[0] diag = i_stock_to_concept.diagonal(0) diff --git a/qlib/contrib/model/pytorch_igmtf.py b/qlib/contrib/model/pytorch_igmtf.py index 2bf8911f895..bead9f4342a 100644 --- a/qlib/contrib/model/pytorch_igmtf.py +++ b/qlib/contrib/model/pytorch_igmtf.py @@ -5,7 +5,6 @@ from __future__ import division from __future__ import print_function -import os import numpy as np import pandas as pd from typing import Text, Union @@ -165,7 +164,7 @@ def metric_fn(self, pred, label): vy = y - torch.mean(y) return torch.sum(vx * vy) / (torch.sqrt(torch.sum(vx**2)) * torch.sqrt(torch.sum(vy**2))) - if self.metric == "" or self.metric == "loss": + if self.metric == ("", "loss"): return -self.loss_fn(pred[mask], label[mask]) raise ValueError("unknown metric `%s`" % self.metric) From 8827e11d4b6250af650aced2e34a9eea90c86529 Mon Sep 17 00:00:00 2001 From: Wentao-Xu Date: Wed, 13 Apr 2022 21:32:12 +0800 Subject: [PATCH 7/9] fix the bugs of pylint --- qlib/contrib/model/pytorch_hist.py | 7 +------ qlib/contrib/model/pytorch_igmtf.py | 3 +-- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git 
a/qlib/contrib/model/pytorch_hist.py b/qlib/contrib/model/pytorch_hist.py index 764d24dbf75..f71ca7a9d01 100644 --- a/qlib/contrib/model/pytorch_hist.py +++ b/qlib/contrib/model/pytorch_hist.py @@ -88,7 +88,6 @@ def __init__( "\nd_feat : {}" "\nhidden_size : {}" "\nnum_layers : {}" - "\ndropout : {}" "\nn_epochs : {}" "\nlr : {}" "\nmetric : {}" @@ -97,13 +96,11 @@ def __init__( "\nloss_type : {}" "\nbase_model : {}" "\nstock2concept : {}" - "\nstock_index : {}" "\nuse_GPU : {}" "\nseed : {}".format( d_feat, hidden_size, num_layers, - dropout, n_epochs, lr, metric, @@ -113,7 +110,6 @@ def __init__( base_model, model_path, stock2concept, - stock_index, GPU, seed, ) @@ -425,8 +421,7 @@ def cal_cos_similarity(self, x, y): # the 2nd dimension of x and y are the same xy = x.mm(torch.t(y)) x_norm = torch.sqrt(torch.sum(x * x, dim=1)).reshape(-1, 1) y_norm = torch.sqrt(torch.sum(y * y, dim=1)).reshape(-1, 1) - cos_similarity = xy / x_norm.mm(torch.t(y_norm)) - cos_similarity[cos_similarity != cos_similarity] = 0 + cos_similarity = xy / (x_norm.mm(torch.t(y_norm)) + 1e-6) return cos_similarity def forward(self, x, concept_matrix): diff --git a/qlib/contrib/model/pytorch_igmtf.py b/qlib/contrib/model/pytorch_igmtf.py index bead9f4342a..854172ef109 100644 --- a/qlib/contrib/model/pytorch_igmtf.py +++ b/qlib/contrib/model/pytorch_igmtf.py @@ -397,8 +397,7 @@ def cal_cos_similarity(self, x, y): # the 2nd dimension of x and y are the same xy = x.mm(torch.t(y)) x_norm = torch.sqrt(torch.sum(x * x, dim=1)).reshape(-1, 1) y_norm = torch.sqrt(torch.sum(y * y, dim=1)).reshape(-1, 1) - cos_similarity = xy / x_norm.mm(torch.t(y_norm)) - cos_similarity[cos_similarity != cos_similarity] = 0 + cos_similarity = xy / (x_norm.mm(torch.t(y_norm)) + 1e-6) return cos_similarity def sparse_dense_mul(self, s, d): From 6b2370dbfbe4dafbc95265a9fed892975e73edb6 Mon Sep 17 00:00:00 2001 From: Wentao-Xu Date: Wed, 13 Apr 2022 22:27:09 +0800 Subject: [PATCH 8/9] fix pylint bugs --- 
qlib/contrib/model/pytorch_hist.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/qlib/contrib/model/pytorch_hist.py b/qlib/contrib/model/pytorch_hist.py index f71ca7a9d01..cb8af01d2a1 100644 --- a/qlib/contrib/model/pytorch_hist.py +++ b/qlib/contrib/model/pytorch_hist.py @@ -88,6 +88,7 @@ def __init__( "\nd_feat : {}" "\nhidden_size : {}" "\nnum_layers : {}" + "\ndropout : {}" "\nn_epochs : {}" "\nlr : {}" "\nmetric : {}" @@ -95,12 +96,15 @@ def __init__( "\noptimizer : {}" "\nloss_type : {}" "\nbase_model : {}" + "\nmodel_path : {}" "\nstock2concept : {}" + "\nstock_index : {}" "\nuse_GPU : {}" "\nseed : {}".format( d_feat, hidden_size, num_layers, + dropout, n_epochs, lr, metric, @@ -110,6 +114,7 @@ def __init__( base_model, model_path, stock2concept, + stock_index, GPU, seed, ) From b1eb89455495b6d190b5af51eefd6f87a43c5abc Mon Sep 17 00:00:00 2001 From: Wentao-Xu Date: Wed, 13 Apr 2022 22:35:46 +0800 Subject: [PATCH 9/9] fix flake8 --- qlib/contrib/model/pytorch_hist.py | 1 - qlib/contrib/model/pytorch_igmtf.py | 1 - 2 files changed, 2 deletions(-) diff --git a/qlib/contrib/model/pytorch_hist.py b/qlib/contrib/model/pytorch_hist.py index cb8af01d2a1..020534575ef 100644 --- a/qlib/contrib/model/pytorch_hist.py +++ b/qlib/contrib/model/pytorch_hist.py @@ -493,7 +493,6 @@ def forward(self, x, concept_matrix): output_indi = individual_info output_indi = self.fc_indi(output_indi) output_indi = self.leaky_relu(output_indi) - pred_indi = self.fc_out_indi(output_indi).squeeze() # Stock Trend Prediction all_info = output_es + output_is + output_indi diff --git a/qlib/contrib/model/pytorch_igmtf.py b/qlib/contrib/model/pytorch_igmtf.py index 854172ef109..e3a07c3417f 100644 --- a/qlib/contrib/model/pytorch_igmtf.py +++ b/qlib/contrib/model/pytorch_igmtf.py @@ -337,7 +337,6 @@ def predict(self, dataset: DatasetH, segment: Union[Text, slice] = "test"): index = x_test.index self.igmtf_model.eval() x_values = x_test.values - sample_num = x_values.shape[0] preds = [] 
daily_index, daily_count = self.get_daily_inter(x_test, shuffle=False)