Skip to content
Merged
Show file tree
Hide file tree
Changes from 10 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions qlib/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,7 @@ def set_conf_from_C(self, config_c):
"redis_port": 6379,
"redis_task_db": 1,
# This value can be reset via qlib.init
"logging_level": "INFO",
"logging_level": logging.INFO,
# Global configuration of qlib log
# logging_level can control the logging level more finely
"logging_config": {
Expand All @@ -124,12 +124,12 @@ def set_conf_from_C(self, config_c):
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"level": logging.DEBUG,
"formatter": "logger_format",
"filters": ["field_not_found"],
}
},
"loggers": {"qlib": {"level": "DEBUG", "handlers": ["console"]}},
"loggers": {"qlib": {"level": logging.DEBUG, "handlers": ["console"]}},
},
# Default config for experiment manager
"exp_manager": {
Expand Down Expand Up @@ -185,7 +185,7 @@ def set_conf_from_C(self, config_c):
# The nfs should be auto-mounted by qlib on other
# servers (such as PAI) [auto_mount:True]
"timeout": 100,
"logging_level": "INFO",
"logging_level": logging.INFO,
"region": REG_CN,
## Custom Operator
"custom_ops": [],
Expand Down
2 changes: 1 addition & 1 deletion qlib/contrib/model/double_ensemble.py
Original file line number Diff line number Diff line change
Expand Up @@ -184,7 +184,7 @@ def feature_selection(self, df_train, loss_values):
/ M
)
loss_feat = self.get_loss(y_train.values.squeeze(), pred.values)
g.loc[i_f, "g_value"] = np.mean(loss_feat - loss_values) / np.std(loss_feat - loss_values)
g.loc[i_f, "g_value"] = np.mean(loss_feat - loss_values) / (np.std(loss_feat - loss_values) + 1e-7)
x_train_tmp.loc[:, feat] = x_train.loc[:, feat].copy()

# one column in train features is all-nan # if g['g_value'].isna().any()
Expand Down
11 changes: 7 additions & 4 deletions qlib/contrib/model/pytorch_alstm.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
import torch.nn as nn
import torch.optim as optim

from .pytorch_utils import count_parameters
from ...model.base import Model
from ...data.dataset import DatasetH
from ...data.dataset.handler import DataHandlerLP
Expand All @@ -39,8 +40,8 @@ class ALSTM(Model):
the evaluate metric used in early stop
optimizer : str
optimizer name
GPU : str
the GPU ID(s) used for training
GPU : int
the GPU ID used for training
"""

def __init__(
Expand Down Expand Up @@ -76,7 +77,7 @@ def __init__(
self.early_stop = early_stop
self.optimizer = optimizer.lower()
self.loss = loss
self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() else "cpu")
self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
self.use_gpu = torch.cuda.is_available()
self.seed = seed

Expand Down Expand Up @@ -123,6 +124,9 @@ def __init__(
num_layers=self.num_layers,
dropout=self.dropout,
)
self.logger.info("model:\n{:}".format(self.ALSTM_model))
self.logger.info("model size: {:.4f} MB".format(count_parameters(self.ALSTM_model)))

if optimizer.lower() == "adam":
self.train_optimizer = optim.Adam(self.ALSTM_model.parameters(), lr=self.lr)
elif optimizer.lower() == "gd":
Expand Down Expand Up @@ -214,7 +218,6 @@ def fit(
self,
dataset: DatasetH,
evals_result=dict(),
verbose=True,
save_path=None,
):

Expand Down
13 changes: 8 additions & 5 deletions qlib/contrib/model/pytorch_alstm_ts.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
import torch.optim as optim
from torch.utils.data import DataLoader

from .pytorch_utils import count_parameters
from ...model.base import Model
from ...data.dataset import DatasetH, TSDatasetH
from ...data.dataset.handler import DataHandlerLP
Expand All @@ -40,8 +41,8 @@ class ALSTM(Model):
the evaluate metric used in early stop
optimizer : str
optimizer name
GPU : str
the GPU ID(s) used for training
GPU : int
the GPU ID used for training
"""

def __init__(
Expand Down Expand Up @@ -78,7 +79,7 @@ def __init__(
self.early_stop = early_stop
self.optimizer = optimizer.lower()
self.loss = loss
self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() else "cpu")
self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
self.n_jobs = n_jobs
self.use_gpu = torch.cuda.is_available()
self.seed = seed
Expand Down Expand Up @@ -127,7 +128,10 @@ def __init__(
hidden_size=self.hidden_size,
num_layers=self.num_layers,
dropout=self.dropout,
).to(self.device)
)
self.logger.info("model:\n{:}".format(self.ALSTM_model))
self.logger.info("model size: {:.4f} MB".format(count_parameters(self.ALSTM_model)))

if optimizer.lower() == "adam":
self.train_optimizer = optim.Adam(self.ALSTM_model.parameters(), lr=self.lr)
elif optimizer.lower() == "gd":
Expand Down Expand Up @@ -201,7 +205,6 @@ def fit(
self,
dataset,
evals_result=dict(),
verbose=True,
save_path=None,
):
dl_train = dataset.prepare("train", col_set=["feature", "label"], data_key=DataHandlerLP.DK_L)
Expand Down
11 changes: 7 additions & 4 deletions qlib/contrib/model/pytorch_gats.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
import torch.nn as nn
import torch.optim as optim

from .pytorch_utils import count_parameters
from ...model.base import Model
from ...data.dataset import DatasetH
from ...data.dataset.handler import DataHandlerLP
Expand All @@ -42,8 +43,8 @@ class GATs(Model):
the evaluate metric used in early stop
optimizer : str
optimizer name
GPU : str
the GPU ID(s) used for training
GPU : int
the GPU ID used for training
"""

def __init__(
Expand Down Expand Up @@ -83,7 +84,7 @@ def __init__(
self.base_model = base_model
self.with_pretrain = with_pretrain
self.model_path = model_path
self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() else "cpu")
self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
self.use_gpu = torch.cuda.is_available()
self.seed = seed

Expand Down Expand Up @@ -135,6 +136,9 @@ def __init__(
dropout=self.dropout,
base_model=self.base_model,
)
self.logger.info("model:\n{:}".format(self.GAT_model))
self.logger.info("model size: {:.4f} MB".format(count_parameters(self.GAT_model)))

if optimizer.lower() == "adam":
self.train_optimizer = optim.Adam(self.GAT_model.parameters(), lr=self.lr)
elif optimizer.lower() == "gd":
Expand Down Expand Up @@ -232,7 +236,6 @@ def fit(
self,
dataset: DatasetH,
evals_result=dict(),
verbose=True,
save_path=None,
):

Expand Down
11 changes: 7 additions & 4 deletions qlib/contrib/model/pytorch_gats_ts.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
from torch.utils.data import DataLoader
from torch.utils.data import Sampler

from .pytorch_utils import count_parameters
from ...model.base import Model
from ...data.dataset import DatasetH
from ...data.dataset.handler import DataHandlerLP
Expand Down Expand Up @@ -62,8 +63,8 @@ class GATs(Model):
the evaluate metric used in early stop
optimizer : str
optimizer name
GPU : str
the GPU ID(s) used for training
GPU : int
the GPU ID used for training
"""

def __init__(
Expand Down Expand Up @@ -104,7 +105,7 @@ def __init__(
self.base_model = base_model
self.with_pretrain = with_pretrain
self.model_path = model_path
self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() else "cpu")
self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
self.n_jobs = n_jobs
self.use_gpu = torch.cuda.is_available()
self.seed = seed
Expand Down Expand Up @@ -157,6 +158,9 @@ def __init__(
dropout=self.dropout,
base_model=self.base_model,
)
self.logger.info("model:\n{:}".format(self.GAT_model))
self.logger.info("model size: {:.4f} MB".format(count_parameters(self.GAT_model)))

if optimizer.lower() == "adam":
self.train_optimizer = optim.Adam(self.GAT_model.parameters(), lr=self.lr)
elif optimizer.lower() == "gd":
Expand Down Expand Up @@ -245,7 +249,6 @@ def fit(
self,
dataset,
evals_result=dict(),
verbose=True,
save_path=None,
):

Expand Down
7 changes: 5 additions & 2 deletions qlib/contrib/model/pytorch_gru.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
import torch.nn as nn
import torch.optim as optim

from .pytorch_utils import count_parameters
from ...model.base import Model
from ...data.dataset import DatasetH
from ...data.dataset.handler import DataHandlerLP
Expand Down Expand Up @@ -76,7 +77,7 @@ def __init__(
self.early_stop = early_stop
self.optimizer = optimizer.lower()
self.loss = loss
self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() else "cpu")
self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
self.use_gpu = torch.cuda.is_available()
self.seed = seed

Expand Down Expand Up @@ -123,6 +124,9 @@ def __init__(
num_layers=self.num_layers,
dropout=self.dropout,
)
self.logger.info("model:\n{:}".format(self.gru_model))
self.logger.info("model size: {:.4f} MB".format(count_parameters(self.gru_model)))

if optimizer.lower() == "adam":
self.train_optimizer = optim.Adam(self.gru_model.parameters(), lr=self.lr)
elif optimizer.lower() == "gd":
Expand Down Expand Up @@ -214,7 +218,6 @@ def fit(
self,
dataset: DatasetH,
evals_result=dict(),
verbose=True,
save_path=None,
):

Expand Down
9 changes: 6 additions & 3 deletions qlib/contrib/model/pytorch_gru_ts.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
import torch.optim as optim
from torch.utils.data import DataLoader

from .pytorch_utils import count_parameters
from ...model.base import Model
from ...data.dataset import DatasetH, TSDatasetH
from ...data.dataset.handler import DataHandlerLP
Expand Down Expand Up @@ -78,7 +79,7 @@ def __init__(
self.early_stop = early_stop
self.optimizer = optimizer.lower()
self.loss = loss
self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() else "cpu")
self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
self.n_jobs = n_jobs
self.use_gpu = torch.cuda.is_available()
self.seed = seed
Expand Down Expand Up @@ -127,7 +128,10 @@ def __init__(
hidden_size=self.hidden_size,
num_layers=self.num_layers,
dropout=self.dropout,
).to(self.device)
)
self.logger.info("model:\n{:}".format(self.gru_model))
self.logger.info("model size: {:.4f} MB".format(count_parameters(self.gru_model)))

if optimizer.lower() == "adam":
self.train_optimizer = optim.Adam(self.GRU_model.parameters(), lr=self.lr)
elif optimizer.lower() == "gd":
Expand Down Expand Up @@ -201,7 +205,6 @@ def fit(
self,
dataset,
evals_result=dict(),
verbose=True,
save_path=None,
):
dl_train = dataset.prepare("train", col_set=["feature", "label"], data_key=DataHandlerLP.DK_L)
Expand Down
3 changes: 1 addition & 2 deletions qlib/contrib/model/pytorch_lstm.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ def __init__(
self.early_stop = early_stop
self.optimizer = optimizer.lower()
self.loss = loss
self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() else "cpu")
self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
self.use_gpu = torch.cuda.is_available()
self.seed = seed

Expand Down Expand Up @@ -214,7 +214,6 @@ def fit(
self,
dataset: DatasetH,
evals_result=dict(),
verbose=True,
save_path=None,
):

Expand Down
3 changes: 1 addition & 2 deletions qlib/contrib/model/pytorch_lstm_ts.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ def __init__(
self.early_stop = early_stop
self.optimizer = optimizer.lower()
self.loss = loss
self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() else "cpu")
self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
self.n_jobs = n_jobs
self.use_gpu = torch.cuda.is_available()
self.seed = seed
Expand Down Expand Up @@ -201,7 +201,6 @@ def fit(
self,
dataset,
evals_result=dict(),
verbose=True,
save_path=None,
):
dl_train = dataset.prepare("train", col_set=["feature", "label"], data_key=DataHandlerLP.DK_L)
Expand Down
10 changes: 7 additions & 3 deletions qlib/contrib/model/pytorch_nn.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
import torch.nn as nn
import torch.optim as optim

from .pytorch_utils import count_parameters
from ...model.base import Model
from ...data.dataset import DatasetH
from ...data.dataset.handler import DataHandlerLP
Expand Down Expand Up @@ -42,8 +43,8 @@ class DNNModelPytorch(Model):
learning rate decay steps
optimizer : str
optimizer name
GPU : str
the GPU ID(s) used for training
GPU : int
the GPU ID used for training
"""

def __init__(
Expand Down Expand Up @@ -80,7 +81,7 @@ def __init__(
self.lr_decay_steps = lr_decay_steps
self.optimizer = optimizer.lower()
self.loss_type = loss
self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() else "cpu")
self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
self.use_GPU = torch.cuda.is_available()
self.seed = seed
self.weight_decay = weight_decay
Expand Down Expand Up @@ -129,6 +130,9 @@ def __init__(
self._scorer = mean_squared_error if loss == "mse" else roc_auc_score

self.dnn_model = Net(input_dim, output_dim, layers, loss=self.loss_type)
self.logger.info("model:\n{:}".format(self.dnn_model))
self.logger.info("model size: {:.4f} MB".format(count_parameters(self.dnn_model)))

if optimizer.lower() == "adam":
self.train_optimizer = optim.Adam(self.dnn_model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
elif optimizer.lower() == "gd":
Expand Down
Loading