Skip to content

Commit 2be918e

Browse files
fix some typos in doc/comments (microsoft#1389)
* fix typo in docstrings * fix typo * fix typo * fix black lint * fix black lint
1 parent 76c5328 commit 2be918e

File tree

6 files changed

+25
-21
lines changed

6 files changed

+25
-21
lines changed

qlib/contrib/model/pytorch_adarnn.py

Lines changed: 12 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ def __init__(
5656
n_splits=2,
5757
GPU=0,
5858
seed=None,
59-
**kwargs
59+
**_
6060
):
6161
# Set logger.
6262
self.logger = get_module_logger("ADARNN")
@@ -81,7 +81,7 @@ def __init__(
8181
self.optimizer = optimizer.lower()
8282
self.loss = loss
8383
self.n_splits = n_splits
84-
self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
84+
self.device = torch.device("cuda:%d" % GPU if torch.cuda.is_available() and GPU >= 0 else "cpu")
8585
self.seed = seed
8686

8787
self.logger.info(
@@ -213,7 +213,8 @@ def train_AdaRNN(self, train_loader_list, epoch, dist_old=None, weight_mat=None)
213213
weight_mat = self.transform_type(out_weight_list)
214214
return weight_mat, None
215215

216-
def calc_all_metrics(self, pred):
216+
@staticmethod
217+
def calc_all_metrics(pred):
217218
"""pred is a pandas dataframe that has two attributes: score (pred) and label (real)"""
218219
res = {}
219220
ic = pred.groupby(level="datetime").apply(lambda x: x.label.corr(x.score))
@@ -259,8 +260,6 @@ def fit(
259260

260261
save_path = get_or_create_path(save_path)
261262
stop_steps = 0
262-
best_score = -np.inf
263-
best_epoch = 0
264263
evals_result["train"] = []
265264
evals_result["valid"] = []
266265

@@ -400,7 +399,7 @@ def __init__(
400399
self.model_type = model_type
401400
self.trans_loss = trans_loss
402401
self.len_seq = len_seq
403-
self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
402+
self.device = torch.device("cuda:%d" % GPU if torch.cuda.is_available() and GPU >= 0 else "cpu")
404403
in_size = self.n_input
405404

406405
features = nn.ModuleList()
@@ -499,7 +498,8 @@ def process_gate_weight(self, out, index):
499498
res = self.softmax(weight).squeeze()
500499
return res
501500

502-
def get_features(self, output_list):
501+
@staticmethod
502+
def get_features(output_list):
503503
fea_list_src, fea_list_tar = [], []
504504
for fea in output_list:
505505
fea_list_src.append(fea[0 : fea.size(0) // 2])
@@ -561,7 +561,7 @@ def __init__(self, loss_type="cosine", input_dim=512, GPU=0):
561561
"""
562562
self.loss_type = loss_type
563563
self.input_dim = input_dim
564-
self.device = torch.device("cuda:%d" % (GPU) if torch.cuda.is_available() and GPU >= 0 else "cpu")
564+
self.device = torch.device("cuda:%d" % GPU if torch.cuda.is_available() and GPU >= 0 else "cpu")
565565

566566
def compute(self, X, Y):
567567
"""Compute adaptation loss
@@ -676,7 +676,8 @@ def __init__(self, kernel_type="linear", kernel_mul=2.0, kernel_num=5):
676676
self.fix_sigma = None
677677
self.kernel_type = kernel_type
678678

679-
def guassian_kernel(self, source, target, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
679+
@staticmethod
680+
def guassian_kernel(source, target, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
680681
n_samples = int(source.size()[0]) + int(target.size()[0])
681682
total = torch.cat([source, target], dim=0)
682683
total0 = total.unsqueeze(0).expand(int(total.size(0)), int(total.size(0)), int(total.size(1)))
@@ -691,7 +692,8 @@ def guassian_kernel(self, source, target, kernel_mul=2.0, kernel_num=5, fix_sigm
691692
kernel_val = [torch.exp(-L2_distance / bandwidth_temp) for bandwidth_temp in bandwidth_list]
692693
return sum(kernel_val)
693694

694-
def linear_mmd(self, X, Y):
695+
@staticmethod
696+
def linear_mmd(X, Y):
695697
delta = X.mean(axis=0) - Y.mean(axis=0)
696698
loss = delta.dot(delta.T)
697699
return loss

qlib/contrib/strategy/signal_strategy.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -428,7 +428,7 @@ def get_risk_data(self, date):
428428
specific_risk = load_dataset(root + "/" + self.specific_risk_path, index_col=[0])
429429

430430
if not factor_exp.index.equals(specific_risk.index):
431-
# NOTE: for stocks missing specific_risk, we always assume it have the highest volatility
431+
# NOTE: for stocks missing specific_risk, we always assume it has the highest volatility
432432
specific_risk = specific_risk.reindex(factor_exp.index, fill_value=specific_risk.max())
433433

434434
universe = factor_exp.index.tolist()

qlib/model/riskmodel/structured.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ class StructuredCovEstimator(RiskModel):
1818
`B` is the regression coefficients matrix for all observations (row) on
1919
all factors (columns), and `U` is the residual matrix with shape like `X`.
2020
21-
Therefore the structured covariance can be estimated by
21+
Therefore, the structured covariance can be estimated by
2222
cov(X.T) = F @ cov(B.T) @ F.T + diag(var(U))
2323
2424
In finance domain, there are mainly three methods to design `F` [1][2]:

qlib/workflow/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -155,7 +155,7 @@ def search_records(self, experiment_ids, **kwargs):
155155
156156
The arguments of this function are not set to be rigid, and they will be different with different implementation of
157157
``ExpManager`` in ``Qlib``. ``Qlib`` now provides an implementation of ``ExpManager`` with mlflow, and here is the
158-
example code of the this method with the ``MLflowExpManager``:
158+
example code of the method with the ``MLflowExpManager``:
159159
160160
.. code-block:: Python
161161

qlib/workflow/record_temp.py

Lines changed: 9 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,8 @@ class RecordTemp:
3030
"""
3131

3232
artifact_path = None
33-
depend_cls = None # the depend class of the record; the record will depend on the results generated by `depend_cls`
33+
depend_cls = None # the dependant class of the record; the record will depend on the results generated by
34+
# `depend_cls`
3435

3536
@classmethod
3637
def get_path(cls, path=None):
@@ -119,7 +120,7 @@ def check(self, include_self: bool = False, parents: bool = True):
119120
Check if the records is properly generated and saved.
120121
It is useful in following examples
121122
122-
- checking if the depended files complete before generating new things.
123+
- checking if the dependant files complete before generating new things.
123124
- checking if the final files is completed
124125
125126
Parameters
@@ -186,7 +187,7 @@ def generate_label(dataset):
186187
return raw_label
187188

188189
def generate(self, **kwargs):
189-
# generate prediciton
190+
# generate prediction
190191
pred = self.model.predict(self.dataset)
191192
if isinstance(pred, pd.Series):
192193
pred = pred.to_frame("score")
@@ -285,7 +286,8 @@ def list(self):
285286

286287
class SigAnaRecord(ACRecordTemp):
287288
"""
288-
This is the Signal Analysis Record class that generates the analysis results such as IC and IR. This class inherits the ``RecordTemp`` class.
289+
This is the Signal Analysis Record class that generates the analysis results such as IC and IR.
290+
This class inherits the ``RecordTemp`` class.
289291
"""
290292

291293
artifact_path = "sig_analysis"
@@ -382,7 +384,7 @@ def __init__(
382384
indicator_analysis_freq : str|List[str]
383385
indicator analysis freq of report
384386
indicator_analysis_method : str, optional, default by None
385-
the candidated values include 'mean', 'amount_weighted', 'value_weighted'
387+
the candidate values include 'mean', 'amount_weighted', 'value_weighted'
386388
"""
387389
super().__init__(recorder=recorder, skip_existing=skip_existing, **kwargs)
388390

@@ -456,9 +458,9 @@ def _generate(self, **kwargs):
456458
pred = self.load("pred.pkl")
457459

458460
# replace the "<PRED>" with prediction saved before
459-
placehorder_value = {"<PRED>": pred}
461+
placeholder_value = {"<PRED>": pred}
460462
for k in "executor_config", "strategy_config":
461-
setattr(self, k, fill_placeholder(getattr(self, k), placehorder_value))
463+
setattr(self, k, fill_placeholder(getattr(self, k), placeholder_value))
462464

463465
# if the backtesting time range is not set, it will automatically extract time range from the prediction file
464466
dt_values = pred.index.get_level_values("datetime")

scripts/data_collector/pit/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@ cd qlib/scripts/data_collector/pit/
1919
python collector.py download_data --source_dir ~/.qlib/stock_data/source/pit --start 2000-01-01 --end 2020-01-01 --interval quarterly
2020
```
2121

22-
Downloading all data from the stock is very time consuming. If you just want run a quick test on a few stocks, you can run the command below
22+
Downloading all data from the stock is very time-consuming. If you just want to run a quick test on a few stocks, you can run the command below
2323
```bash
2424
python collector.py download_data --source_dir ~/.qlib/stock_data/source/pit --start 2000-01-01 --end 2020-01-01 --interval quarterly --symbol_regex "^(600519|000725).*"
2525
```

0 commit comments

Comments
 (0)