Skip to content

Commit 1ef8e61

Browse files
authored
fix_pylint_for_CI (microsoft#1119)
* fix_pylint_for_CI * reformat_with_black * fix_pylint_C3001 * fix_flake8_error
1 parent 1a4114b commit 1ef8e61

File tree

11 files changed

+40
-15
lines changed

11 files changed

+40
-15
lines changed

.github/workflows/test.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@ jobs:
7272
run: |
7373
pip install --upgrade pip
7474
pip install pylint
75-
pylint --disable=C0104,C0114,C0115,C0116,C0301,C0302,C0411,C0413,C1802,R0201,R0401,R0801,R0902,R0903,R0911,R0912,R0913,R0914,R0915,R1720,W0105,W0123,W0201,W0511,W0613,W1113,W1514,E0401,E1121,C0103,C0209,R0402,R1705,R1710,R1725,R1735,W0102,W0212,W0221,W0223,W0231,W0237,W0612,W0621,W0622,W0703,W1309,E1102,E1136 --const-rgx='[a-z_][a-z0-9_]{2,30}$' qlib --init-hook "import astroid; astroid.context.InferenceContext.max_inferred = 500"
75+
pylint --disable=C0104,C0114,C0115,C0116,C0301,C0302,C0411,C0413,C1802,R0401,R0801,R0902,R0903,R0911,R0912,R0913,R0914,R0915,R1720,W0105,W0123,W0201,W0511,W0613,W1113,W1514,E0401,E1121,C0103,C0209,R0402,R1705,R1710,R1725,R1735,W0102,W0212,W0221,W0223,W0231,W0237,W0612,W0621,W0622,W0703,W1309,E1102,E1136 --const-rgx='[a-z_][a-z0-9_]{2,30}$' qlib --init-hook "import astroid; astroid.context.InferenceContext.max_inferred = 500"
7676
7777
# The following flake8 error codes were ignored:
7878
# E501 line too long

qlib/contrib/data/dataset.py

Lines changed: 12 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -63,11 +63,20 @@ def _get_date_parse_fn(target):
6363
get_date_parse_fn(20120101)('2017-01-01') => 20170101
6464
"""
6565
if isinstance(target, int):
66-
_fn = lambda x: int(str(x).replace("-", "")[:8]) # 20200201
66+
67+
def _fn(x):
68+
return int(str(x).replace("-", "")[:8]) # 20200201
69+
6770
elif isinstance(target, str) and len(target) == 8:
68-
_fn = lambda x: str(x).replace("-", "")[:8] # '20200201'
71+
72+
def _fn(x):
73+
return str(x).replace("-", "")[:8] # '20200201'
74+
6975
else:
70-
_fn = lambda x: x # '2021-01-01'
76+
77+
def _fn(x):
78+
return x # '2021-01-01'
79+
7180
return _fn
7281

7382

qlib/contrib/data/handler.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -255,7 +255,10 @@ def parse_config_to_fields(config):
255255
exclude = config["rolling"].get("exclude", [])
256256
# `exclude` in dataset config unnecessary field
257257
# `include` in dataset config necessary field
258-
use = lambda x: x not in exclude and (include is None or x in include)
258+
259+
def use(x):
260+
return x not in exclude and (include is None or x in include)
261+
259262
if use("ROC"):
260263
fields += ["Ref($close, %d)/$close" % d for d in windows]
261264
names += ["ROC%d" % d for d in windows]

qlib/contrib/eva/alpha.py

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,9 @@ def calc_long_short_prec(
4848

4949
group = df.groupby(level=date_col)
5050

51-
N = lambda x: int(len(x) * quantile)
51+
def N(x):
52+
return int(len(x) * quantile)
53+
5254
# find the top/low quantile of prediction and treat them as long and short target
5355
long = group.apply(lambda x: x.nlargest(N(x), columns="pred").label).reset_index(level=0, drop=True)
5456
short = group.apply(lambda x: x.nsmallest(N(x), columns="pred").label).reset_index(level=0, drop=True)
@@ -98,7 +100,10 @@ def calc_long_short_return(
98100
if dropna:
99101
df.dropna(inplace=True)
100102
group = df.groupby(level=date_col)
101-
N = lambda x: int(len(x) * quantile)
103+
104+
def N(x):
105+
return int(len(x) * quantile)
106+
102107
r_long = group.apply(lambda x: x.nlargest(N(x), columns="pred").label.mean())
103108
r_short = group.apply(lambda x: x.nsmallest(N(x), columns="pred").label.mean())
104109
r_avg = group.label.mean()

qlib/contrib/meta/data_selection/dataset.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -290,7 +290,7 @@ def _prepare_meta_ipt(self, task):
290290
ic_df = self.internal_data.data_ic_df
291291

292292
segs = task["dataset"]["kwargs"]["segments"]
293-
end = max([segs[k][1] for k in ("train", "valid") if k in segs])
293+
end = max(segs[k][1] for k in ("train", "valid") if k in segs)
294294
ic_df_avail = ic_df.loc[:end, pd.IndexSlice[:, :end]]
295295

296296
# meta data set focus on the **information** instead of preprocess

qlib/contrib/model/highfreq_gdbt_model.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,10 @@ def _prepare_data(self, dataset: DatasetH):
9292
# Convert label into alpha
9393
df_train["label"][l_name] = df_train["label"][l_name] - df_train["label"][l_name].mean(level=0)
9494
df_valid["label"][l_name] = df_valid["label"][l_name] - df_valid["label"][l_name].mean(level=0)
95-
mapping_fn = lambda x: 0 if x < 0 else 1
95+
96+
def mapping_fn(x):
97+
return 0 if x < 0 else 1
98+
9699
df_train["label_c"] = df_train["label"][l_name].apply(mapping_fn)
97100
df_valid["label_c"] = df_valid["label"][l_name].apply(mapping_fn)
98101
x_train, y_train = df_train["feature"], df_train["label_c"].values

qlib/contrib/model/pytorch_hist.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -292,7 +292,9 @@ def fit(
292292
pretrained_model.load_state_dict(torch.load(self.model_path))
293293

294294
model_dict = self.HIST_model.state_dict()
295-
pretrained_dict = {k: v for k, v in pretrained_model.state_dict().items() if k in model_dict}
295+
pretrained_dict = {
296+
k: v for k, v in pretrained_model.state_dict().items() if k in model_dict # pylint: disable=E1135
297+
}
296298
model_dict.update(pretrained_dict)
297299
self.HIST_model.load_state_dict(model_dict)
298300
self.logger.info("Loading pretrained model Done...")

qlib/contrib/model/pytorch_tra.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -167,8 +167,8 @@ def _init_model(self):
167167
for param in self.tra.predictors.parameters():
168168
param.requires_grad_(False)
169169

170-
self.logger.info("# model params: %d" % sum([p.numel() for p in self.model.parameters() if p.requires_grad]))
171-
self.logger.info("# tra params: %d" % sum([p.numel() for p in self.tra.parameters() if p.requires_grad]))
170+
self.logger.info("# model params: %d" % sum(p.numel() for p in self.model.parameters() if p.requires_grad))
171+
self.logger.info("# tra params: %d" % sum(p.numel() for p in self.tra.parameters() if p.requires_grad))
172172

173173
self.optimizer = optim.Adam(list(self.model.parameters()) + list(self.tra.parameters()), lr=self.lr)
174174

qlib/data/dataset/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -438,7 +438,7 @@ def build_index(data: pd.DataFrame) -> Tuple[pd.DataFrame, dict]:
438438

439439
@property
440440
def empty(self):
441-
return self.__len__() == 0
441+
return len(self) == 0
442442

443443
def _get_indices(self, row: int, col: int) -> np.array:
444444
"""

qlib/rl/utils/data_queue.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -145,7 +145,7 @@ def __del__(self):
145145
def __iter__(self):
146146
if not self._activated:
147147
raise ValueError(
148-
"Need to call activate() to launch a daemon worker " "to produce data into data queue before using it."
148+
"Need to call activate() to launch a daemon worker to produce data into data queue before using it."
149149
)
150150
return self._consumer()
151151

0 commit comments

Comments
 (0)