Skip to content

Commit 3437583

Browse files
committed
m
1 parent 1f18c52 commit 3437583

File tree

1 file changed

+26
-7
lines changed

1 file changed

+26
-7
lines changed

Self_Tracking/play_books_greedy_optimal.py

Lines changed: 26 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -16,10 +16,25 @@
1616
from typing import Dict, List
1717

1818
# ---------------- Parameters ----------------
19-
READ_TIME_HOURS = 3.5
20-
SEARCH_COST_HOURS = 0.25
21-
PARTIAL_RATING_PENALTY = 0.2
22-
VALUE_BASE = 1.75
19+
READ_TIME_HOURS = 3.5 # full reading time
20+
SEARCH_COST_HOURS = 0.25 # discovery cost of getting a new book
21+
PARTIAL_RATING_PENALTY = 0.1 # rating loss when abandoning a book; assumes the utility loss is linear in the rating
22+
VALUE_BASE = 1.75 # utility = VALUE_BASE ** rating
23+
24+
# Quit levels
25+
QUIT_TABLE = { # might-finish 28 / won't-finish 39 is slightly off from counts I did another time, but I'm not exactly sure of those numbers anyway
26+
"Business, management": {"finished": 44, "might finish": 5, "wont finish": 6},
27+
"Computer Science": {"finished": 14, "might finish": 6, "wont finish": 10},
28+
"fiction": {"finished": 51, "might finish": 0, "wont finish": 6},
29+
"General Reading": {"finished": 40, "might finish": 5, "wont finish": 7},
30+
"Literature": {"finished": 50, "might finish": 3, "wont finish": 6},
31+
"Machine Learning": {"finished": 5, "might finish": 3, "wont finish": 2},
32+
"Math": {"finished": 3, "might finish": 6, "wont finish": 2},
33+
}
34+
QUIT_USEFULNESS = 1.2
35+
QUIT_ENJOYMENT = 1.4
36+
# I quit some books since mid or bored, not because I hated them
37+
QUIT_AT_FRACTION = 0.15 # but this would vary a lot?
2338

2439
# Static: O(D*F)
2540
F_GRID = np.concatenate(
@@ -74,7 +89,11 @@ def error_sigma(f):
7489

7590

7691
def error_sigma2(f):
77-
return 1 - f**0.5
92+
# return 1 - f**0.5
93+
    # as if we had as much information at 1/3 of the way through as we do at the end
94+
# m =(0.6-2.5)/f
95+
return max(2.5 - 5.7 * f, 0.6)
96+
# but how does this interact with the AR(1) process?
7897

7998

8099
def simulate_estimates(true_ratings: np.ndarray, error_fn=error_sigma, rho=0.9) -> np.ndarray:
@@ -496,7 +515,7 @@ def plot_simulation_paths(
496515
print(f"Final utility: {best_u:.2f} , current: {current_u:.2f}")
497516
print(f"Final Rating: {best_r:.2f} , current: {current_r:.2f}")
498517
# %%
499-
# Dynamic where check all options: D^F: 100B here
518+
# Dynamic programming where we check all options: D^F combinations — ~100M here at 8**9
500519
F_GRID = np.concatenate(
501520
[
502521
np.arange(0.01, 0.4, 0.08), # more precise in first half
@@ -506,6 +525,6 @@ def plot_simulation_paths(
506525
D_GRID = np.concatenate(
507526
[
508527
np.arange(0.00, 0.10, 0.02), # dropping up to 30% in 1 step. Depends on F_GRID size
509-
np.arange(0.10, 0.31, 0.07),
528+
np.arange(0.0, 0.21, 0.07),
510529
]
511530
)

0 commit comments

Comments
 (0)