-
Notifications
You must be signed in to change notification settings - Fork 89
/
base.py
230 lines (179 loc) · 7.05 KB
/
base.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
# Copyright (c) Yuta Saito, Yusuke Narita, and ZOZO Technologies, Inc. All rights reserved.
# Licensed under the Apache 2.0 License.
"""Base Interfaces for Bandit Algorithms."""
from abc import ABCMeta
from abc import abstractmethod
from dataclasses import dataclass
from typing import Optional
import numpy as np
from sklearn.utils import check_random_state
from sklearn.utils import check_scalar
from .policy_type import PolicyType
@dataclass
class BaseContextFreePolicy(metaclass=ABCMeta):
    """Base class for context-free bandit policies.

    Parameters
    ----------
    n_actions: int
        Number of actions.

    len_list: int, default=1
        Length of a list of actions in a recommendation/ranking interface, slate size.
        When Open Bandit Dataset is used, 3 should be set.

    batch_size: int, default=1
        Number of samples used in a batch parameter update.

    random_state: int, default=None
        Controls the random seed in sampling actions.

    """

    n_actions: int
    len_list: int = 1
    batch_size: int = 1
    random_state: Optional[int] = None

    def __post_init__(self) -> None:
        """Validate hyperparameters and initialize the policy state."""
        check_scalar(self.n_actions, "n_actions", int, min_val=2)
        check_scalar(self.len_list, "len_list", int, min_val=1, max_val=self.n_actions)
        check_scalar(self.batch_size, "batch_size", int, min_val=1)
        # Delegate state setup to `initialize` so that construction and a later
        # reset always run the same code (previously duplicated verbatim).
        self.initialize()

    @property
    def policy_type(self) -> PolicyType:
        """Type of the bandit policy."""
        return PolicyType.CONTEXT_FREE

    def initialize(self) -> None:
        """Initialize (or reset) the trial counter, RNG, and per-action statistics."""
        self.n_trial = 0
        self.random_ = check_random_state(self.random_state)
        # cumulative and within-batch counts of pulls and rewards per action
        self.action_counts = np.zeros(self.n_actions, dtype=int)
        self.action_counts_temp = np.zeros(self.n_actions, dtype=int)
        self.reward_counts_temp = np.zeros(self.n_actions)
        self.reward_counts = np.zeros(self.n_actions)

    @abstractmethod
    def select_action(self) -> np.ndarray:
        """Select a list of actions."""
        raise NotImplementedError

    @abstractmethod
    def update_params(self, action: int, reward: float) -> None:
        """Update policy parameters."""
        raise NotImplementedError
@dataclass
class BaseContextualPolicy(metaclass=ABCMeta):
    """Base class for contextual bandit policies.

    Parameters
    ----------
    dim: int
        Number of dimensions of context vectors.

    n_actions: int
        Number of actions.

    len_list: int, default=1
        Length of a list of actions in a recommendation/ranking interface, slate size.
        When Open Bandit Dataset is used, 3 should be set.

    batch_size: int, default=1
        Number of samples used in a batch parameter update.

    random_state: int, default=None
        Controls the random seed in sampling actions.

    """

    dim: int
    n_actions: int
    len_list: int = 1
    batch_size: int = 1
    random_state: Optional[int] = None

    def __post_init__(self) -> None:
        """Validate hyperparameters and initialize the policy state."""
        check_scalar(self.dim, "dim", int, min_val=1)
        check_scalar(self.n_actions, "n_actions", int, min_val=2)
        check_scalar(self.len_list, "len_list", int, min_val=1, max_val=self.n_actions)
        check_scalar(self.batch_size, "batch_size", int, min_val=1)
        # Delegate state setup to `initialize` so that construction and a later
        # reset always run the same code (previously duplicated verbatim).
        self.initialize()

    @property
    def policy_type(self) -> PolicyType:
        """Type of the bandit policy."""
        return PolicyType.CONTEXTUAL

    def initialize(self) -> None:
        """Initialize (or reset) the trial counter, RNG, and per-action histories."""
        self.n_trial = 0
        self.random_ = check_random_state(self.random_state)
        self.action_counts = np.zeros(self.n_actions, dtype=int)
        # per-action lists of observed rewards and the contexts they occurred in
        self.reward_lists = [[] for _ in np.arange(self.n_actions)]
        self.context_lists = [[] for _ in np.arange(self.n_actions)]

    @abstractmethod
    def select_action(self, context: np.ndarray) -> np.ndarray:
        """Select a list of actions."""
        raise NotImplementedError

    @abstractmethod
    def update_params(self, action: float, reward: float, context: np.ndarray) -> None:
        """Update policy parameters."""
        raise NotImplementedError
@dataclass
class BaseOfflinePolicyLearner(metaclass=ABCMeta):
    """Base class for off-policy learners trained on logged bandit data.

    Parameters
    -----------
    n_actions: int
        Number of actions.

    len_list: int, default=1
        Length of a list of actions in a recommendation/ranking interface, slate size.
        When Open Bandit Dataset is used, 3 should be set.

    """

    n_actions: int
    len_list: int = 1

    def __post_init__(self) -> None:
        """Validate the hyperparameters given at construction time."""
        # n_actions is checked first because it bounds len_list below
        check_scalar(self.n_actions, "n_actions", int, min_val=2)
        check_scalar(self.len_list, "len_list", int, min_val=1, max_val=self.n_actions)

    @property
    def policy_type(self) -> PolicyType:
        """Type of the bandit policy."""
        return PolicyType.OFFLINE

    @abstractmethod
    def fit(self) -> None:
        """Fits an offline bandit policy on the given logged bandit data."""
        raise NotImplementedError

    @abstractmethod
    def predict(self, context: np.ndarray) -> np.ndarray:
        """Predict best action for new data.

        Parameters
        -----------
        context: array-like, shape (n_rounds_of_new_data, dim_context)
            Context vectors for new data.

        Returns
        -----------
        action: array-like, shape (n_rounds_of_new_data, n_actions, len_list)
            Action choices made by a policy trained by calling the `fit` method.

        """
        raise NotImplementedError
@dataclass
class BaseContinuousOfflinePolicyLearner(metaclass=ABCMeta):
    """Base class for off-policy learners for the continuous action setting."""

    @property
    def policy_type(self) -> PolicyType:
        """Type of the bandit policy."""
        return PolicyType.OFFLINE

    @abstractmethod
    def fit(self) -> None:
        """Fits an offline bandit policy on the given logged bandit data."""
        raise NotImplementedError

    @abstractmethod
    def predict(self, context: np.ndarray) -> np.ndarray:
        """Predict the best continuous action value for new data.

        Parameters
        -----------
        context: array-like, shape (n_rounds_of_new_data, dim_context)
            Context vectors for new data.

        Returns
        -----------
        action: array-like, shape (n_rounds_of_new_data,)
            Action choices made by a policy trained by calling the `fit` method.

        """
        raise NotImplementedError