# base.yaml
project: VisDiff
wandb: true # whether to log to wandb
seed: 0 # random seed
data:
  root: ./data
  name: Data # name of dataset
  group1: "A" # name of group 1
  group2: "B" # name of group 2
  purity: 1.0 # how much of each concept is in each group (1.0 means perfect separation, 0.5 means perfect mix)
  subset: false # to use a subset of the dataset, set this to the name of the desired subset
captioner:
  model: blip # captioning model
  prompt: "Describe this image in detail." # prompt to use
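# What `model: blip` plus the prompt above could map to in code: a minimal
# sketch assuming a Hugging Face transformers backend and the
# Salesforce/blip-image-captioning-large checkpoint (the repo's actual
# captioner wrapper and checkpoint may differ):
#
#   from PIL import Image
#   from transformers import BlipProcessor, BlipForConditionalGeneration
#
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
#   model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
#   inputs = processor(Image.open("example.jpg"), "Describe this image in detail.", return_tensors="pt")
#   out = model.generate(**inputs, max_new_tokens=50)
#   caption = processor.decode(out[0], skip_special_tokens=True)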
# proposer: # VLM Proposer
#   method: VLMProposer # how to propose hypotheses
#   model: llava # VLM used to propose hypotheses
#   num_rounds: 3 # number of rounds to propose
#   num_samples: 20 # number of samples per group to use
#   sampling_method: random # how to sample
#   num_hypotheses: 10 # number of hypotheses to generate per round
#   prompt: VLM_PROMPT # prompt to use
proposer: # LLM Proposer
  method: LLMProposer # how to propose hypotheses
  model: gpt-4 # LLM used to propose hypotheses
  num_rounds: 3 # number of rounds to propose
  num_samples: 20 # number of samples per group to use
  sampling_method: random # how to sample
  num_hypotheses: 10 # number of hypotheses to generate per round
  prompt: CLIP_FRIENDLY # prompt to use
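# With these settings a run proposes up to num_rounds * num_hypotheses
# = 3 * 10 = 30 candidate hypotheses, each round drawing a fresh random
# sample of 20 items per group; for the LLM proposer these items are
# presumably the captions generated above rather than raw images.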
# proposer: # VLM Feature Proposer
#   method: VLMFeatureProposer # how to propose hypotheses
#   num_rounds: 3 # number of rounds to propose
#   num_samples: 20 # number of samples per group to use
#   sampling_method: random # how to sample
#   num_hypotheses: 10 # number of hypotheses to generate per round
ranker: # CLIP Ranker
  method: CLIPRanker # how to rank hypotheses
  clip_model: ViT-bigG-14 # CLIP model to use
  clip_dataset: laion2b_s39b_b160k # CLIP pretraining dataset to use
  max_num_samples: 5000 # maximum number of samples to use
  classify_threshold: 0.3 # threshold for CLIP classification
# ranker: # LLM Ranker
#   method: LLMRanker # how to rank hypotheses
#   captioner_model: llava # captioner to use
#   captioner_prompt: "Describe this image in detail."
#   model: vicuna # LLM used to rank hypotheses
#   classify_threshold: 0.5 # threshold for classification
# ranker: # VLM Ranker
#   method: VLMRanker # how to rank hypotheses
#   model: llava # VLM used to rank hypotheses
#   classify_threshold: 0.5 # threshold for classification
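# The clip_model / clip_dataset pair above matches open_clip's
# (architecture, pretrained tag) naming convention. A minimal loading
# sketch under that assumption (the ranker's actual loading code may differ):
#
#   import open_clip
#
#   model, _, preprocess = open_clip.create_model_and_transforms(
#       "ViT-bigG-14", pretrained="laion2b_s39b_b160k"
#   )
#   tokenizer = open_clip.get_tokenizer("ViT-bigG-14")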
evaluator:
  method: GPTEvaluator # how to evaluate hypotheses
  model: gpt-4 # LLM used to evaluate hypotheses
  n_hypotheses: 5 # number of hypotheses to evaluate
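# A minimal sketch of loading this config in Python, assuming PyYAML and a
# hypothetical configs/base.yaml path (adjust to the repo layout):
#
#   import yaml
#
#   with open("configs/base.yaml") as f:
#       cfg = yaml.safe_load(f)
#
#   assert cfg["proposer"]["method"] == "LLMProposer"
#   print(cfg["ranker"]["clip_model"])  # ViT-bigG-14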